repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/plot/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/serialization/state_data.py
|
"""
This module defines a class that holds the current state of algorithm object.
"""
import numpy as np
class StateData(object):
"""
Holds the data to be serialized.
"""
def __init__(self):
self._auto_fix_time_shifts = True
self._power_signals_d = None
self._rank_k = None
self._matrix_l0 = None
self._matrix_r0 = None
self._l_value = np.array([])
self._r_value = np.array([])
self._beta_value = 0.0
self._component_r0 = np.array([])
self._mu_l = None
self._mu_r = None
self._tau = None
self._is_solver_error = False
self._is_problem_status_error = False
self._f1_increase = False
self._obj_increase = False
self._residuals_median = None
self._residuals_variance = None
self._residual_l0_norm = None
self._weights = np.array([])
@property
def auto_fix_time_shifts(self):
return self._auto_fix_time_shifts
@auto_fix_time_shifts.setter
def auto_fix_time_shifts(self, value):
self._auto_fix_time_shifts = value
@property
def power_signals_d(self):
return self._power_signals_d
@power_signals_d.setter
def power_signals_d(self, value):
self._power_signals_d = value
@property
def rank_k(self):
return self._rank_k
@rank_k.setter
def rank_k(self, value):
self._rank_k = value
@property
def matrix_l0(self):
return self._matrix_l0
@matrix_l0.setter
def matrix_l0(self, value):
self._matrix_l0 = value
@property
def matrix_r0(self):
return self._matrix_r0
@matrix_r0.setter
def matrix_r0(self, value):
self._matrix_r0 = value
@property
def l_value(self):
return self._l_value
@l_value.setter
def l_value(self, value):
self._l_value = value
@property
def r_value(self):
return self._r_value
@r_value.setter
def r_value(self, value):
self._r_value = value
@property
def beta_value(self):
return self._beta_value
@beta_value.setter
def beta_value(self, value):
self._beta_value = value
@property
def component_r0(self):
return self._component_r0
@component_r0.setter
def component_r0(self, value):
self._component_r0 = value
@property
def mu_l(self):
return self._mu_l
@mu_l.setter
def mu_l(self, value):
self._mu_l = value
@property
def mu_r(self):
return self._mu_r
@mu_r.setter
def mu_r(self, value):
self._mu_r = value
@property
def tau(self):
return self._tau
@tau.setter
def tau(self, value):
self._tau = value
@property
def is_solver_error(self):
return self._is_solver_error
@is_solver_error.setter
def is_solver_error(self, value):
self._is_solver_error = value
@property
def is_problem_status_error(self):
return self._is_problem_status_error
@is_problem_status_error.setter
def is_problem_status_error(self, value):
self._is_problem_status_error = value
@property
def f1_increase(self):
return self._f1_increase
@f1_increase.setter
def f1_increase(self, value):
self._f1_increase = value
@property
def obj_increase(self):
return self._obj_increase
@obj_increase.setter
def obj_increase(self, value):
self._obj_increase = value
@property
def residuals_median(self):
return self._residuals_median
@residuals_median.setter
def residuals_median(self, value):
self._residuals_median = value
@property
def residuals_variance(self):
return self._residuals_variance
@residuals_variance.setter
def residuals_variance(self, value):
self._residuals_variance = value
@property
def residual_l0_norm(self):
return self._residual_l0_norm
@residual_l0_norm.setter
def residual_l0_norm(self, value):
self._residual_l0_norm = value
@property
def weights(self):
return self._weights
@weights.setter
def weights(self, value):
self._weights = value
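# The class above is deliberately plain property boilerplate: every private
# field initialized in __init__ is exposed read/write so that
# SerializationMixin (serialization_mixin.py in this package) can copy it into
# and out of a JSON document. A minimal usage sketch, assuming the owning
# algorithm object populates it field by field:
#
#     state = StateData()
#     state.rank_k = 6
#     state.beta_value = -0.04
#     state.weights = np.ones(365)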
| 4,291 | 21.010256 | 77 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/serialization/serialization_mixin.py
|
"""
This module defines Mixin for serialization.
"""
import json
import numpy as np
from statistical_clear_sky.algorithm.serialization.state_data import StateData
class SerializationMixin(object):
"""
Mixin for IterativeClearSky, taking care of serialization.
"""
def save_instance(self, filepath):
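"""Serialize the current state data to a JSON file at the given filepath."""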
save_dict = dict(
auto_fix_time_shifts = self._state_data.auto_fix_time_shifts,
power_signals_d = self._state_data.power_signals_d.tolist(),
rank_k = self._state_data.rank_k,
matrix_l0 = self._state_data.matrix_l0.tolist(),
matrix_r0 = self._state_data.matrix_r0.tolist(),
l_value = self._state_data.l_value.tolist(),
r_value = self._state_data.r_value.tolist(),
beta_value = float(self._state_data.beta_value),
component_r0 = self._state_data.component_r0.tolist(),
mu_l = self._state_data.mu_l,
mu_r = self._state_data.mu_r,
tau = self._state_data.tau,
is_solver_error = self._state_data.is_solver_error,
is_problem_status_error = self._state_data.is_problem_status_error,
f1_increase = self._state_data.f1_increase,
obj_increase = self._state_data.obj_increase,
residuals_median = self._state_data.residuals_median,
residuals_variance = self._state_data.residuals_variance,
residual_l0_norm = self._state_data.residual_l0_norm,
weights = self._state_data.weights.tolist()
)
with open(filepath, 'w') as file:
json.dump(save_dict, file)
@classmethod
def load_instance(cls, filepath):
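"""Reconstruct an instance from a JSON file produced by save_instance."""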
with open(filepath, 'r') as file:
load_dict = json.load(file)
power_signals_d = np.array(load_dict['power_signals_d'])
rank_k = load_dict['rank_k']
instance = cls(np.array(power_signals_d), rank_k=rank_k)
instance.state_data.power_signals_d = power_signals_d
instance.state_data.rank_k = rank_k
instance.state_data.matrix_l0 = np.array(load_dict['matrix_l0'])
instance.state_data.matrix_r0 = np.array(load_dict['matrix_r0'])
instance.state_data.l_value = np.array(load_dict['l_value'])
instance.state_data.r_value = np.array(load_dict['r_value'])
instance.state_data.beta_value = load_dict['beta_value']
instance.state_data.component_r0 = np.array(load_dict['component_r0'])
instance.state_data.mu_l = load_dict['mu_l']
instance.state_data.mu_r = load_dict['mu_r']
instance.state_data.tau = load_dict['tau']
instance.state_data.is_solver_error = load_dict['is_solver_error']
instance.state_data.is_problem_status_error = load_dict[
'is_problem_status_error']
instance.state_data.f1_increase = load_dict['f1_increase']
instance.state_data.obj_increase = load_dict['obj_increase']
instance.state_data.residuals_median = load_dict['residuals_median']
instance.state_data.residuals_variance = load_dict['residuals_variance']
instance.state_data.residual_l0_norm = load_dict['residual_l0_norm']
instance.state_data.weights = np.array(load_dict['weights'])
instance._keep_result_variables_as_properties(instance.state_data.l_value,
instance.state_data.r_value,
instance.state_data.beta_value)
instance._keep_supporting_parameters_as_properties(instance.state_data.weights)
return instance
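# A minimal round-trip sketch, assuming IterativeFitting mixes this class in
# (as the serialization test in this repo suggests); not part of the module's
# public examples:
#
#     fitting = IterativeFitting(power_signals_d, rank_k=6)
#     fitting.execute(mu_l=5e2, mu_r=1e3, tau=0.9)
#     fitting.save_instance('/tmp/state_data.json')
#     restored = IterativeFitting.load_instance('/tmp/state_data.json')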
| 3,634 | 43.329268 | 87 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/statistical_clear_sky/algorithm/serialization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/test_iterative_fitting_execute.py
|
import unittest
from unittest.mock import Mock
import os
import numpy as np
import cvxpy as cvx
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
from statistical_clear_sky.algorithm.initialization.linearization_helper\
import LinearizationHelper
from statistical_clear_sky.algorithm.initialization.weight_setting\
import WeightSetting
from statistical_clear_sky.algorithm.minimization.left_matrix\
import LeftMatrixMinimization
from statistical_clear_sky.algorithm.minimization.right_matrix\
import RightMatrixMinimization
class TestIterativeFittingExecute(unittest.TestCase):
def setUp(self):
fixed_power_signals_d_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock/three_years_power_signals_d_1.csv"))
with open(fixed_power_signals_d_file_path) as file:
fixed_power_signals_d = np.loadtxt(file, delimiter=',')
initial_r0_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock/three_years_initial_component_r0.csv"))
with open(initial_r0_value_file_path) as file:
linearization_helper_return_value = np.loadtxt(file, delimiter=',')
self.mock_linearization_helper = Mock(spec=LinearizationHelper)
self.mock_linearization_helper.obtain_component_r0.return_value =\
linearization_helper_return_value
weights_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock/three_years_weights.csv"))
with open(weights_file_path) as file:
weight_setting_return_value = np.loadtxt(file, delimiter=',')
self.mock_weight_setting = Mock(spec=WeightSetting)
self.mock_weight_setting.obtain_weights.return_value =\
weight_setting_return_value
left_matrix_minimize_return_values = []
for i in range(13):
l_cs_value_left_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("l_cs_value_after_left_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(l_cs_value_left_matrix_file_path) as file:
l_cs_value_left_matrix = np.loadtxt(file, delimiter=',')
r_cs_value_left_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("r_cs_value_after_left_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(r_cs_value_left_matrix_file_path) as file:
r_cs_value_left_matrix = np.loadtxt(file, delimiter=',')
beta_value_left_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("beta_value_after_left_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(beta_value_left_matrix_file_path) as file:
beta_value_left_matrix = np.loadtxt(file, delimiter=',')
left_matrix_minimize_return_values.append(
(l_cs_value_left_matrix, r_cs_value_left_matrix,
beta_value_left_matrix))
self.mock_left_matrix_minimization = Mock(spec=LeftMatrixMinimization)
self.mock_left_matrix_minimization.minimize.side_effect =\
left_matrix_minimize_return_values
right_matrix_minimize_return_values = []
for i in range(13):
l_cs_value_right_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("l_cs_value_after_right_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(l_cs_value_right_matrix_file_path) as file:
l_cs_value_right_matrix = np.loadtxt(file, delimiter=',')
r_cs_value_right_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("r_cs_value_after_right_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(r_cs_value_right_matrix_file_path) as file:
r_cs_value_right_matrix = np.loadtxt(file, delimiter=',')
beta_value_right_matrix_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock",
("beta_value_after_right_matrix_minimization_iteration_{}.csv"
.format(i+1))))
with open(beta_value_right_matrix_file_path) as file:
beta_value_right_matrix = np.loadtxt(file, delimiter=',')
right_matrix_minimize_return_values.append(
(l_cs_value_right_matrix, r_cs_value_right_matrix,
beta_value_right_matrix))
self.mock_right_matrix_minimization = Mock(spec=RightMatrixMinimization)
self.mock_right_matrix_minimization.minimize.side_effect =\
right_matrix_minimize_return_values
def test_execute(self):
input_power_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock/three_years_power_signals_d_1.csv"))
with open(input_power_signals_file_path) as file:
power_signals_d = np.loadtxt(file, delimiter=',')
rank_k = 6
clear_sky_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/for_mock/three_years_clear_sky_signals.csv"))
with open(clear_sky_signals_file_path) as file:
expected_clear_sky_signals = np.loadtxt(file, delimiter=',')
expected_degradation_rate = np.array(-0.04069624)
iterative_fitting = IterativeFitting(power_signals_d, rank_k=rank_k)
# Inject mock objects by dependency injection:
iterative_fitting.set_linearization_helper(
self.mock_linearization_helper)
iterative_fitting.set_weight_setting(self.mock_weight_setting)
iterative_fitting.set_left_matrix_minimization(
self.mock_left_matrix_minimization)
iterative_fitting.set_right_matrix_minimization(
self.mock_right_matrix_minimization)
iterative_fitting.execute(mu_l=5e2, mu_r=1e3, tau=0.9,
max_iteration=15, verbose=False)
actual_clear_sky_signals = iterative_fitting.clear_sky_signals()
actual_degradation_rate = iterative_fitting.degradation_rate()
# Note: The discrepancy is due to differences between Python 3.6 and 3.7.
# np.testing.assert_array_equal(actual_clear_sky_signals,
# expected_clear_sky_signals)
np.testing.assert_almost_equal(actual_clear_sky_signals,
expected_clear_sky_signals,
decimal=13)
# np.testing.assert_array_equal(actual_degradation_rate,
# expected_degradation_rate)
np.testing.assert_almost_equal(actual_degradation_rate,
expected_degradation_rate,
decimal=8)
| 7,472 | 46 | 80 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/test_iterative_fitting.py
|
import unittest
import os
import numpy as np
import cvxpy as cvx
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
class TestIterativeFitting(unittest.TestCase):
def test_calculate_objective(self):
input_power_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/objective_calculation",
"three_years_power_signals_d_1.csv"))
with open(input_power_signals_file_path) as file:
power_signals_d = np.loadtxt(file, delimiter=',')
rank_k = 6
mu_l = 5e2
mu_r = 1e3
tau = 0.9
initial_l_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/objective_calculation",
"three_years_initial_l_cs_value.csv"))
with open(initial_l_cs_value_file_path) as file:
l_cs_value = np.loadtxt(file, delimiter=',')
initial_r_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/objective_calculation",
"three_years_initial_r_cs_value.csv"))
with open(initial_r_cs_value_file_path) as file:
r_cs_value = np.loadtxt(file, delimiter=',')
beta_value = 0.0
weights_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../fixtures/objective_calculation",
"three_years_weights.csv"))
with open(weights_file_path) as file:
weights = np.loadtxt(file, delimiter=',')
expected_objective_values = np.array([117277.71151791142,
478.8539994379723, 23800125.708200675, 228653.22102385858])
iterative_fitting = IterativeFitting(power_signals_d, rank_k=rank_k)
actual_objective_values = iterative_fitting._calculate_objective(
mu_l, mu_r, tau, l_cs_value, r_cs_value,
beta_value, weights, sum_components=False)
# np.testing.assert_array_equal(actual_objective_values,
# expected_objective_values)
np.testing.assert_almost_equal(actual_objective_values,
expected_objective_values,
decimal=8)
| 2,327 | 37.163934 | 78 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/minimization/test_left_matrix_minimization.py
|
import unittest
import os
import numpy as np
import cvxpy as cvx
from statistical_clear_sky.algorithm.minimization.left_matrix\
import LeftMatrixMinimization
class TestLeftMatrixMinimization(unittest.TestCase):
def test_minimize(self):
power_signals_d = np.array([[0.0, 0.0, 0.0, 0.0],
[1.33389997, 1.40310001, 0.67150003,
0.77249998],
[1.42349994, 1.51800001, 1.43809998,
1.20449996],
[1.52020001, 1.45150006, 1.84809995,
0.99949998]])
rank_k = 4
weights = np.array([0.0, 0.0, 0.97073243, 0.97243198])
tau = 0.9
mu_l = 5e2
initial_l_cs_value = np.array([[0.12227644, -0.05536519,
-0.02796016, 0.11115515],
[0.12183656, -0.06418167,
-0.03631565, 0.09248578],
[0.12190038, -0.07035386,
-0.03077544, 0.06306368],
[0.12185763, -0.0822263,
-0.02468169, 0.03843156]])
initial_r_cs_value = np.array([[7.81948013, 11.26965908,
11.43521789, 8.89706298],
[0.18783052, -1.17162576,
-1.68541257, -1.14962881],
[0.97275831, 0.99957452,
0.92734892, 0.453427],
[-0.86265428, -3.28835462,
-4.00326343, -1.76664483]])
initial_beta_value = 0.0
initial_component_r0 = np.array([1.36527916, 2.70624333, 4.04720749,
5.38817165])
expected_l_cs_value = np.array([[2.610888e-14, -1.027025e-14,
1.481367e-14, -1.786423e-14],
[6.769088e-02, -5.028329e-14,
-4.090143e-14, 1.891483e-13],
[1.353818e-01, -8.877942e-14,
4.614613e-15, -1.047267e-14],
[2.030726e-01, 1.495160e-13,
1.955246e-14, -1.573292e-13]])
expected_r_cs_value = initial_r_cs_value
expected_beta_value = initial_beta_value
left_matrix_minimization = LeftMatrixMinimization(power_signals_d,
rank_k, weights, tau, mu_l, solver_type='ECOS')
actual_l_cs_value, actual_r_cs_value, actual_beta_value =\
left_matrix_minimization.minimize(initial_l_cs_value,
initial_r_cs_value,
initial_beta_value,
initial_component_r0)
# np.testing.assert_array_equal(actual_l_cs_value, expected_l_cs_value)
np.testing.assert_almost_equal(actual_l_cs_value, expected_l_cs_value,
decimal=6)
np.testing.assert_array_equal(actual_r_cs_value, expected_r_cs_value)
np.testing.assert_array_equal(actual_beta_value, expected_beta_value)
def test_minimize_with_large_data(self):
input_power_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"three_years_power_signals_d_1.csv"))
with open(input_power_signals_file_path) as file:
power_signals_d = np.loadtxt(file, delimiter=',')
rank_k = 6
weights_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"three_years_weights.csv"))
with open(weights_file_path) as file:
weights = np.loadtxt(file, delimiter=',')
tau = 0.9
mu_l = 5e2
initial_l_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"three_years_initial_l_cs_value.csv"))
with open(initial_l_cs_value_file_path) as file:
initial_l_cs_value = np.loadtxt(file, delimiter=',')
initial_r_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"three_years_initial_r_cs_value.csv"))
with open(initial_r_cs_value_file_path) as file:
initial_r_cs_value = np.loadtxt(file, delimiter=',')
initial_beta_value = 0.0
l_cs_value_after_iteration_1_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"l_cs_value_after_left_matrix_minimization_iteration_1_NEW.csv"))
with open(l_cs_value_after_iteration_1_file_path) as file:
expected_l_cs_value = np.loadtxt(file, delimiter=',')
r_cs_value_after_iteration_1_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"r_cs_value_after_left_matrix_minimization_iteration_1.csv"))
with open(r_cs_value_after_iteration_1_file_path) as file:
expected_r_cs_value = np.loadtxt(file, delimiter=',')
expected_beta_value = initial_beta_value
initial_r0_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/left_matrix_minimization",
"three_years_initial_component_r0.csv"))
with open(initial_r0_value_file_path) as file:
initial_component_r0_value = np.loadtxt(file, delimiter=',')
left_matrix_minimization = LeftMatrixMinimization(power_signals_d,
rank_k, weights, tau, mu_l, solver_type='MOSEK')
try:
actual_l_cs_value, actual_r_cs_value, actual_beta_value =\
left_matrix_minimization.minimize(initial_l_cs_value,
initial_r_cs_value,
initial_beta_value,
initial_component_r0_value)
except cvx.SolverError:
self.skipTest("This test uses MOSEK solver"
+ "because default ECOS solver fails with large data. "
+ "Unless MOSEK is installed, this test fails.")
else:
np.testing.assert_array_almost_equal(
actual_l_cs_value,
expected_l_cs_value,
decimal=2
)
np.testing.assert_array_almost_equal(
actual_r_cs_value,
expected_r_cs_value,
decimal=2
)
np.testing.assert_array_almost_equal(
actual_beta_value,
expected_beta_value,
decimal=2
)
| 7,344 | 45.783439 | 79 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/minimization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/minimization/test_right_matrix_minimization.py
|
import unittest
import os
import numpy as np
import cvxpy as cvx
from statistical_clear_sky.algorithm.minimization.right_matrix\
import RightMatrixMinimization
class TestRightMatrixMinimization(unittest.TestCase):
def test_minimize_with_large_data(self):
input_power_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"three_years_power_signals_d_1.csv"))
with open(input_power_signals_file_path) as file:
power_signals_d = np.loadtxt(file, delimiter=',')
rank_k = 6
weights_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"three_years_weights.csv"))
with open(weights_file_path) as file:
weights = np.loadtxt(file, delimiter=',')
tau = 0.9
mu_r = 1e3
initial_r0_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"three_years_initial_component_r0.csv"))
with open(initial_r0_value_file_path) as file:
initial_component_r0_value = np.loadtxt(file, delimiter=',')
initial_l_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"l_cs_value_after_left_matrix_minimization_iteration_1.csv"))
with open(initial_l_cs_value_file_path) as file:
initial_l_cs_value = np.loadtxt(file, delimiter=',')
initial_r_cs_value_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"r_cs_value_after_left_matrix_minimization_iteration_1.csv"))
with open(initial_r_cs_value_file_path) as file:
initial_r_cs_value = np.loadtxt(file, delimiter=',')
initial_beta_value = 0.0
l_cs_value_after_iteration_1_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"l_cs_value_after_right_matrix_minimization_iteration_1.csv"))
with open(l_cs_value_after_iteration_1_file_path) as file:
expected_l_cs_value = np.loadtxt(file, delimiter=',')
r_cs_value_after_iteration_1_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/right_matrix_minimization",
"r_cs_value_after_right_matrix_minimization_iteration_1.csv"))
with open(r_cs_value_after_iteration_1_file_path) as file:
expected_r_cs_value = np.loadtxt(file, delimiter=',')
expected_beta_value = -0.04015762
right_matrix_minimization = RightMatrixMinimization(power_signals_d,
rank_k, weights, tau, mu_r, solver_type='MOSEK')
try:
actual_l_cs_value, actual_r_cs_value, actual_beta_value =\
right_matrix_minimization.minimize(initial_l_cs_value,
initial_r_cs_value,
initial_beta_value,
initial_component_r0_value)
except cvx.SolverError:
self.skipTest("This test uses MOSEK solver"
+ "because default ECOS solver fails with large data. "
+ "Unless MOSEK is installed, this test fails.")
else:
np.testing.assert_array_almost_equal(
actual_l_cs_value, expected_l_cs_value, decimal=2
)
np.testing.assert_array_almost_equal(
actual_r_cs_value, expected_r_cs_value, decimal=1
)
np.testing.assert_almost_equal(
actual_beta_value, expected_beta_value, decimal=4
)
| 4,006 | 42.086022 | 78 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/initialization/test_linearization_helper.py
|
import unittest
import numpy as np
# import os
from statistical_clear_sky.algorithm.initialization.linearization_helper\
import LinearizationHelper
class TestLinearizationHelper(unittest.TestCase):
'''
Unit test for obtaining the initial value of the right-vectors component r0,
which is used as the denominator of a non-linear equation in order to make
it linear.
It covers the first part of the constructor of main.IterativeClearSky
in the original code.
'''
def setUp(self):
pass
def test_obtain_component_r0(self):
power_signals_d = np.array([[3.65099996e-01, 0.00000000e+00,
0.00000000e+00, 2.59570003e+00],
[6.21100008e-01, 0.00000000e+00,
0.00000000e+00, 2.67740011e+00],
[8.12500000e-01, 0.00000000e+00,
0.00000000e+00, 2.72729993e+00],
[9.00399983e-01, 0.00000000e+00,
0.00000000e+00, 2.77419996e+00]])
rank_k = 4
expected_result = np.array([1.36527916, 2.70624333, 4.04720749,
5.38817165])
linearization_helper = LinearizationHelper(solver_type='ECOS')
left_low_rank_matrix_u, singular_values_sigma, right_low_rank_matrix_v \
= np.linalg.svd(power_signals_d)
initial_r_cs_value = np.diag(singular_values_sigma[:rank_k]).dot(
right_low_rank_matrix_v[:rank_k, :])
actual_result = linearization_helper.obtain_component_r0(
initial_r_cs_value)
np.testing.assert_almost_equal(actual_result, expected_result,
decimal=2)
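# The initial right-matrix value fed to obtain_component_r0 above is the
# rank-k truncation of the SVD, diag(sigma[:rank_k]) @ V[:rank_k, :], so that
# U[:, :rank_k] times that value approximates power_signals_d; the helper then
# derives the component r0 that serves as the linearization denominator.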
| 1,804 | 40.022727 | 80 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/initialization/test_weight_setting.py
|
import unittest
import os
import numpy as np
import cvxpy as cvx
from statistical_clear_sky.algorithm.initialization.weight_setting\
import WeightSetting
class TestWeightSetting(unittest.TestCase):
def test_obtain_weights(self):
power_signals_d = np.array([[3.65099996e-01, 0.00000000e+00,
0.00000000e+00, 2.59570003e+00],
[6.21100008e-01, 0.00000000e+00,
0.00000000e+00, 2.67740011e+00],
[8.12500000e-01, 0.00000000e+00,
0.00000000e+00, 2.72729993e+00],
[9.00399983e-01, 0.00000000e+00,
0.00000000e+00, 2.77419996e+00]])
expected_weights = np.array([0.0, 0.0, 0.0, 0.0])
weight_setting = WeightSetting()
actual_weights = weight_setting.obtain_weights(power_signals_d)
np.testing.assert_array_equal(actual_weights, expected_weights)
def test_obtain_weights_with_large_data(self):
input_power_signals_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/initialization/one_year_power_signals_1.csv"))
with open(input_power_signals_file_path) as file:
power_signals_d = np.loadtxt(file, delimiter=',')
weights_file_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
"../../fixtures/initialization/one_year_weights_1.csv"))
with open(weights_file_path) as file:
expected_weights = np.loadtxt(file, delimiter=',')
weight_setting = WeightSetting(solver_type='MOSEK')
try:
actual_weights = weight_setting.obtain_weights(power_signals_d)
except cvx.SolverError:
self.skipTest("This test uses MOSEK solver"
+ "because default ECOS solver fails with large data. "
+ "Unless MOSEK is installed, this test fails.")
else:
np.testing.assert_allclose(actual_weights, expected_weights, rtol=1e-5)
| 2,150 | 41.176471 | 83 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/initialization/test_singular_value_decomposition.py
|
import unittest
import numpy as np
from\
statistical_clear_sky.algorithm.initialization.singular_value_decomposition\
import SingularValueDecomposition
class TestSingularValueDecomposition(unittest.TestCase):
def test_adjust_singular_vectors(self):
power_signals_d = np.array([[3.65099996e-01, 0.00000000e+00,
0.00000000e+00, 2.59570003e+00],
[6.21100008e-01, 0.00000000e+00,
0.00000000e+00, 2.67740011e+00],
[8.12500000e-01, 0.00000000e+00,
0.00000000e+00, 2.72729993e+00],
[9.00399983e-01, 0.00000000e+00,
0.00000000e+00, 2.77419996e+00]])
left_singular_vectors_u = np.array([[0.46881027, -0.77474963,
0.39354624, 0.1584339],
[0.49437073, -0.15174524,
-0.6766346, -0.52415321],
[-0.51153077, 0.32155093,
-0.27710787, 0.74709605],
[-0.5235941, 0.52282062,
0.55722365, -0.37684163]])
right_singular_vectors_v = np.array([[0.24562222, 0.0, 0.0, 0.96936563],
[0.96936563, 0.0, 0.0, -0.24562222],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0]])
expected_left_singular_vectors_u = np.array([[-0.46881027, -0.77474963,
0.39354624, 0.1584339],
[-0.49437073, -0.15174524,
-0.6766346, -0.52415321],
[0.51153077, 0.32155093,
-0.27710787, 0.74709605],
[0.5235941, 0.52282062,
0.55722365, -0.37684163]])
expected_right_singular_vectors_v = np.array([[-0.24562222, 0.0,
0.0, -0.96936563],
[0.96936563, 0.0,
0.0, -0.24562222],
[0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0]])
decomposition = SingularValueDecomposition()
actual_left_singular_vectors_u, actual_right_singular_vectors_v = \
decomposition._adjust_singular_vectors(
left_singular_vectors_u, right_singular_vectors_v)
np.testing.assert_array_equal(actual_left_singular_vectors_u,
expected_left_singular_vectors_u)
np.testing.assert_array_equal(actual_right_singular_vectors_v,
expected_right_singular_vectors_v)
| 3,272 | 55.431034 | 80 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/initialization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/serialization/__init__.py
| 0 | 0 | 0 |
py
|
|
StatisticalClearSky
|
StatisticalClearSky-master/tests/statistical_clear_sky/algorithm/serialization/test_serialization_mixin.py
|
import unittest
import numpy as np
import tempfile
import shutil
import os
from statistical_clear_sky.algorithm.iterative_fitting import IterativeFitting
class TestSerializationMixin(unittest.TestCase):
def setUp(self):
self._temp_directory = tempfile.mkdtemp()
self._filepath = os.path.join(self._temp_directory, 'state_data.json')
def tearDown(self):
shutil.rmtree(self._temp_directory)
def test_serialization(self):
power_signals_d = np.array([[3.65099996e-01, 0.00000000e+00,
0.00000000e+00, 2.59570003e+00],
[6.21100008e-01, 0.00000000e+00,
0.00000000e+00, 2.67740011e+00],
[8.12500000e-01, 0.00000000e+00,
0.00000000e+00, 2.72729993e+00],
[9.00399983e-01, 0.00000000e+00,
0.00000000e+00, 2.77419996e+00]])
rank_k = 4
original_iterative_fitting = IterativeFitting(power_signals_d,
rank_k=rank_k)
original_iterative_fitting.save_instance(self._filepath)
deserialized_iterative_fitting = IterativeFitting.load_instance(
self._filepath)
np.testing.assert_array_equal(deserialized_iterative_fitting.
_power_signals_d,
original_iterative_fitting.
_power_signals_d)
| 1,594 | 37.902439 | 78 |
py
|
StatisticalClearSky
|
StatisticalClearSky-master/docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'Statistical Clear Sky'
copyright = '2021, Bennet Meyers'
author = 'Bennet Meyers'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_rtd_theme',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
master_doc = 'index'
| 1,991 | 32.762712 | 79 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/normalization-ablation/experiments.py
|
"""Biggest batch size that would fit in one GPU."""
import explib
from explib.expmaker import PROB_CIFAR10_RESNET18 as C10_R18
from explib.expmaker import PROB_DB_SQD as DB_SQD
from explib.expmaker import PROB_MNIST_LENET5 as MNI_LN5
from explib.expmaker import PROB_PTB_TENC as PTB_TEC
from explib.expmaker import PROB_WT2_TXL as WT2_TXL
from explib.expmaker import merge_dicts, merge_sets, nice_logspace
from explib.expmaker.slurm_configs import (
DEFAULT_GPU_8H,
DEFAULT_GPU_12H,
DEFAULT_GPU_16H,
DEFAULT_GPU_24H,
)
from explib.optim import NORMALIZED_GD, RESCALED_SIGN_D, SIGN_D
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
SEEDS = [0, 1, 2]
base_alphas = nice_logspace(start=-6, end=1, base=10, density=0)
base_alphas_RSD = nice_logspace(start=-10, end=-3, base=10, density=0)
alphas_for_dset_opt = {
"mnist": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-4, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-5, end=-1, base=10, density=1),
},
"cifar10": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-6, end=-4, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-5, base=10, density=1),
},
"ptb": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-8, end=-3, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
"squad": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-11, end=-7, base=10, density=1),
},
}
alphas_for_dset_opt_with_momentum = {
"mnist": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-6, end=-4, base=10, density=1),
},
"cifar10": {
NORMALIZED_GD: nice_logspace(start=-3, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-8, end=-5, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-5, base=10, density=1),
},
"ptb": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
"squad": {
NORMALIZED_GD: nice_logspace(start=-3, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-6, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-11, end=-9, base=10, density=1),
},
}
optimizer_names = [
NORMALIZED_GD,
SIGN_D,
RESCALED_SIGN_D,
]
base_optims = (
[
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in base_alphas
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in base_alphas
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in base_alphas_RSD
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in base_alphas_RSD
]
)
optimizers_for_dataset = {
k: [
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
for k in ["mnist", "cifar10", "ptb", "wt2", "squad"]
}
settings_mnist = [
{"batch_size": 256, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 1024, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 4096, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 200},
{"batch_size": 16384, "slurm_config": DEFAULT_GPU_12H, "max_epoch": 800},
]
settings_cifar = [
{"batch_size": 64, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 256, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 1024, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 200},
{"batch_size": 4096, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 400},
]
settings_ptb = [
{"batch_size": 16, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 64, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 100},
{"batch_size": 256, "slurm_config": DEFAULT_GPU_8H, "max_epoch": 200},
{"batch_size": 1024, "slurm_config": DEFAULT_GPU_12H, "max_epoch": 800},
]
settings_db = [
{
"batch_size": 16,
"accumulate_steps": 2,
"slurm_config": DEFAULT_GPU_12H,
"max_epoch": 10,
},
{
"batch_size": 16,
"accumulate_steps": 8,
"slurm_config": DEFAULT_GPU_12H,
"max_epoch": 10,
},
{
"batch_size": 16,
"accumulate_steps": 32,
"slurm_config": DEFAULT_GPU_12H,
"max_epoch": 10,
},
{
"batch_size": 16,
"accumulate_steps": 128,
"slurm_config": DEFAULT_GPU_12H,
"max_epoch": 10,
},
]
settings_wt2 = [
{
"batch_size": 20,
"accumulate_steps": 1,
"slurm_config": DEFAULT_GPU_8H,
"max_epoch": 40,
},
{
"batch_size": 80,
"accumulate_steps": 1,
"slurm_config": DEFAULT_GPU_8H,
"max_epoch": 40,
},
{
"batch_size": 80,
"accumulate_steps": 4,
"slurm_config": DEFAULT_GPU_8H,
"max_epoch": 80,
},
{
"batch_size": 80,
"accumulate_steps": 16,
"slurm_config": DEFAULT_GPU_8H,
"max_epoch": 160,
},
]
EXPERIMENTS = (
[
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_mnist
for opt_settings in base_optims
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_cifar
for opt_settings in base_optims
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_ptb
for opt_settings in base_optims
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_wt2
for opt_settings in base_optims
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_db
for opt_settings in base_optims
]
+ [
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_mnist
for opt_settings in optimizers_for_dataset["mnist"]
for seed in SEEDS
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_cifar
for opt_settings in optimizers_for_dataset["cifar10"]
for seed in SEEDS
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers_for_dataset["ptb"]
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_for_dataset["wt2"]
for seed in SEEDS
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_db
for opt_settings in optimizers_for_dataset["squad"]
for seed in SEEDS
]
)
EXPERIMENTS = explib.expmaker.make_exp_dict_list_unique(EXPERIMENTS)
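# Rough shape of a single entry in EXPERIMENTS, assuming merge_dicts performs a
# straightforward dictionary merge (the exact contents of the problem presets
# such as MNI_LN5 are not shown here and are an assumption):
#
#     {**MNI_LN5, "batch_size": 256, "slurm_config": DEFAULT_GPU_8H,
#      "max_epoch": 100, "opt": {"name": NORMALIZED_GD, "alpha": 1e-2},
#      "seed": 0}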
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="normalization-ablation",
descr="Checking what form of normalization works",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 9,273 | 31.770318 | 78 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/no_dropout/experiment.py
|
import explib
from explib.expmaker import PROB_PTB_TENC_DET as PTB_TEC
from explib.expmaker import PROB_WT2_TXL_DET as WT2_TXL
from explib.expmaker import merge_dicts, merge_sets, nice_logspace
from explib.expmaker.slurm_configs import DEFAULT_GPU_12H, DEFAULT_GPU_16H
from explib.optim import NORMALIZED_GD, RESCALED_SIGN_D, SIGN_D
hyperparam_names = [
"dataset",
"model",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
def sgd(stepsize, momentum=True):
return {
"opt": {
"name": "SGD",
"alpha": stepsize,
"momentum": 0.9 if momentum else 0.0,
}
}
SEEDS = [0, 1, 2]
alphas_ptb_adam = nice_logspace(start=-5, end=-2, base=10, density=1)
alphas_ptb_sgd = nice_logspace(start=-3, end=0, base=10, density=1)
alphas_wt2_sgd = nice_logspace(start=-5, end=0, base=10, density=1)
alphas_wt2_adam = merge_sets(
nice_logspace(start=-6, end=-1, base=10, density=1),
)
optimizers_ptb = (
[adam(alpha, momentum=True) for alpha in alphas_ptb_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_ptb_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_ptb_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_ptb_sgd]
)
optimizers_wt2 = (
[adam(alpha, momentum=True) for alpha in alphas_wt2_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_wt2_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_wt2_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_wt2_sgd]
)
base_alphas = nice_logspace(start=-6, end=1, base=10, density=0)
base_alphas_RSD = base_alphas
alphas_for_dset_opt = {
"ptb": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-6, end=-3, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-3, base=10, density=1),
},
}
alphas_for_dset_opt_with_momentum = {
"ptb": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-8, end=-5, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
}
optimizers_for_dataset = {
k: [
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
for k in ["ptb", "wt2"]
}
settings_ptb = [
{
"batch_size": 1326,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 20,
"max_epoch": 800 * 4,
"drop_last": True,
},
]
settings_wt2 = [
{
"batch_size": 80,
"accumulate_steps": 203,
"slurm_config": DEFAULT_GPU_16H,
"max_epoch": 320,
"drop_last": True,
}
]
EXPERIMENTS = (
[
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers_ptb
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_wt2
for seed in SEEDS
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers_for_dataset["ptb"]
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_for_dataset["wt2"]
for seed in SEEDS
]
)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="no-dropout",
descr="Repeat of the same experiments without dropout",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 5,188 | 28.482955 | 77 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/full-batch-training/experiments.py
|
"""
Sanity checks for the full runs,
checking runtime and memory consumption of various configurations.
"""
import explib
from explib.expmaker.slurm_configs import (
DEFAULT_GPU_12H,
LARGE_GPU_24H,
DEFAULT_GPU_16H,
)
from explib.expmaker import merge_dicts, nice_logspace, merge_sets
from explib.expmaker import (
PROB_MNIST_LENET5 as MNI_LN5,
PROB_CIFAR10_RESNET18 as C10_R18,
PROB_PTB_TENC as PTB_TEC,
PROB_DB_SQD as DB_SQD,
PROB_WT2_TXL as WT2_TXL,
)
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
def sgd(stepsize, momentum=True):
return {
"opt": {
"name": "SGD",
"alpha": stepsize,
"momentum": 0.9 if momentum else 0.0,
}
}
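# For reference, sgd(0.1) returns
# {"opt": {"name": "SGD", "alpha": 0.1, "momentum": 0.9}}
# and adam(1e-3, momentum=False) returns
# {"opt": {"name": "Adam", "alpha": 0.001, "b1": 0.0, "b2": 0.999}}.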
SEEDS = [0, 1, 2]
alphas_mnist_adam = nice_logspace(start=-4, end=0, base=10, density=1)
alphas_mnist_sgd = nice_logspace(start=-3, end=1, base=10, density=1)
alphas_cifar_adam = nice_logspace(start=-6, end=0, base=10, density=1)
alphas_cifar_sgd = nice_logspace(start=-5, end=1, base=10, density=1)
alphas_ptb_adam = nice_logspace(start=-5, end=-2, base=10, density=1)
alphas_ptb_sgd = nice_logspace(start=-3, end=0, base=10, density=1)
alphas_wt2_sgd = nice_logspace(start=-5, end=0, base=10, density=1)
alphas_wt2_adam = merge_sets(
nice_logspace(start=-6, end=-1, base=10, density=1),
nice_logspace(start=-4, end=-2, base=10, density=2),
)
alphas_squad_adam = nice_logspace(start=-6, end=-2, base=10, density=1)
alphas_squad_sgd = nice_logspace(start=-4, end=0, base=10, density=1)
optimizers = (
[adam(alpha, momentum=True) for alpha in alphas_mnist_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_mnist_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_mnist_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_mnist_sgd]
)
optimizers_cifar = (
[adam(alpha, momentum=True) for alpha in alphas_cifar_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_cifar_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_cifar_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_cifar_sgd]
)
optimizers_ptb = (
[adam(alpha, momentum=True) for alpha in alphas_ptb_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_ptb_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_ptb_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_ptb_sgd]
)
optimizers_squad = (
[adam(alpha, momentum=True) for alpha in alphas_squad_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_squad_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_squad_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_squad_sgd]
)
optimizers_wt2 = (
[adam(alpha, momentum=True) for alpha in alphas_wt2_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_wt2_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_wt2_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_wt2_sgd]
)
settings_mnist = [
{
"batch_size": 20000,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 3,
"max_epoch": 800,
},
]
settings_cifar = [
{
"batch_size": 10000,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 5,
"max_epoch": 800,
},
]
settings_ptb = [
{
"batch_size": 1326,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 20,
"max_epoch": 800 * 4,
"drop_last": True,
},
]
settings_squad = [
{
"batch_size": 64,
"slurm_config": LARGE_GPU_24H,
"accumulate_steps": 1370,
"max_epoch": 20,
"drop_last": True,
},
]
settings_wt2 = [
{
"batch_size": 80,
"accumulate_steps": 203,
"slurm_config": DEFAULT_GPU_16H,
"max_epoch": 320,
"drop_last": True,
}
]
EXPERIMENTS = (
[
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_mnist
for opt_settings in optimizers
for seed in SEEDS
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_cifar
for opt_settings in optimizers_cifar
for seed in SEEDS
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers_ptb
for seed in SEEDS
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_squad
for opt_settings in optimizers_squad
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_wt2
for seed in SEEDS
]
)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="full-batch-training",
descr="Full batch training on the standard datasets",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 5,443 | 26.917949 | 73 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/longer_wt2/incr_batch.py
|
"""Sanity checks for the full runs, checking runtime and memory consumption of
various configurations."""
import explib
from explib.expmaker.slurm_configs import (
SMALL_GPU_4H,
SMALL_GPU_12H,
SMALL_GPU_8H,
LARGE_GPU_24H,
LARGE_GPU_12H,
)
from explib.expmaker import merge_dicts, nice_logspace, merge_sets
from explib.expmaker import (
PROB_MNIST_LENET5 as MNI_LN5,
PROB_CIFAR10_RESNET18 as C10_R18,
PROB_PTB_TENC as PTB_TEC,
PROB_DB_SQD as DB_SQD,
PROB_WT2_TXL as WT2_TXL,
)
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
def sgd(stepsize, momentum=True):
return {
"opt": {
"name": "SGD",
"alpha": stepsize,
"momentum": 0.9 if momentum else 0.0,
}
}
SEEDS = [0, 1, 2]
alphas_wt2_sgd = nice_logspace(start=-5, end=0, base=10, density=1)
alphas_wt2_adam = merge_sets(
nice_logspace(start=-6, end=-1, base=10, density=1),
nice_logspace(start=-4, end=-2, base=10, density=2),
)
optimizers_wt2 = (
[adam(alpha, momentum=True) for alpha in alphas_wt2_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_wt2_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_wt2_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_wt2_sgd]
)
def settings_wt2_(bs=32, accum=1, slurm=SMALL_GPU_4H, epoch=5):
return {
"batch_size": bs,
"accumulate_steps": accum,
"slurm_config": slurm,
"max_epoch": epoch,
}
settings_wt2 = [
settings_wt2_(bs=20, accum=1, slurm=SMALL_GPU_4H, epoch=40),
settings_wt2_(bs=80, accum=1, slurm=SMALL_GPU_4H, epoch=40),
settings_wt2_(bs=80, accum=4, slurm=SMALL_GPU_4H, epoch=80),
settings_wt2_(bs=80, accum=16, slurm=SMALL_GPU_8H, epoch=160),
]
EXPERIMENTS = [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_wt2
for seed in SEEDS
]
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="increasing-batch-size",
descr="Increasing batch size experiments",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 2,558 | 23.605769 | 78 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/longer_wt2/longer_wt2.py
|
import explib
from explib.expmaker import PROB_PTB_TENC_DET as PTB_TEC
from explib.expmaker import PROB_WT2_TXL_DET as WT2_TXL
from explib.expmaker import merge_dicts, merge_sets, nice_logspace
from explib.expmaker.slurm_configs import DEFAULT_GPU_12H, DEFAULT_GPU_16H
from explib.optim import NORMALIZED_GD, RESCALED_SIGN_D, SIGN_D
hyperparam_names = [
"dataset",
"model",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
def sgd(stepsize, momentum=True):
return {
"opt": {
"name": "SGD",
"alpha": stepsize,
"momentum": 0.9 if momentum else 0.0,
}
}
SEEDS = [0]
alphas_ptb_adam = nice_logspace(start=-5, end=-2, base=10, density=1)
alphas_ptb_sgd = nice_logspace(start=-3, end=0, base=10, density=1)
alphas_wt2_sgd = nice_logspace(start=-5, end=0, base=10, density=1)
alphas_wt2_adam = nice_logspace(start=-6, end=-1, base=10, density=1)
optimizers_wt2 = (
[adam(alpha, momentum=True) for alpha in alphas_wt2_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_wt2_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_wt2_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_wt2_sgd]
)
base_alphas = nice_logspace(start=-6, end=1, base=10, density=0)
base_alphas_RSD = base_alphas
alphas_for_dset_opt = {
"wt2": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-3, base=10, density=1),
},
}
alphas_for_dset_opt_with_momentum = {
"ptb": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-8, end=-5, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
}
optimizers_for_dataset = {
k: [
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
for k in ["ptb", "wt2"]
}
settings_wt2 = [
{
"batch_size": 80,
"accumulate_steps": 203,
"slurm_config": DEFAULT_GPU_16H,
"max_epoch": 320,
"drop_last": True,
}
]
EXPERIMENTS = (
[
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_wt2
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_for_dataset["wt2"]
for seed in SEEDS
]
)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="no-dropout",
descr="Repeat of the same experiments without dropout",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 4,456 | 28.322368 | 77 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/increasing-batch-size/experiments.py
|
"""
Sanity checks for the full runs,
checking runtime and memory consumption of various configurations.
"""
import explib
from explib.expmaker.slurm_configs import (
SMALL_GPU_4H,
SMALL_GPU_12H,
SMALL_GPU_8H,
LARGE_GPU_24H,
LARGE_GPU_12H,
)
from explib.expmaker import merge_dicts, nice_logspace, merge_sets
from explib.expmaker import (
PROB_MNIST_LENET5 as MNI_LN5,
PROB_CIFAR10_RESNET18 as C10_R18,
PROB_PTB_TENC as PTB_TEC,
PROB_DB_SQD as DB_SQD,
PROB_WT2_TXL as WT2_TXL,
)
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
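# e.g. adam(1e-3) -> {"opt": {"name": "Adam", "alpha": 1e-3, "b1": 0.9, "b2": 0.999}};
# momentum=False only disables the first-moment term by setting b1 = 0.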
def sgd(stepsize, momentum=True):
return {
"opt": {
"name": "SGD",
"alpha": stepsize,
"momentum": 0.9 if momentum else 0.0,
}
}
SEEDS = [0, 1, 2]
alphas = nice_logspace(start=-5, end=1, base=10, density=1)
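# nice_logspace is assumed to return step sizes log-spaced between base**start and
# base**end (here 1e-5 to 1e1); density=1 presumably adds intermediate half-decade points.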
optimizers = (
[adam(alpha, momentum=True) for alpha in alphas]
+ [adam(alpha, momentum=False) for alpha in alphas]
+ [sgd(alpha, momentum=False) for alpha in alphas]
+ [sgd(alpha, momentum=True) for alpha in alphas]
)
alphas_cifar = nice_logspace(start=-7, end=1, base=10, density=1)
optimizers_cifar = (
[adam(alpha, momentum=True) for alpha in alphas_cifar]
+ [adam(alpha, momentum=False) for alpha in alphas_cifar]
+ [sgd(alpha, momentum=False) for alpha in alphas_cifar]
+ [sgd(alpha, momentum=True) for alpha in alphas_cifar]
)
alphas_db = nice_logspace(start=-6, end=0, base=10, density=1)
optimizers_db = (
[adam(alpha, momentum=True) for alpha in alphas_db]
+ [adam(alpha, momentum=False) for alpha in alphas_db]
+ [sgd(alpha, momentum=False) for alpha in alphas_db]
+ [sgd(alpha, momentum=True) for alpha in alphas_db]
)
alphas_wt2_sgd = nice_logspace(start=-5, end=0, base=10, density=1)
alphas_wt2_adam = merge_sets(
nice_logspace(start=-6, end=-1, base=10, density=1),
nice_logspace(start=-4, end=-2, base=10, density=2),
)
optimizers_wt2 = (
[adam(alpha, momentum=True) for alpha in alphas_wt2_adam]
+ [adam(alpha, momentum=False) for alpha in alphas_wt2_adam]
+ [sgd(alpha, momentum=False) for alpha in alphas_wt2_sgd]
+ [sgd(alpha, momentum=True) for alpha in alphas_wt2_sgd]
)
settings_mnist = [
{"batch_size": 256, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 1024, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 4096, "slurm_config": SMALL_GPU_4H, "max_epoch": 200},
{"batch_size": 16384, "slurm_config": SMALL_GPU_12H, "max_epoch": 800},
]
settings_cifar = [
{"batch_size": 64, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 256, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 1024, "slurm_config": SMALL_GPU_4H, "max_epoch": 200},
{"batch_size": 4096, "slurm_config": SMALL_GPU_12H, "max_epoch": 800},
]
settings_ptb = [
{"batch_size": 16, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 64, "slurm_config": SMALL_GPU_4H, "max_epoch": 100},
{"batch_size": 256, "slurm_config": SMALL_GPU_8H, "max_epoch": 200},
{"batch_size": 1024, "slurm_config": SMALL_GPU_12H, "max_epoch": 800},
]
def settings_db_(bs=32, accum=1, slurm=SMALL_GPU_4H, epoch=5):
return {
"batch_size": bs,
"accumulate_steps": accum,
"slurm_config": slurm,
"max_epoch": epoch,
}
settings_db = [
settings_db_(bs=32, accum=1, slurm=LARGE_GPU_12H, epoch=5),
settings_db_(bs=32, accum=4, slurm=LARGE_GPU_12H, epoch=5),
settings_db_(bs=32, accum=16, slurm=LARGE_GPU_12H, epoch=5),
settings_db_(bs=32, accum=64, slurm=LARGE_GPU_24H, epoch=20),
]
def settings_wt2_(bs=32, accum=1, slurm=SMALL_GPU_4H, epoch=5):
return {
"batch_size": bs,
"accumulate_steps": accum,
"slurm_config": slurm,
"max_epoch": epoch,
}
settings_wt2 = [
settings_wt2_(bs=20, accum=1, slurm=SMALL_GPU_4H, epoch=40),
settings_wt2_(bs=80, accum=1, slurm=SMALL_GPU_4H, epoch=40),
settings_wt2_(bs=80, accum=4, slurm=SMALL_GPU_4H, epoch=80),
settings_wt2_(bs=80, accum=16, slurm=SMALL_GPU_8H, epoch=160),
]
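# Larger effective batch sizes (20, 80, 320, 1280 sequences) are obtained via
# accumulate_steps, presumably so per-step GPU memory stays that of batch_size <= 80;
# max_epoch grows with batch size, presumably to keep the number of updates comparable.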
EXPERIMENTS = (
[
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_mnist
for opt_settings in optimizers
for seed in SEEDS
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_cifar
for opt_settings in optimizers_cifar
for seed in SEEDS
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers
for seed in SEEDS
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_db
for opt_settings in optimizers_db
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_wt2
for seed in SEEDS
]
)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="increasing-batch-size",
descr="Increasing batch size experiments",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 5,746 | 29.247368 | 75 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/hist_maker/make_wt2_hists.py
|
import explib
from explib.expmaker import slurm_configs, BASE_PROBLEMS
EXPERIMENTS = [
{
**BASE_PROBLEMS["WT2_TRANSFORMERXL"],
"batch_size": bs,
"max_epoch": 0,
"seed": seed,
"opt": {
"name": "Adam",
"alpha": 0.001,
"b1": 0.99,
"b2": 0.999,
},
"init_noise_norm": True,
"save_norm_samples": True,
"slurm_config": slurm_configs.DEFAULT_GPU_8H,
}
for bs in [1, 16]
for seed in range(5)
]
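# 2 batch sizes x 5 seeds = 10 runs with max_epoch=0: no training is performed, and
# init_noise_norm / save_norm_samples presumably record gradient-noise norms at initialization.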
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="wt2_noise_hist",
descr="ptb noise histograms",
as_one_job=True,
experiments=EXPERIMENTS,
)
| 729 | 22.548387 | 56 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/hist_maker/make_squad_hists.py
|
import numpy as np
import explib
from explib.expmaker import slurm_configs
def merge_grids(*grids):
return sorted(list(set.union(*[set(grid) for grid in grids])))
EXPERIMENTS = []
EXPERIMENTS_ADAM = [
{
"dataset": dataset,
"model": "distilbert_base_pretrained",
"batch_size": bs,
"max_epoch": 0,
"seed": seed,
"model_args": {
"tgt_len": 384,
"doc_stride": 128,
},
"opt": {"name": "SGD", "alpha": alpha, "momentum": 0.0},
"init_noise_norm": True,
"save_norm_samples": True,
"slurm_config": slurm_configs.LARGE_GPU_6H,
}
for dataset in ["squad"]
for alpha in [1e-2]
for seed in range(5)
for bs in [1, 16]
]
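# Note: despite the _ADAM suffix, these runs configure plain SGD (momentum 0.0); with
# max_epoch=0 the optimizer choice presumably has no effect on the sampled noise norms.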
EXPERIMENTS.extend(EXPERIMENTS_ADAM)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="distillbert_squad_noise_hists",
descr="distill squad norm",
experiments=EXPERIMENTS,
as_one_job=True,
)
| 1,008 | 22.465116 | 66 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/hist_maker/make_image_hists.py
|
import numpy as np
import explib
from explib.expmaker import slurm_configs
def merge_grids(*grids):
return sorted(list(set.union(*[set(grid) for grid in grids])))
EXPERIMENTS = []
EXPERIMENTS_MNIST = [
{
"loss_func": "logloss",
"metrics": ["accuracy"],
"dataset": "mnist",
"model": "lenet5",
"batch_size": bs,
"max_epoch": 0,
"init_noise_norm": True,
"seed": seed,
"slurm_config": slurm_configs.SMALL_GPU_2H,
"opt": {
"name": "Adam",
"alpha": 0.001,
"b1": 0.99,
"b2": 0.999,
},
}
for bs in [1, 256]
for seed in range(5)
]
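# max_epoch=0 again means no training; only gradient-noise statistics at initialization
# are presumably collected, for batch sizes 1 and 256 over 5 seeds.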
EXPERIMENTS.extend(EXPERIMENTS_MNIST)
EXPERIMENTS_RESNET18 = [
{
"loss_func": "logloss",
"metrics": ["accuracy"],
"dataset": "cifar10",
"model": "resnet18",
"batch_size": bs,
"max_epoch": 0,
"init_noise_norm": True,
"seed": seed,
"slurm_config": slurm_configs.SMALL_GPU_2H,
"opt": {
"name": "Adam",
"alpha": 0.001,
"b1": 0.99,
"b2": 0.999,
},
}
for bs in [2, 64]
for seed in range(5)
]
EXPERIMENTS.extend(EXPERIMENTS_RESNET18)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="image_hists",
descr="image problem noise histograms",
as_one_job=True,
experiments=EXPERIMENTS,
)
| 1,480 | 20.779412 | 66 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/hist_maker/make_ptb_hists.py
|
import numpy as np
import explib
from explib.expmaker import slurm_configs
def merge_grids(*grids):
return sorted(list(set.union(*[set(grid) for grid in grids])))
EXPERIMENTS = []
EXPERIMENTS_SGD = [
{
"loss_func": "logloss",
"dataset": "ptb",
"model": "transformer_encoder",
"model_args": {
"tgt_len": 35,
},
"batch_size": bs,
"max_epoch": 0,
"seed": seed,
"opt": {
"name": "Adam",
"alpha": 0.001,
"b1": 0.99,
"b2": 0.999,
},
"init_noise_norm": True,
"save_norm_samples": True,
"slurm_config": slurm_configs.SMALL_GPU_2H,
}
for bs in [1, 16]
for seed in range(5)
]
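# Note: despite the _SGD suffix, the optimizer configured here is Adam; as above,
# with max_epoch=0 this presumably does not affect the recorded noise samples.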
EXPERIMENTS.extend(EXPERIMENTS_SGD)
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="ptb_noise_hists",
descr="ptb noise histograms",
as_one_job=True,
experiments=EXPERIMENTS,
)
| 1,001 | 20.319149 | 66 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/fix-full-batch-squad/experiment.py
|
"""Sanity checks for the full runs, checking runtime and memory consumption of
various configurations."""
import explib
from explib.expmaker import PROB_DB_SQD as DB_SQD
from explib.expmaker import merge_dicts, nice_logspace
from explib.expmaker.slurm_configs import DEFAULT_GPU_36H, LARGE_GPU_36H
from explib.optim import NORMALIZED_GD, SIGN_D
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"opt.b1",
"opt.momentum",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
settings_BRT = {
"batch_size": 16,
"slurm_config": LARGE_GPU_36H,
"accumulate_steps": 1370 * 4,
"max_epoch": 80,
"drop_last": True,
"shuffle": False,
}
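# Effective batch size: 16 * (1370 * 4) = 87,680 examples per update, which presumably
# covers (roughly) the full SQuAD training set, i.e. full-batch training.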
def adam(stepsize, momentum=True):
return {
"opt": {
"name": "Adam",
"alpha": stepsize,
"b1": 0.9 if momentum else 0.0,
"b2": 0.999,
}
}
def sgd(stepsize, momentum=True):
return {
"opt": {"name": "SGD", "alpha": stepsize, "momentum": 0.9 if momentum else 0.0}
}
def normgd(stepsize, momentum=True):
return {
"opt": {
"name": NORMALIZED_GD,
"alpha": stepsize,
"momentum": 0.9 if momentum else 0,
}
}
def signd(stepsize, momentum=True):
return {
"opt": {"name": SIGN_D, "alpha": stepsize, "momentum": 0.9 if momentum else 0}
}
alphas_BRT_sgd = nice_logspace(start=-2, end=0, base=10, density=1)
alphas_BRT_sgd_m = nice_logspace(start=-2, end=-1, base=10, density=1)
alphas_BRT_adam = nice_logspace(start=-4, end=-3, base=10, density=1)
alphas_BRT_adam_m = nice_logspace(start=-4, end=-1, base=10, density=1)
alphas_BRT_NormalizedGD = nice_logspace(start=-1, end=0, base=10, density=1)
alphas_BRT_SignDescent = nice_logspace(start=-5, end=-3, base=10, density=1)
alphas_BRT_NormalizedGD_m = nice_logspace(start=-2, end=0, base=10, density=1)
alphas_BRT_SignDescent_m = nice_logspace(start=-6, end=-2, base=10, density=1)
opts_BRT = (
[sgd(alpha, False) for alpha in alphas_BRT_sgd]
+ [adam(alpha, False) for alpha in alphas_BRT_adam]
+ [normgd(alpha, False) for alpha in alphas_BRT_NormalizedGD]
+ [signd(alpha, False) for alpha in alphas_BRT_SignDescent]
+ [sgd(alpha, True) for alpha in alphas_BRT_sgd_m]
+ [adam(alpha, True) for alpha in alphas_BRT_adam_m]
+ [normgd(alpha, True) for alpha in alphas_BRT_NormalizedGD_m]
+ [signd(alpha, True) for alpha in alphas_BRT_SignDescent_m]
)
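# Each optimizer gets its own step-size range (with and without momentum), presumably
# centred on the best values found in earlier, coarser sweeps.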
EXPERIMENTS = [
merge_dicts(DB_SQD, settings_BRT, opt_settings, {"seed": seed})
for opt_settings in opts_BRT
for seed in [0, 1, 2]
]
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="fix-full-batch-training-squad",
descr="Rerun of experiments on Squad with shuffle=False",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 2,935 | 28.36 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/experiment_scripts/full-batch-training-normalized-optimizers/experiments.py
|
"""Sanity checks for the full runs, checking runtime and memory consumption of
various configurations."""
import explib
from explib.expmaker import PROB_CIFAR10_RESNET18 as C10_R18
from explib.expmaker import PROB_DB_SQD as DB_SQD
from explib.expmaker import PROB_MNIST_LENET5 as MNI_LN5
from explib.expmaker import PROB_PTB_TENC as PTB_TEC
from explib.expmaker import PROB_WT2_TXL as WT2_TXL
from explib.expmaker import merge_dicts, merge_sets, nice_logspace
from explib.expmaker.slurm_configs import (
DEFAULT_GPU_12H,
DEFAULT_GPU_16H,
DEFAULT_GPU_36H,
LARGE_GPU_24H,
LARGE_GPU_36H,
LARGE_GPU_72H,
)
from explib.optim import (
BLOCK_NORMALIZED_GD,
NORMALIZED_GD,
RESCALED_SIGN_D,
SGD,
SIGN_D,
)
hyperparam_names = [
"dataset",
"batch_size",
"opt.name",
"accumulate_steps",
"seed",
"opt.alpha",
"slurm_config",
]
SEEDS = [0, 1, 2]
base_alphas = nice_logspace(start=-6, end=1, base=10, density=0)
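# density=0 presumably yields one step size per decade (1e-6, ..., 1e1); this coarse grid
# is run with a single seed below, while the refined per-dataset grids use all SEEDS.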
base_alphas_RSD = base_alphas
# base_alphas_RSD = nice_logspace(start=-10, end=-3, base=10, density=0)
alphas_for_dset_opt = {
"mnist": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-4, end=-2, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-6, end=-1, base=10, density=1),
},
"cifar10": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-6, end=-4, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-5, base=10, density=1),
},
"ptb": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-6, end=-3, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-1, end=1, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-3, base=10, density=1),
},
"squad": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-7, end=-5, base=10, density=1),
},
}
alphas_for_dset_opt_with_momentum = {
"mnist": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-6, end=-3, base=10, density=1),
},
"cifar10": {
NORMALIZED_GD: nice_logspace(start=-3, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-8, end=-5, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-8, end=-4, base=10, density=1),
},
"ptb": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-8, end=-5, base=10, density=1),
},
"wt2": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
SIGN_D: nice_logspace(start=-5, end=-3, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-9, end=-6, base=10, density=1),
},
"squad": {
NORMALIZED_GD: nice_logspace(start=-2, end=0, base=10, density=1),
        SIGN_D: nice_logspace(start=-3, end=0, base=10, density=1),
RESCALED_SIGN_D: nice_logspace(start=-11, end=-8, base=10, density=1),
},
}
settings_mnist = [
{
"batch_size": 20000,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 3,
"max_epoch": 800,
},
]
settings_cifar = [
{
"batch_size": 10000,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 5,
"max_epoch": 800,
},
]
settings_ptb = [
{
"batch_size": 1326,
"slurm_config": DEFAULT_GPU_12H,
"accumulate_steps": 20,
"max_epoch": 800 * 4,
"drop_last": True,
},
]
settings_wt2 = [
{
"batch_size": 80,
"accumulate_steps": 203,
"slurm_config": DEFAULT_GPU_16H,
"max_epoch": 320,
"drop_last": True,
}
]
settings_squad = [
# {
# "batch_size": 16,
# "slurm_config": DEFAULT_GPU_24H,
# "accumulate_steps": 1370 * 4,
# "max_epoch": 20,
# "drop_last": True,
# "shuffle": False,
# },
{
"batch_size": 16,
"slurm_config": LARGE_GPU_36H,
"accumulate_steps": 1370 * 4,
"max_epoch": 80,
"drop_last": True,
},
]
base_optims = (
[
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in base_alphas
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in base_alphas
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in base_alphas_RSD
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in base_alphas_RSD
]
)
optimizers_for_dataset = {
k: [
{"opt": {"name": name, "alpha": alpha}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9}}
for name in [NORMALIZED_GD, SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt[k][name]
]
+ [
{"opt": {"name": name, "alpha": alpha, "momentum": 0.9, "norm": 1}}
for name in [RESCALED_SIGN_D]
for alpha in alphas_for_dset_opt_with_momentum[k][name]
]
for k in ["mnist", "cifar10", "ptb", "wt2", "squad"]
}
EXPERIMENTS = (
[
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_mnist
for opt_settings in base_optims
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_cifar
for opt_settings in base_optims
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_ptb
for opt_settings in base_optims
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_wt2
for opt_settings in base_optims
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": 0})
for size_settings in settings_squad
for opt_settings in base_optims
]
+ [
merge_dicts(MNI_LN5, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_mnist
for opt_settings in optimizers_for_dataset["mnist"]
for seed in SEEDS
]
+ [
merge_dicts(C10_R18, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_cifar
for opt_settings in optimizers_for_dataset["cifar10"]
for seed in SEEDS
]
+ [
merge_dicts(PTB_TEC, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_ptb
for opt_settings in optimizers_for_dataset["ptb"]
for seed in SEEDS
]
+ [
merge_dicts(WT2_TXL, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_wt2
for opt_settings in optimizers_for_dataset["wt2"]
for seed in SEEDS
]
+ [
merge_dicts(DB_SQD, size_settings, opt_settings, {"seed": seed})
for size_settings in settings_squad
for opt_settings in optimizers_for_dataset["squad"]
for seed in SEEDS
]
)
EXPERIMENTS = explib.expmaker.make_exp_dict_list_unique(EXPERIMENTS)
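# make_exp_dict_list_unique presumably drops duplicates that arise where the coarse
# base_alphas grid (seed 0) overlaps the refined per-dataset grids.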
if __name__ == "__main__":
explib.expmaker.experiment_maker_cli(
exp_name="full-batch-training-normalized-optimizers",
descr="Full batch training with normalization-based optimizers (Sign, Rescaled Sign, Normalized GD)",
experiments=EXPERIMENTS,
hyperparam_names=hyperparam_names,
as_job_array=True,
)
| 8,479 | 30.176471 | 109 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/final_perf.py
|
import importlib
import os
from pathlib import Path
import explib.results.cleanup as cleanh
import explib.results.data as data_h
import explib.results.data as datah
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib
import matplotlib.colors
import matplotlib.pyplot as plt
def load_data():
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_end()
return {"plot_data": best_runs}
def settings(plt):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
plot_image = False
if plot_image:
plt.rcParams.update(
plth.iclr_config_2(nrows=2, ncols=5, height_to_width_ratio=1)
)
else:
plt.rcParams.update(
plth.iclr_config_2(nrows=2, ncols=5, height_to_width_ratio=1 / 1.2)
)
def make_figure(fig, data, with_momentum=False):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
importlib.reload(expdef)
dss = [expdef.PTB, expdef.WT2, expdef.SQUAD]
bss = [expdef.M, expdef.FULL]
if with_momentum:
opts = [expdef.SGD_M, expdef.ADAM_M, expdef.SIGN_M, expdef.NORM_M]
else:
opts = [expdef.SGD_NM, expdef.ADAM_NM, expdef.SIGN_NM, expdef.NORM_NM]
grid_type = "2x3"
plot_data = data["plot_data"]
def quickselect_agg(ds, bs, opt):
res_ds_bs = data_h.new_select(plot_data, selections=expdef.EXPERIMENTS[ds][bs])
res_ds_bs = res_ds_bs[res_ds_bs["epoch"].notna()]
res_ds_bs = res_ds_bs[res_ds_bs["epoch"] <= expdef.EPOCH_CLIP[ds][bs]]
res_opt = data_h.new_select(res_ds_bs, selections=[expdef.OPTIMS[opt]])
agg = res_opt.groupby("epoch")["training_loss"].agg([min, max, "median"])
return agg
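    # quickselect_agg returns, per epoch, the min/median/max training loss over the
    # selected runs for one (dataset, batch size, optimizer), presumably across seeds.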
axes = plth.make_grid_iclr(fig, grid_type=grid_type, tight=True)
for i, bs in enumerate(bss):
for j, ds in enumerate(dss):
ax = axes[i][j]
for opt in opts:
agg = quickselect_agg(ds, bs, opt)
n_samples = 100
linestyle = plth.linestyles_nm[opt]
linestyle["linewidth"] = 1.0 if "Sign" in opt else 0.9
linestyle["linestyle"] = "-" if "Sign" in opt else "dotted"
linestyle.pop("dashes", None)
ax.plot(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["median"], n_samples),
**linestyle,
)
fillstyle = plth.fillstyles[opt]
fillstyle["alpha"] = 0.1
ax.fill_between(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["min"], n_samples),
plth.subsample(agg["max"], n_samples),
**fillstyle,
)
ax.set_yscale("log")
ylims_trainingloss = {
expdef.MNIST: [10**-6, 10**1],
expdef.CIFAR10: [10**-7, 10**1.5],
expdef.PTB: [1.7, 10],
expdef.WT2: [10**-1.0, 10**1.3],
expdef.SQUAD: [10**-1, 10**1.0],
}
for i, bs in enumerate(bss):
for j, ds in enumerate(dss):
ax = axes[i][j]
ax.set_ylim(ylims_trainingloss[ds])
if ds == expdef.PTB:
ax.set_yticks([2, 4, 8], minor=False)
ax.set_yticklabels([2, 4, 8], minor=False)
ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9, 10], minor=True)
ax.set_yticklabels([], minor=True)
xticks_and_lims = {
expdef.PTB: {"ticks": [0, 1000, 2000, 3000], "labels": [0, "", "", 3000]},
expdef.WT2: {"ticks": [0, 100, 200, 300], "labels": [0, "", "", 300]},
expdef.SQUAD: {"ticks": [0, 20, 40, 60], "labels": [0, "", "", 60]},
}
for j, ds in enumerate(dss):
axes[0][j].set_title(plth.fdisplaynames(ds), y=1.0, pad=-1)
axes[1][j].set_xticks(xticks_and_lims[ds]["ticks"])
axes[1][j].set_xticklabels(xticks_and_lims[ds]["labels"])
axes[1][j].set_xlabel("Epoch ", labelpad=-7)
axes[0][0].set_ylabel("Medium batch\nTraining Loss")
axes[1][0].set_ylabel("Full batch\nTraining Loss")
## Names
def darker(color):
black = [0.0, 0.0, 0.0]
black_hsv = matplotlib.colors.rgb_to_hsv(black)
color_hsv = matplotlib.colors.rgb_to_hsv(color)
a = 0.0
avg_hsv = [a * bi + (1 - a) * ci for (bi, ci) in zip(black_hsv, color_hsv)]
avg = matplotlib.colors.hsv_to_rgb(avg_hsv)
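        # The HSV blend above is discarded below; the returned color is an RGB blend
        # toward black with weight a = 0.35.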
a = 0.35
avg = [a * bi + (1 - a) * ci for (bi, ci) in zip(black, color)]
return avg
def no_mom(text):
return text.replace("($+$m)", "").replace("($-$m)", "")
dy = {
expdef.M: {
expdef.PTB: {
expdef.SIGN_M: 0.6,
expdef.SGD_M: 0.33,
expdef.NORM_M: 0.23,
expdef.ADAM_M: 0.10,
expdef.SIGN_NM: 0.6,
expdef.SGD_NM: 0.33,
expdef.NORM_NM: 0.23,
expdef.ADAM_NM: 0.10,
},
expdef.WT2: {
expdef.SIGN_M: 0.6,
expdef.SGD_M: 0.7,
expdef.NORM_M: 0.37,
expdef.ADAM_M: 0.05,
expdef.SIGN_NM: 0.6,
expdef.SGD_NM: 0.7,
expdef.NORM_NM: 0.45,
expdef.ADAM_NM: 0.15,
},
expdef.SQUAD: {
expdef.SGD_M: 0.35,
expdef.NORM_M: 0.25,
expdef.SIGN_M: 0.12,
expdef.ADAM_M: 0.02,
expdef.SGD_NM: 0.35,
expdef.NORM_NM: 0.25,
expdef.SIGN_NM: 0.12,
expdef.ADAM_NM: 0.02,
},
},
expdef.FULL: {
expdef.PTB: {
expdef.SGD_M: 0.45,
expdef.NORM_M: 0.25,
expdef.SIGN_M: 0.13,
expdef.ADAM_M: 0.02,
expdef.SGD_NM: 0.55,
expdef.NORM_NM: 0.43,
expdef.SIGN_NM: 0.18,
expdef.ADAM_NM: 0.06,
},
expdef.WT2: {
expdef.SGD_M: 0.77,
expdef.NORM_M: 0.67,
expdef.SIGN_M: 0.39,
expdef.ADAM_M: 0.05,
expdef.SGD_NM: 0.90,
expdef.NORM_NM: 0.80,
expdef.SIGN_NM: 0.65,
expdef.ADAM_NM: 0.53,
},
expdef.SQUAD: {
expdef.SGD_M: 0.50,
expdef.NORM_M: 0.40,
expdef.ADAM_M: 0.12,
expdef.SIGN_M: 0.02,
expdef.SGD_NM: 0.65,
expdef.NORM_NM: 0.54,
expdef.SIGN_NM: 0.42,
expdef.ADAM_NM: 0.30,
},
},
}
for i, bs in enumerate(bss):
for j, ds in enumerate(dss):
ax = axes[i][j]
xlims = ax.get_xlim()
ax.set_xlim([xlims[0], 1.2 * xlims[1]])
ylims = ax.get_ylim()
for opt in opts:
agg = quickselect_agg(ds, bs, opt)
x = agg.index[-1] + (xlims[1] - xlims[0]) * 0.025
y = (ylims[0] ** (1 - dy[bs][ds][opt])) * (
ylims[1] ** (dy[bs][ds][opt])
)
color = darker(plth.linestyles_nm[opt]["color"])
ax.text(
x, y, no_mom(plth.abbrev(opt)), fontsize="xx-small", color=color
)
if __name__ == "__main__":
settings(plt)
data = load_data()
for with_momentum in [True, False]:
fig = plt.figure()
make_figure(fig, data, with_momentum=with_momentum)
filename = Path(__file__).stem
if not with_momentum:
filename += "_nomom"
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
| 8,058 | 32.164609 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/histograms_and_small_training.py
|
import importlib
import os
from pathlib import Path
import explib.results.data as data_h
import explib.results.experiment_groups as expdef
import explib.results.plotting as helpers
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from explib.results import data_caching
from matplotlib import gridspec
from statsmodels.graphics.gofplots import qqplot
plth = helpers
def load_data(seed=4):
gradnorms = plth.load_gradnorms(seed)
runs_at_last_epoch, best_runs = data_caching.gridsearch_all_end()
best_runs = data_h.add_stop_at_info(best_runs, stop_at=expdef.EPOCH_CLIP)
return {"optims": best_runs, "gradnorms": gradnorms}
def settings(plt):
plt.rcParams.update(
plth.iclr_config_2(
rel_width=1.0, nrows=2, ncols=5, height_to_width_ratio=1 / 1.0
)
)
def make_figure(fig, data):
importlib.reload(helpers)
importlib.reload(expdef)
data_optims = data["optims"]
data_gradnorms = data["gradnorms"]
dsets = ["mnist", "cifar", "ptb", "wt2", "squad"]
settings = {
"mnist": {
"title": plth.fdisplaynames("mnist"),
"norm_name": "mnist_256",
},
"cifar": {
"title": plth.fdisplaynames("cifar10"),
"norm_name": "cifar10_64",
},
"ptb": {
"title": plth.fdisplaynames("ptb"),
"norm_name": "ptb_16",
},
"wt2": {
"title": plth.fdisplaynames("wikitext2"),
"norm_name": "wt2_16",
},
"squad": {
"title": plth.fdisplaynames("squad"),
"norm_name": "squad_16",
},
}
axes = plth.make_grid_iclr(fig, grid_type="2x2-3")
zoomax_bottom = 0.2
zoomax_left = 0.38 + 0.12
zoomax_width = 0.59 - 0.12
zoomaxes = [
axes[0][2].inset_axes(bounds=(zoomax_left, zoomax_bottom, zoomax_width, 0.35)),
axes[0][3].inset_axes(bounds=(zoomax_left, zoomax_bottom, zoomax_width, 0.35)),
axes[0][4].inset_axes(bounds=(zoomax_left, zoomax_bottom, zoomax_width, 0.35)),
]
plot_norm_squared = False
transform = (lambda x: x) if plot_norm_squared else np.sqrt
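    # The stored gradient errors are presumably squared norms, so the sqrt transform
    # plots the norm itself unless plot_norm_squared is set.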
zoom_settings = {
"ptb": {
"id": 0,
"ymax": 14,
"xmin": transform(0.575),
"xmax": transform(0.989),
},
"wt2": {"id": 1, "ymax": 14, "xmin": transform(1.115), "xmax": transform(1.6)},
"squad": {
"id": 2,
"ymax": 14,
"xmin": transform(19.9),
"xmax": transform(48.6),
},
}
plth.hide_frame(*zoomaxes, left=True)
qq_w = 0.425
qq_h = 0.475
qqaxes = [ax.inset_axes(bounds=(1 - qq_w, 1 - qq_h, qq_w, qq_h)) for ax in axes[0]]
for ax in qqaxes:
for name, spine in ax.spines.items():
spine.set_linewidth(0.6)
helpers.hide_frame(*qqaxes, top=False, right=False, left=False, bottom=False)
helpers.hide_ticks(*qqaxes)
helpers.hide_frame(*zoomaxes, top=True, right=True, left=True, bottom=False)
helpers.hide_ticks(*zoomaxes, x=True, y=False)
C = [0.3333333333] * 3
colors = {
"mnist": C,
"cifar": C,
"ptb": C,
"wt2": C,
"squad": C,
}
for i, dset in enumerate(dsets):
xs = data_gradnorms[settings[dset]["norm_name"]][:-1]
axes[0][i].hist(transform(xs), bins=50, color=colors[dset])
ax = axes[0][i]
ax.set_title(settings[dset]["title"])
if True:
if dset in zoom_settings.keys():
zoom_setting = zoom_settings[dset]
zoomax = zoomaxes[zoom_setting["id"]]
zoomax.hist(transform(xs), bins=50, color=colors[dset])
zoomax.set_xlim([zoom_setting["xmin"], zoom_setting["xmax"]])
zoomax.set_ylim([0, zoom_setting["ymax"]])
left_in_datacoords = (
zoomax_left * (ax.get_xlim()[1] - ax.get_xlim()[0])
+ ax.get_xlim()[0]
)
right_in_datacoords = (zoomax_left + zoomax_width) * (
ax.get_xlim()[1] - ax.get_xlim()[0]
) + ax.get_xlim()[0]
ax.plot(
[zoom_setting["xmin"], left_in_datacoords],
[0, zoomax_bottom * ax.get_ylim()[1]],
linewidth=helpers._stroke_width,
color=helpers.BASE_COLORS["gray"],
)
ax.plot(
[ax.get_xlim()[1], right_in_datacoords],
[0, zoomax_bottom * ax.get_ylim()[1]],
linewidth=helpers._stroke_width,
color=helpers.BASE_COLORS["gray"],
)
ax.set_xlim([ax.get_xlim()[0], ax.get_xlim()[1] + 0.001])
zoomax.tick_params(
axis="both", which="major", labelsize=helpers.fontsizes["tiny"]
)
axes[0][0].set_ylabel("Counts")
for ax in axes[0]:
ax.set_xlabel("Gradient error")
ax.grid(False)
for i, dset in enumerate(dsets):
xs = data_gradnorms[settings[dset]["norm_name"]][:-1]
ax = qqaxes[i]
qqplot(
data=transform(xs),
ax=ax,
fit=True,
line=None,
markersize=0.5,
marker=".",
markeredgecolor=colors[dset],
linestyle="none",
)
ax.set_xlabel("", fontsize=helpers.fontsizes["tiny"])
ax.set_ylabel("", fontsize=helpers.fontsizes["tiny"])
end_pts = list(zip(ax.get_xlim(), ax.get_ylim()))
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(
end_pts,
end_pts,
color=helpers.BASE_COLORS["gray"],
zorder=0,
linewidth=helpers._stroke_width,
)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
dss = [expdef.MNIST, expdef.CIFAR10, expdef.PTB, expdef.WT2, expdef.SQUAD]
for i, ds in enumerate(dss):
ax = axes[1][i]
res_ds = data_h.new_select(
data_optims, selections=expdef.EXPERIMENTS[ds][expdef.S]
)
epoch_clip = expdef.EPOCH_CLIP[ds][expdef.S]
res_ds = res_ds[res_ds["epoch"] <= epoch_clip]
for opt in expdef.STANDARD_OPT:
res_ds_bs_opt = data_h.new_select(res_ds, selections=[expdef.OPTIMS[opt]])
res_ds_bs_opt = res_ds_bs_opt[res_ds_bs_opt["epoch"].notna()]
agg = res_ds_bs_opt.groupby("step")["training_loss"].agg(
[min, max, "median"]
)
n_samples = 50
ax.plot(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["median"], n_samples),
**plth.linestyles_nm[opt],
)
ax.fill_between(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["min"], n_samples),
plth.subsample(agg["max"], n_samples),
**plth.fillstyles[opt],
)
ax.set_yscale("log")
if ds == expdef.MNIST:
ax.set_ylim([10**-6, 10**1])
if ds == expdef.CIFAR10:
ax.set_ylim([10**-5, 10**2])
if ds == expdef.PTB:
ax.set_ylim([1.7, 7])
ax.set_yticks([], minor=False)
ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9], minor=True)
ax.set_yticklabels([2, "", 4, "", 6, "", 8, ""], minor=True)
if ds == expdef.WT2:
ax.set_ylim([10**-1, 10**1.5])
if ds == expdef.SQUAD:
ax.set_ylim([10**-1, 10**1])
for i, ds in enumerate(expdef.ALL_DS):
ax = axes[1][i]
if ds == expdef.MNIST:
ax.set_xticks([0, 25, 50, 75, 100])
ax.set_xticklabels([0, "", "", "", 100])
if ds == expdef.CIFAR10:
ax.set_xticks([0, 25, 50, 75, 100])
ax.set_xticklabels([0, "", "", "", 100])
if ds == expdef.PTB:
ax.set_xticks([0, 25, 50, 75, 100])
ax.set_xticklabels([0, "", "", "", 100])
if ds == expdef.WT2:
ax.set_xticks([0, 10, 20, 30, 40])
ax.set_xticklabels([0, "", "", "", 40])
if ds == expdef.SQUAD:
ax.set_xticks([0, 1, 2, 3, 4, 5])
ax.set_xticklabels([0, "", "", "", "", 5])
ax.set_xlabel("Epoch", labelpad=-5)
axes[1][0].set_ylabel("Training loss")
make_legend = False
if make_legend:
legsettings = {
"frameon": False,
"borderaxespad": -0.3,
"labelspacing": 0.1,
"handlelength": 1.3,
"handletextpad": 0.3,
"fontsize": "x-small",
"markerfirst": False,
}
linestyles = [
plth.linestyles_nm[expdef.SGD_M],
plth.linestyles_nm[expdef.SGD_NM],
plth.linestyles_nm[expdef.ADAM_M],
plth.linestyles_nm[expdef.ADAM_NM],
]
for i in range(len(linestyles)):
linestyles[i]["linewidth"] = 1.5
linestyles[1]["dashes"] = (2.0, 2.0)
linestyles[3]["dashes"] = (2.0, 2.0)
lines = [
matplotlib.lines.Line2D([0, 1], [0, 1], **linestyle)
for linestyle in linestyles
]
labels = [
plth.fdisplaynames(expdef.SGD_M),
plth.fdisplaynames(expdef.SGD_NM),
plth.fdisplaynames(expdef.ADAM_M),
plth.fdisplaynames(expdef.ADAM_NM),
]
lines_labels_and_ax = [
([lines[1]], [labels[1]], axes[1][0]),
([lines[3]], [labels[3]], axes[1][1]),
([lines[0]], [labels[0]], axes[1][2]),
([lines[2]], [labels[2]], axes[1][3]),
]
lines_labels_and_ax = [
([lines[1], lines[3]], [labels[1], labels[3]], axes[1][2]),
([lines[0], lines[2]], [labels[0], labels[2]], axes[1][4]),
]
for lines, labels, ax in lines_labels_and_ax:
legend = ax.legend(lines, labels, **legsettings, loc="upper right")
fig.canvas.draw()
if __name__ == "__main__":
settings(plt)
fig = plt.figure()
make_figure(fig, load_data())
helpers.save(fig, name=os.path.join("output", Path(__file__).stem))
| 10,301 | 31.913738 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/data_preprocessing_for_additional_plot.py
|
import os
import pdb
import pickle
import explib.results.cleanup as cleanh
import explib.results.data as datah
import explib.results.plotting as plth
import numpy as np
from explib import config
from tqdm import tqdm
def standard_gridsearch():
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names
batch_sizes = {"small": 0, "medium": 1, "large": 2, "larger": 3, "full": None}
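    # The integer values index into the per-dataset filter lists of
    # plth.experiment_settings["increasing_batch_size"]; "full" is handled separately below.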
def load_data(problem_slug):
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"model": model, "dataset": dataset}
summary = datah.df_select(df, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"value_at_init": None,
"ylims": None,
"alphas": None,
"best_alpha": None,
"best_alpha_idx": None,
"ys": None,
"ys+": None,
"ys-": None,
"batch_size": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes.keys()
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name, idx in tqdm(batch_sizes.items()):
for opt in optimizers:
if batch_size_name == "full":
if problem_slug == "BRT":
experiment_filter = {"group": "full-batch-training-2"}
else:
experiment_filter = {"group": "full-batch-training"}
epoch_filter = {
"epoch": plth.experiment_settings["full_batch"][
"clip_epoch"
][dataset]
}
else:
exp_settings = plth.experiment_settings["increasing_batch_size"]
experiment_filter = {
**exp_settings["problem_filters"][dataset][idx],
"group": "increasing-batch-size",
}
epoch_filter = {
"epoch": exp_settings["run_filters"][dataset][idx]["epoch"]
}
summary_filter = {
**problem_filter,
**experiment_filter,
**plth.opt_filters[opt],
}
runs_filter = {**epoch_filter}
runs_last_epoch = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, metric_key, ylims
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, "training_loss_runs", ylims_trainingloss
)
medians, mins, maxs, alphas = datah.median_min_max_by(
runs_last_epoch, key="opt.alpha", metric_name=metric_key
)
best_alpha = plth.find_best_stepsize(
runs_last_epoch, by_key="training_loss_runs"
)
best_alpha_idx = np.where(alphas == best_alpha)[0][0]
if batch_size_name == "full":
batch_size = "Full"
else:
batch_size = experiment_filter["batch_size"]
batch_size *= experiment_filter.get("accumulate_steps", 1)
plot_data[metric_type][batch_size_name][opt] = {
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"alphas": alphas,
"best_alpha": best_alpha,
"best_alpha_idx": best_alpha_idx,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"batch_size": batch_size,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["LEN", "RES", "TEC", "TXL", "BRT"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"gridsearch_{slug}.pk")
def normalized_gridsearch():
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = [
"NormalizedGD",
"SignDescent",
"RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
"RescaledSignDescent+m",
]
batch_sizes = ["medium", "large", "larger", "full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
summary = df
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"model": model, "dataset": dataset}
summary = datah.df_select(summary, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"value_at_init": None,
"ylims": None,
"alphas": None,
"best_alpha": None,
"best_alpha_idx": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
# if "+m" in opt and problem_slug == "BRT":
# continue
# if batch_size_name == "full" and "Rescaled" in opt:
# continue
if batch_size_name == "full":
exp_set = plth.experiment_settings["norm-ablation-full"]
group = "full-batch-training-normalized-optimizers"
max_epoch = exp_set["max_epoch"][dataset]
clip_epoch = exp_set["clip_epoch"][dataset]
exp_filter = {"group": group, "max_epoch": max_epoch}
else:
exp_set = plth.experiment_settings["norm-ablation"]
exp_set = exp_set[dataset][batch_size_name]
group = "normalization-ablation"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {**epoch_filter}
runs_last_epoch = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, metric_key, ylims
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size
(medians, mins, maxs, alphas,) = datah.median_min_max_by(
runs_last_epoch, key="opt.alpha", metric_name=metric_key
)
try:
best_alpha = plth.find_best_stepsize(
runs_last_epoch, by_key="training_loss_runs"
)
except:
import pdb
pdb.set_trace()
best_alpha_idx = np.where(alphas == best_alpha)[0][0]
plot_data[metric_type][batch_size_name][opt] = {
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"alphas": alphas,
"best_alpha": best_alpha,
"best_alpha_idx": best_alpha_idx,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["LEN", "RES", "TEC", "TXL", "BRT"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"normalized_gridsearch_{slug}.pk")
def best_runs_for_each_batch_normalized_optimizers():
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = [
"NormalizedGD",
"SignDescent",
"RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
"RescaledSignDescent+m",
]
batch_sizes = ["medium", "large", "larger", "full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"model": model, "dataset": dataset}
summary = datah.df_select(df, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"runs_wuuid": None,
"value_at_init": None,
"ylims": None,
"best_alpha": None,
"update_count": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
# if "+m" in opt and problem_slug == "BRT":
# continue
# if batch_size_name == "full" and "Rescaled" in opt:
# continue
if batch_size_name == "full":
exp_set = plth.experiment_settings["norm-ablation-full"]
group = "full-batch-training-normalized-optimizers"
max_epoch = exp_set["max_epoch"][dataset]
clip_epoch = exp_set["clip_epoch"][dataset]
exp_filter = {"group": group, "max_epoch": max_epoch}
else:
exp_set = plth.experiment_settings["norm-ablation"]
exp_set = exp_set[dataset][batch_size_name]
group = "normalization-ablation"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {}
runs_ = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Truncate runs to specified number of epochs
runs_ = runs_[runs_["epoch_runs"] <= epoch_filter["epoch"]]
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_ = plth.clip_metric_at(runs_, metric_key, ylims)
runs_ = plth.clip_metric_at(
runs_, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size and select runs
runs_at_lastepoch = runs_[
runs_["epoch_runs"] == epoch_filter["epoch"]
]
best_alpha = plth.find_best_stepsize(
runs_at_lastepoch, by_key="training_loss_runs"
)
runs_ = datah.df_select(runs_, **{"opt.alpha": best_alpha})
medians, mins, maxs, update_count = datah.median_min_max_by(
runs_, key="update_count", metric_name=metric_key
)
(
medians_epoch,
mins_epoch,
maxs_epoch,
epoch,
) = datah.median_min_max_by(
runs_, key="epoch_runs", metric_name=metric_key
)
detailed_info = {}
wuuids = list(runs_["wuuid"].unique())
for wuuid in wuuids:
runs_wuuid = datah.df_select(runs_, wuuid=wuuid)
runs_wuuid = runs_wuuid[
[
"step",
"norm_squared_gradients_runs",
"norm_squared_gradients_l1_runs",
"epoch_training_time_runs",
]
].sort_values("step")
detailed_info[wuuid] = {
"l2norms**2": list(
runs_wuuid["norm_squared_gradients_runs"]
),
"l1norms**2": list(
runs_wuuid["norm_squared_gradients_l1_runs"]
),
"runtimes": list(runs_wuuid["epoch_training_time_runs"]),
}
plot_data[metric_type][batch_size_name][opt] = {
"runs_wuuid": list(runs_["wuuid"].unique()),
"detailled_info": detailed_info,
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"best_alpha": best_alpha,
"update_count": update_count,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"epochs": epoch,
"epochs_ys": medians_epoch,
"epochs_ys+": maxs_epoch,
"epochs_ys-": mins_epoch,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["LEN", "RES", "TEC", "TXL", "BRT"]
# problem_slugs = ["TXL", "BRT"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"normalized_best_{slug}.pk")
def best_runs_for_each_batch_normal_optimizers():
problem_slugs = ["LEN", "RES", "TEC", "TXL", "BRT"]
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names
batch_sizes = {"small": 0, "medium": 1, "large": 2, "larger": 3, "full": None}
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
summary = df
plot_data = {
problem_slug: {
metric_type: {
batch_size: {
opt: {
"runs_wuuid": None,
"value_at_init": None,
"ylims": None,
"best_alpha": None,
"iter": None,
"ys": None,
"ys+": None,
"ys-": None,
"epochs": None,
"epochs_ys": None,
"epochs_ys+": None,
"epochs_ys-": None,
"batch_size": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes.keys()
}
for metric_type in metric_types
}
for problem_slug in problem_slugs
}
datasets = [plth.problems[slug]["dataset"] for slug in problem_slugs]
metrics_per_datasets = {
dataset: [
plth.metric_type_to_dset_to_metric[metric_type][dataset]
for metric_type in metric_types
]
for dataset in datasets
}
ylims_by_dataset_and_metric = {
dataset: {
metric: plth.get_metric_limits_for_dataset(runs, summary, dataset, metric)
for metric in metrics_per_datasets[dataset]
}
for dataset in datasets
}
for problem_slug in tqdm(problem_slugs):
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"model": model, "dataset": dataset}
summary_problem = datah.df_select(summary, **problem_filter)
for metric_type in metric_types:
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
ylims = ylims_by_dataset_and_metric[dataset][metric]
ylims_trainingloss = ylims_by_dataset_and_metric[dataset]["training_loss"]
for batch_size_name, idx in batch_sizes.items():
if batch_size_name == "full":
if problem_slug == "BRT":
experiment_filter = {"group": "full-batch-training-2"}
else:
experiment_filter = {"group": "full-batch-training"}
epoch_filter = {
"epoch": plth.experiment_settings["full_batch"]["clip_epoch"][
dataset
]
}
else:
exp_settings = plth.experiment_settings["increasing_batch_size"]
experiment_filter = {
**exp_settings["problem_filters"][dataset][idx],
"group": "increasing-batch-size",
}
epoch_filter = {
"epoch": exp_settings["run_filters"][dataset][idx]["epoch"]
}
summary_problem_bs = datah.df_select(
summary_problem, **experiment_filter
)
for opt in optimizers:
runs_ = cleanh.filter_merge(
summary_problem_bs, runs, plth.opt_filters[opt], {}
)
# %%
# Truncate runs to specified number of epochs
runs_ = runs_[runs_["epoch_runs"] <= epoch_filter["epoch"]]
# %%
# Find ylimits and truncate metric to worst-case if nan
runs_ = plth.clip_metric_at(runs_, metric_key, ylims)
runs_ = plth.clip_metric_at(
runs_, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size and select runs
runs_at_lastepoch = runs_[
runs_["epoch_runs"] == epoch_filter["epoch"]
]
best_alpha = plth.find_best_stepsize(
runs_at_lastepoch, by_key="training_loss_runs"
)
runs_ = datah.df_select(runs_, **{"opt.alpha": best_alpha})
medians, mins, maxs, update_count = datah.median_min_max_by(
runs_, key="update_count", metric_name=metric_key
)
(
medians_epoch,
mins_epoch,
maxs_epoch,
epoch,
) = datah.median_min_max_by(
runs_, key="epoch_runs", metric_name=metric_key
)
if batch_size_name == "full":
batch_size = "Full"
else:
batch_size = experiment_filter["batch_size"]
batch_size *= experiment_filter.get("accumulate_steps", 1)
try:
detailed_info = {}
wuuids = list(runs_["wuuid"].unique())
for wuuid in wuuids:
runs_wuuid = datah.df_select(runs_, wuuid=wuuid)
subset = runs_wuuid[
[
"step",
"norm_squared_gradients_runs",
"norm_squared_gradients_l1_runs",
"epoch_training_time_runs",
]
].sort_values("step")
detailed_info[wuuid] = {
"l2norms**2": list(
subset["norm_squared_gradients_runs"]
),
"l1norms**2": list(
subset["norm_squared_gradients_l1_runs"]
),
"runtimes": list(subset["epoch_training_time_runs"]),
}
except:
import pdb
pdb.set_trace()
plot_data[problem_slug][metric_type][batch_size_name][opt] = {
"runs_wuuid": list(runs_["wuuid"].unique()),
"detailled_info": detailed_info,
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"best_alpha": best_alpha,
"iter": update_count,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"epochs": epoch,
"epochs_ys": medians_epoch,
"epochs_ys+": maxs_epoch,
"epochs_ys-": mins_epoch,
"batch_size": batch_size,
"max_epoch": epoch_filter.get("epoch"),
}
plth.save_preprocessed(plot_data, "best_runs_for_each_batch_normal_optimizers.pk")
def gs_nodroupout():
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names + [
"NormalizedGD",
"SignDescent",
"RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
"RescaledSignDescent+m",
]
batch_sizes = ["full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
summary = df
dataset = plth.problems[problem_slug]["dataset"]
problem_filter = {"dataset": dataset}
summary = datah.df_select(summary, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"value_at_init": None,
"ylims": None,
"alphas": None,
"best_alpha": None,
"best_alpha_idx": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
exp_set = plth.experiment_settings["no-dropout"]
exp_set = exp_set[dataset][batch_size_name]
group = "no-dropout"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {**epoch_filter}
runs_last_epoch = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, metric_key, ylims
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size
(medians, mins, maxs, alphas,) = datah.median_min_max_by(
runs_last_epoch, key="opt.alpha", metric_name=metric_key
)
try:
best_alpha = plth.find_best_stepsize(
runs_last_epoch, by_key="training_loss_runs"
)
except:
import pdb
pdb.set_trace()
best_alpha_idx = np.where(alphas == best_alpha)[0][0]
plot_data[metric_type][batch_size_name][opt] = {
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"alphas": alphas,
"best_alpha": best_alpha,
"best_alpha_idx": best_alpha_idx,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["TEC", "TXL"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"gs_nodropout_{slug}.pk")
def best_runs_nodropout():
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names + [
"NormalizedGD",
"SignDescent",
"RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
"RescaledSignDescent+m",
]
batch_sizes = ["full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"dataset": dataset}
summary = datah.df_select(df, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"runs_wuuid": None,
"value_at_init": None,
"ylims": None,
"best_alpha": None,
"update_count": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
exp_set = plth.experiment_settings["no-dropout"]
exp_set = exp_set[dataset][batch_size_name]
group = "no-dropout"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {}
runs_ = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Truncate runs to specified number of epochs
runs_ = runs_[runs_["epoch_runs"] <= epoch_filter["epoch"]]
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_ = plth.clip_metric_at(runs_, metric_key, ylims)
runs_ = plth.clip_metric_at(
runs_, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size and select runs
runs_at_lastepoch = runs_[
runs_["epoch_runs"] == epoch_filter["epoch"]
]
best_alpha = plth.find_best_stepsize(
runs_at_lastepoch, by_key="training_loss_runs"
)
runs_ = datah.df_select(runs_, **{"opt.alpha": best_alpha})
medians, mins, maxs, update_count = datah.median_min_max_by(
runs_, key="update_count", metric_name=metric_key
)
(
medians_epoch,
mins_epoch,
maxs_epoch,
epoch,
) = datah.median_min_max_by(
runs_, key="epoch_runs", metric_name=metric_key
)
detailed_info = {}
wuuids = list(runs_["wuuid"].unique())
for wuuid in wuuids:
runs_wuuid = datah.df_select(runs_, wuuid=wuuid)
runs_wuuid = runs_wuuid[
[
"step",
"norm_squared_gradients_runs",
"norm_squared_gradients_l1_runs",
"epoch_training_time_runs",
]
].sort_values("step")
detailed_info[wuuid] = {
"l2norms**2": list(
runs_wuuid["norm_squared_gradients_runs"]
),
"l1norms**2": list(
runs_wuuid["norm_squared_gradients_l1_runs"]
),
"runtimes": list(runs_wuuid["epoch_training_time_runs"]),
}
plot_data[metric_type][batch_size_name][opt] = {
"runs_wuuid": list(runs_["wuuid"].unique()),
"detailled_info": detailed_info,
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"best_alpha": best_alpha,
"update_count": update_count,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"epochs": epoch,
"epochs_ys": medians_epoch,
"epochs_ys+": maxs_epoch,
"epochs_ys-": mins_epoch,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["TEC", "TXL"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"nodropout_best_{slug}.pk")
def gs_fullbatch_squad_fix():
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names + [
"NormalizedGD",
"SignDescent",
# "RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
# "RescaledSignDescent+m",
]
batch_sizes = ["full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
summary = df
dataset = plth.problems[problem_slug]["dataset"]
problem_filter = {"dataset": dataset}
summary = datah.df_select(summary, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"value_at_init": None,
"ylims": None,
"alphas": None,
"best_alpha": None,
"best_alpha_idx": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
exp_set = plth.experiment_settings["fix-full-batch-training-squad"]
exp_set = exp_set[dataset][batch_size_name]
group = "fix-full-batch-training-squad"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {**epoch_filter}
runs_last_epoch = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, metric_key, ylims
)
runs_last_epoch = plth.clip_metric_at(
runs_last_epoch, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size
(medians, mins, maxs, alphas,) = datah.median_min_max_by(
runs_last_epoch, key="opt.alpha", metric_name=metric_key
)
try:
best_alpha = plth.find_best_stepsize(
runs_last_epoch, by_key="training_loss_runs"
)
                    except Exception:
                        # Drop into the debugger if no best step-size can be
                        # found for this optimizer/batch-size setting.
                        import pdb
                        pdb.set_trace()
best_alpha_idx = np.where(alphas == best_alpha)[0][0]
plot_data[metric_type][batch_size_name][opt] = {
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"alphas": alphas,
"best_alpha": best_alpha,
"best_alpha_idx": best_alpha_idx,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["BRT"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"gs_squadfix_{slug}.pk")
def best_runs_fullbatch_squad_fix():
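    """Collect the runs at the best step-size for the fix-full-batch-training-squad
    experiments, including per-run gradient-norm and runtime traces, and save the
    preprocessed plot data."""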
metric_types = ["training_loss", "training_perf", "validation_perf"]
optimizers = plth.opt_names + [
"NormalizedGD",
"SignDescent",
# "RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
# "RescaledSignDescent+m",
]
batch_sizes = ["full"]
df, runs = datah.get_summary(), datah.get_all_runs()
df, runs = cleanh.clean_data(df, runs)
def load_data(problem_slug):
dataset = plth.problems[problem_slug]["dataset"]
model = plth.problems[problem_slug]["model"]
problem_filter = {"dataset": dataset}
summary = datah.df_select(df, **problem_filter)
plot_data = {
metric_type: {
batch_size: {
opt: {
"runs_wuuid": None,
"value_at_init": None,
"ylims": None,
"best_alpha": None,
"update_count": None,
"ys": None,
"ys+": None,
"ys-": None,
"max_epoch": None,
"metric": None,
}
for opt in optimizers
}
for batch_size in batch_sizes
}
for metric_type in metric_types
}
for metric_type in tqdm(metric_types):
metric = plth.metric_type_to_dset_to_metric[metric_type][dataset]
metric_key = f"{metric}_runs"
metric_value_at_init = plth.get_metric_at_start_for_dataset(
runs, summary, dataset, metric
)
for batch_size_name in tqdm(batch_sizes):
for opt in optimizers:
exp_set = plth.experiment_settings["fix-full-batch-training-squad"]
exp_set = exp_set[dataset][batch_size_name]
group = "fix-full-batch-training-squad"
max_epoch = exp_set["max_epoch"]
clip_epoch = exp_set["clip_epoch"]
batch_size = exp_set["batch_size"]
exp_filter = {
"group": group,
"max_epoch": max_epoch,
"batch_size": batch_size,
}
if "accumulate_steps" in exp_set:
exp_filter["accumulate_steps"] = exp_set["accumulate_steps"]
epoch_filter = {"epoch": clip_epoch}
summary_filter = {
**problem_filter,
**exp_filter,
**plth.opt_filters[opt],
}
runs_filter = {}
runs_ = cleanh.filter_merge(
summary, runs, summary_filter, runs_filter
)
# %%
# Truncate runs to specified number of epochs
runs_ = runs_[runs_["epoch_runs"] <= epoch_filter["epoch"]]
# %%
# Find ylimits and truncate metric to worst-case if nan
ylims = plth.get_metric_limits_for_dataset(
runs, summary, dataset, metric
)
ylims_trainingloss = plth.get_metric_limits_for_dataset(
runs, summary, dataset, "training_loss"
)
runs_ = plth.clip_metric_at(runs_, metric_key, ylims)
runs_ = plth.clip_metric_at(
runs_, "training_loss_runs", ylims_trainingloss
)
# %%
# Find best step-size and select runs
runs_at_lastepoch = runs_[
runs_["epoch_runs"] == epoch_filter["epoch"]
]
try:
best_alpha = plth.find_best_stepsize(
runs_at_lastepoch, by_key="training_loss_runs"
)
runs_ = datah.df_select(runs_, **{"opt.alpha": best_alpha})
                    except Exception:
                        # Drop into the debugger if the best step-size cannot
                        # be determined for this optimizer/batch-size setting.
                        import pdb
                        pdb.set_trace()
medians, mins, maxs, update_count = datah.median_min_max_by(
runs_, key="update_count", metric_name=metric_key
)
(
medians_epoch,
mins_epoch,
maxs_epoch,
epoch,
) = datah.median_min_max_by(
runs_, key="epoch_runs", metric_name=metric_key
)
detailed_info = {}
wuuids = list(runs_["wuuid"].unique())
for wuuid in wuuids:
runs_wuuid = datah.df_select(runs_, wuuid=wuuid)
runs_wuuid = runs_wuuid[
[
"step",
"norm_squared_gradients_runs",
"norm_squared_gradients_l1_runs",
"epoch_training_time_runs",
]
].sort_values("step")
detailed_info[wuuid] = {
"l2norms**2": list(
runs_wuuid["norm_squared_gradients_runs"]
),
"l1norms**2": list(
runs_wuuid["norm_squared_gradients_l1_runs"]
),
"runtimes": list(runs_wuuid["epoch_training_time_runs"]),
}
plot_data[metric_type][batch_size_name][opt] = {
"runs_wuuid": list(runs_["wuuid"].unique()),
"detailled_info": detailed_info,
"metric": metric,
"value_at_init": metric_value_at_init,
"ylims": ylims,
"best_alpha": best_alpha,
"update_count": update_count,
"ys": medians,
"ys+": maxs,
"ys-": mins,
"epochs": epoch,
"epochs_ys": medians_epoch,
"epochs_ys+": maxs_epoch,
"epochs_ys-": mins_epoch,
"max_epoch": epoch_filter.get("epoch"),
}
return plot_data
problem_slugs = ["BRT"]
for slug in problem_slugs:
plth.save_preprocessed(load_data(slug), f"squadfix_best_{slug}.pk")
if __name__ == "__main__":
best_runs_fullbatch_squad_fix()
| 50,314 | 37.973664 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/full_performance_for_each_batch_size.py
|
"""Attempt at a figure that would show."""
import importlib
import os
from pathlib import Path
import explib.results.cleanup as cleanh
import explib.results.data as data_h
import explib.results.data as datah
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from explib.results.cleanup import clean_data
from tqdm import tqdm
def load_data():
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_end()
return {"plot_data": best_runs}
def settings(plt):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
plt.rcParams.update(
plth.iclr_config_2(nrows=2, ncols=5, height_to_width_ratio=1 / 1.2)
)
def make_figure(fig, data, with_image=True, opts_to_plot="standard"):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
importlib.reload(expdef)
if opts_to_plot == "standard":
opts1 = [expdef.SGD_NM, expdef.ADAM_NM]
opts2 = [expdef.SGD_M, expdef.ADAM_M]
elif opts_to_plot == "normalized":
opts1 = [expdef.SIGN_NM, expdef.NORM_NM]
opts2 = [expdef.SIGN_M, expdef.NORM_M]
else:
raise ValueError(f"Unknown opts {opts_to_plot}")
dss = expdef.ALL_DS if with_image else [expdef.PTB, expdef.WT2, expdef.SQUAD]
grid_type = "2x2-3" if with_image else "2x3"
plot_data = data["plot_data"]
axes = plth.make_grid_iclr(fig, grid_type=grid_type, tight=True)
def make_plottype(ax, plot_data, ds, bs, opts):
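        # For one dataset/batch-size pair, plot the median training loss over
        # iterations for each optimizer, with a min/max band across runs and a
        # marker (sized by batch size) at the final iteration.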
res_ds_bs = data_h.new_select(plot_data, selections=expdef.EXPERIMENTS[ds][bs])
res_ds_bs = res_ds_bs[res_ds_bs["epoch"].notna()]
res_ds_bs = res_ds_bs[res_ds_bs["epoch"] <= expdef.EPOCH_CLIP[ds][bs]]
res_ds_bs["iter"] = res_ds_bs["epoch"] * res_ds_bs["grad_updates_per_epoch"] + 1
for opt in opts:
res_opt = data_h.new_select(res_ds_bs, selections=[expdef.OPTIMS[opt]])
agg = res_opt.groupby("iter")["training_loss"].agg([min, max, "median"])
n_samples = 50
ax.plot(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["median"], n_samples),
**plth.linestyles_nm[opt],
alpha=0.4,
)
            fillstyle = plth.fillstyles[opt].copy()  # copy so the shared style dict is not mutated
fillstyle["alpha"] = 0.1
ax.fill_between(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["min"], n_samples),
plth.subsample(agg["max"], n_samples),
**fillstyle,
)
            linestyle_ = plth.linestyles[opt].copy()  # copy before adding marker settings
markersize = {
bs: x
for (bs, x) in zip(
expdef.ALL_BS, np.sqrt(np.linspace(1**2, 4.0**2, 5))
)
}
linestyle_["markersize"] = markersize[bs]
linestyle_["marker"] = "v"
ax.plot(
list(agg.index)[-1],
list(agg["median"])[-1],
**linestyle_,
)
for line in [0, 1]:
for j, ds in enumerate(dss):
ax = axes[line][j]
for idx, bs in enumerate(expdef.ALL_BS):
if line == 0:
make_plottype(ax, plot_data, ds, bs, opts=opts1)
else:
make_plottype(ax, plot_data, ds, bs, opts=opts2)
ax.set_xscale("log", base=10)
ax.set_xlim([1, ax.get_xlim()[1] * 2])
ylims_trainingloss = {
expdef.MNIST: [10**-6, 10**1],
expdef.CIFAR10: [10**-7, 10**1.5],
expdef.PTB: [1.7, 10],
expdef.WT2: [10**-1.0, 10**1.3],
expdef.SQUAD: [10**-1, 10**1.0],
}
ax.set_ylim(ylims_trainingloss[ds])
ax.set_yscale("log")
if ds == expdef.PTB:
ax.set_yticks([2, 4, 8], minor=False)
ax.set_yticklabels([2, 4, 8], minor=False)
ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9, 10], minor=True)
ax.set_yticklabels([], minor=True)
xticks_and_lims = {
expdef.MNIST: {"lim": [10**1, 10**4.5], "ticks": [1, 2, 3, 4, 5]},
expdef.CIFAR10: {"lim": [10**1, 10**5], "ticks": [1, 2, 3, 4, 5]},
expdef.PTB: {"lim": [10**1, 10**6], "ticks": [1, 2, 3, 4, 5, 6]},
expdef.WT2: {"lim": [10**1, 10**5], "ticks": [1, 2, 3, 4, 5]},
expdef.SQUAD: {"lim": [10**0.5, 10**5], "ticks": [1, 2, 3, 4, 5]},
}
for i, ds in enumerate(dss):
ticks = [10**i for i in xticks_and_lims[ds]["ticks"]]
axes[0][i].set_xlim(xticks_and_lims[ds]["lim"])
axes[1][i].set_xlim(xticks_and_lims[ds]["lim"])
axes[0][i].set_xticks(ticks, minor=False)
plth.make_xticks_pow10(axes[1][i], ticks)
for ax, ds in zip(axes[0], dss):
ax.set_title(
f"{plth.fdisplaynames(ds)}",
y=1.0,
pad=-0.0,
)
for ax in axes[1]:
ax.set_xlabel("Iteration", labelpad=-7)
axes[0][0].set_ylabel("Training loss")
axes[1][0].set_ylabel("Training loss")
if __name__ == "__main__":
settings(plt)
data = load_data()
for opts_to_plot in ["standard", "normalized"]:
for with_image in [True, False]:
fig = plt.figure()
make_figure(fig, data, with_image=with_image, opts_to_plot=opts_to_plot)
filename = Path(__file__).stem
if opts_to_plot == "normalized":
filename += "_norm"
if with_image:
filename += "_with_image"
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
| 5,946 | 31.856354 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/grid_search_best_runs.py
|
import importlib
import os
from pathlib import Path
import explib.results.cleanup as cleanh
import explib.results.data as data_h
import explib.results.data as datah
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import ticker
def load_data():
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_end()
return {"best_runs": best_runs, "last_epoch": runs_at_last_epoch}
def settings(plt):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
plt.rcParams.update(plth.iclr_config_2(nrows=3, ncols=5, height_to_width_ratio=1))
def make_figure(fig, data, dataset=expdef.WT2, opts_to_plot="normalized"):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
importlib.reload(expdef)
fig.set_dpi(250)
best_runs = data["best_runs"]
if opts_to_plot == "standard":
opts = [expdef.SGD_NM, expdef.ADAM_NM, expdef.SGD_M, expdef.ADAM_M]
elif opts_to_plot == "normalized":
opts = [expdef.SIGN_NM, expdef.NORM_NM, expdef.SIGN_M, expdef.NORM_M]
else:
raise ValueError(f"Unknown opts {opts_to_plot}")
bss = expdef.ALL_BS
grid_type = "3x5"
metrics_for_ds = [
plth.metric_type_to_dset_to_metric[metric][dataset]
for metric in ["training_loss", "training_perf", "validation_perf"]
]
axes = plth.make_grid_iclr(fig, grid_type=grid_type, tight=True)
for i, bs in enumerate(bss):
best_runs_bs = data_h.new_select(best_runs, expdef.EXPERIMENTS[dataset][bs])
best_runs_bs = best_runs_bs[best_runs_bs["epoch"].notna()]
best_runs_bs = best_runs_bs[
best_runs_bs["epoch"] <= expdef.EPOCH_CLIP[dataset][bs]
]
for opt in opts:
best_runs_ = data_h.new_select(best_runs_bs, [expdef.OPTIMS[opt]])
agg = best_runs_.groupby("epoch")[metrics_for_ds].agg([min, max, "median"])
for j, metric in enumerate(metrics_for_ds):
ax = axes[j][i]
n_subsample = 100
linestyle = plth.linestyles_nm[opt]
ax.plot(
plth.subsample(agg.index, n=n_subsample),
plth.subsample(agg[metric]["median"], n=n_subsample),
**linestyle,
)
ax.fill_between(
plth.subsample(agg.index, n=n_subsample),
plth.subsample(agg[metric]["min"], n=n_subsample),
plth.subsample(agg[metric]["max"], n=n_subsample),
**plth.fillstyles[opt],
)
ylims_trainingloss = {
expdef.MNIST: {
"lims": [10**-6, 10**1],
"ticks": [-5, -3, -1],
},
expdef.CIFAR10: {
"lims": [10**-7, 10**2],
"ticks": [-6, -3, 0],
},
expdef.PTB: {"lims": [1.0, 12], "ticks": [2, 4, 6]},
expdef.WT2: {"lims": [10**-1, 10**1.5], "ticks": [-1, 0, 1.0]},
expdef.SQUAD: {"lims": [10**-1.5, 10**1.0], "ticks": [-1, 0]},
}
ylims_PPL = {
expdef.PTB: {
"lims": {"train": [10**0, 10**4.5], "valid": [10**1, 10**4.5]}
},
expdef.WT2: {
"lims": {"train": [10**-1, 10**5], "valid": [10**1, 10**5]}
},
}
def get_ylims(metric, dataset):
if "accuracy" in metric or "f1" in metric:
return [0, 105]
elif metric == "training_loss":
return ylims_trainingloss[dataset]["lims"]
elif metric == "train_ppl":
return ylims_PPL[dataset]["lims"]["train"]
elif metric == "valid_ppl":
return ylims_PPL[dataset]["lims"]["valid"]
else:
            raise ValueError(f"Unknown metric: {metric}")
for i, bs in enumerate(bss):
for j, metric in enumerate(metrics_for_ds):
ax = axes[j][i]
ax.set_ylim(get_ylims(metric, dataset))
if plth.should_log(metric):
ax.set_yscale("log", base=10)
ax.tick_params(
axis="both", which="major", labelsize=plth.fontsizes["tiny"], pad=0
)
ax.yaxis.set_major_locator(ticker.LogLocator(numticks=5))
ax.yaxis.set_minor_locator(ticker.LogLocator(numticks=5))
else:
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=50))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(base=50))
for ax in axes[-1]:
ax.set_xlabel("Epoch", fontsize=plth.fontsizes["small"])
for bs, ax in zip(bss, axes[0]):
ax.set_title(plth.fdisplaynames(bs))
for j, metric in enumerate(metrics_for_ds):
axes[j][0].set_ylabel(plth.fdisplaynames(metric))
if __name__ == "__main__":
settings(plt)
data = load_data()
for dataset in expdef.ALL_DS:
for opt_to_plot in ["standard", "normalized"]:
fig = plt.figure()
make_figure(fig, data, dataset=dataset, opts_to_plot=opt_to_plot)
filename = Path(__file__).stem + f"_{dataset}" + f"_{opt_to_plot}"
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
| 5,424 | 33.775641 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/legends.py
|
"""Attempt at a figure that would show."""
import importlib
import os
from pathlib import Path
import explib.results.cleanup as cleanh
import explib.results.data as datah
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
def load_data():
return {}
def settings(plt):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
plt.rcParams.update(
plth.icml_config(nrows=1, ncols=1, height_to_width_ratio=1 / 50)
)
def make_figure(fig, data, normalized=True, with_bs=True):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
if normalized:
optims = [expdef.SIGN_M, expdef.NORM_M, expdef.SIGN_NM, expdef.NORM_NM]
else:
optims = [expdef.SGD_M, expdef.ADAM_M, expdef.SGD_NM, expdef.ADAM_NM]
lines = []
for opt in optims:
linestyle = plth.linestyles_nm[opt].copy()
linestyle["linewidth"] = 1.75
if "+m" not in opt:
linestyle["dashes"] = (2.0, 2.0)
label = plth.abbrev(opt)
lines.append(matplotlib.lines.Line2D([], [], **linestyle, label=label))
if with_bs:
leg = fig.legend(
handles=lines,
loc="center left",
ncol=len(optims),
frameon=False,
borderpad=1,
fontsize="small",
handletextpad=0.5,
handlelength=1.5,
columnspacing=1.25,
)
fig.add_artist(leg)
else:
leg = fig.legend(
handles=lines,
loc="center",
ncol=len(optims),
frameon=False,
borderpad=0,
fontsize="small",
handletextpad=0.5,
handlelength=2.0,
columnspacing=2.0,
)
fig.add_artist(leg)
if with_bs:
markersize = {
bs: x
for (bs, x) in zip(expdef.ALL_BS, np.sqrt(np.linspace(1**2, 4.0**2, 5)))
}
lines = []
for bs in reversed(expdef.ALL_BS):
linestyle = {
"linestyle": "",
"marker": "v",
"color": "grey",
"markersize": markersize[bs],
}
lines.append(matplotlib.lines.Line2D([], [], **linestyle, label=bs))
leg = fig.legend(
handles=lines,
loc="center right",
ncol=len(expdef.ALL_BS),
frameon=False,
borderpad=1,
handletextpad=-0.3,
columnspacing=0.6,
)
fig.add_artist(leg)
if __name__ == "__main__":
settings(plt)
data = load_data()
for normalized in [True, False]:
for with_bs in [True, False]:
fig = plt.figure()
make_figure(fig, data, normalized=normalized, with_bs=with_bs)
filename = Path(__file__).stem
filename += "_normalized" if normalized else "_standard"
filename += "_with_bs" if with_bs else ""
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
| 3,186 | 25.781513 | 84 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/convergence_full_batch.py
|
import importlib
import os
from pathlib import Path
import explib.results.data as data_h
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib
import matplotlib.pyplot as plt
from explib.results import data_caching
def load_data():
runs_at_last_epoch, best_runs = data_caching.gridsearch_all_end()
return {"optims": best_runs}
def settings(plt):
importlib.reload(plth)
plt.rcParams.update(
plth.iclr_config_2(
rel_width=1.0, nrows=1, ncols=5, height_to_width_ratio=1 / 1.0
)
)
def make_figure(fig, data):
importlib.reload(plth)
data_optims = data["optims"]
axes = plth.make_grid_iclr(fig, grid_type="2-3")
for i, ds in enumerate(expdef.ALL_DS):
ax = axes[0][i]
res_ds = data_h.new_select(
data_optims, selections=expdef.EXPERIMENTS[ds][expdef.FULL]
)
for opt in expdef.STANDARD_OPT:
res_ds_opt = data_h.new_select(res_ds, selections=[expdef.OPTIMS[opt]])
res_ds_opt = res_ds_opt[res_ds_opt["epoch"].notna()]
agg = res_ds_opt.groupby("step")[["training_loss", "epoch"]].agg(
[min, max, "median"]
)
epochs_min, epochs_max = min(agg[("epoch", "median")]), max(
agg[("epoch", "median")]
)
n_points = 50
ax.plot(
plth.subsample(agg[("epoch", "median")], n_points),
plth.subsample(agg[("training_loss", "median")], n_points),
**plth.linestyles_nm[opt],
)
ax.fill_between(
plth.subsample(agg[("epoch", "median")], n_points),
plth.subsample(agg[("training_loss", "min")], n_points),
plth.subsample(agg[("training_loss", "max")], n_points),
**plth.fillstyles[opt],
)
ax.set_yscale("log")
if ds == expdef.MNIST:
ax.set_ylim([10**-6, 10**1])
if ds == expdef.CIFAR10:
ax.set_ylim([10**-5, 10**2])
if ds == expdef.PTB:
ax.set_ylim([1.7, 7])
ax.set_yticks([], minor=False)
ax.set_yticks([2, 3, 4, 5, 6, 7, 8, 9], minor=True)
ax.set_yticklabels([2, "", 4, "", 6, "", 8, ""], minor=True)
if ds == expdef.WT2:
ax.set_ylim([10**-1, 10**1.5])
if ds == expdef.SQUAD:
ax.set_ylim([10**-1, 10**1])
ax.set_title(plth.fdisplaynames(ds))
ax.set_xticks([epochs_min, epochs_max / 2, epochs_max])
ax.set_xticklabels([0, "", int(epochs_max)])
ax.set_xlabel("Epoch", labelpad=-5)
make_legend = False
if make_legend:
legsettings = {
"frameon": False,
"borderaxespad": -0.1,
"labelspacing": 0.1,
"handlelength": 1.8,
"handletextpad": 0.3,
"fontsize": "x-small",
"markerfirst": False,
}
lines = axes[0][0].lines
axes[0][3].legend(
[lines[1], lines[3]],
[plth.fdisplaynames("adam-m"), plth.fdisplaynames("sgd-m")],
**legsettings,
loc="best",
)
axes[0][0].legend(
[lines[0], lines[2]],
[plth.fdisplaynames("sgd+m"), plth.fdisplaynames("adam+m")],
**legsettings,
loc="best",
)
axes[0][0].set_ylabel("Training loss")
fig.canvas.draw()
if __name__ == "__main__":
settings(plt)
fig = plt.figure()
make_figure(fig, load_data())
plth.save(fig, name=os.path.join("output", Path(__file__).stem))
| 3,655 | 29.466667 | 83 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/perf_vs_batchsize_at_comparable_iter.py
|
import os
from pathlib import Path
import explib.results.data as data_h
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from explib.results.cleanup import clean_data
def load_data():
import importlib
importlib.reload(data_cache)
importlib.reload(data_h)
importlib.reload(expdef)
importlib.reload(data_cache)
# runs_at_last_epoch, best_runs = data_cache.gridsearch_all_start()
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_start_soft_increase()
data = [runs_at_last_epoch, best_runs]
for i in range(len(data)):
data[i] = data_h.add_stop_at_info(data[i], stop_at=expdef.EPOCH_CLIP_START_NEW)
return data
def postprocess(data):
return data
def settings(plt):
plt.rcParams.update(
plth.iclr_config_2(nrows=1, ncols=5, height_to_width_ratio=1 / 1.0)
)
def make_figure(fig, data, opts_to_plot="normalized"):
import importlib
importlib.reload(plth)
dss = [expdef.MNIST, expdef.CIFAR10, expdef.PTB, expdef.WT2, expdef.SQUAD]
axes = plth.make_grid_iclr(fig, grid_type="2-3")
best_runs = data[1]
best_runs_stoped_epoch = best_runs[best_runs["epoch"] == best_runs["epoch_to_stop"]]
if opts_to_plot == "standard":
opts = expdef.STANDARD_OPT
elif opts_to_plot == "normalized":
opts = expdef.NORMALIZED_OPT
else:
raise ValueError(f"Opts to plot undef. Got {opts_to_plot}")
YLIMS = {ds: (plth.MIN_LOSSES[ds], plth.INIT_LOSSES[ds]) for ds in expdef.ALL_DS}
YLIMS.update(
{
# TODO
}
)
for i, ds in enumerate(dss):
res_ds = data_h.new_select(best_runs_stoped_epoch, selections=[{"dataset": ds}])
for opt in opts:
res_ds_bs_opt = data_h.new_select(res_ds, selections=[expdef.OPTIMS[opt]])
res_ds_bs_opt = res_ds_bs_opt[res_ds_bs_opt["epoch"].notna()]
agg = res_ds_bs_opt.groupby("eff_bs")["training_loss"].agg(
[min, max, "median"]
)
bss, medians, mins, maxs = agg.index, agg["median"], agg["min"], agg["max"]
# if ds == expdef.WT2:
# bss = bss[1:]
# elif ds == expdef.SQUAD:
# bss = bss[2:]
# medians, mins, maxs = medians[bss], mins[bss], maxs[bss]
axes[0][i].plot(bss, medians, **plth.linestyles[opt])
axes[0][i].fill_between(bss, mins, maxs, **plth.fillstyles[opt])
for i, ds in enumerate(dss):
ax = axes[0][i]
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_ylim(plth.compute_limits(*plth.get_min_max(ax, axis="y"), margin_p=0.1))
ylims_trainingloss = {
expdef.MNIST: {
"lims": [10**-5, 10**-0.5],
"ticks": [-5, -3, -1],
},
expdef.CIFAR10: {
"lims": [10**-6, 10**1],
"ticks": [-6, -3, 0],
},
expdef.PTB: {"lims": [1.5, 6], "ticks": [2, 4, 6]},
expdef.WT2: {"lims": [10**-1, 10**1.0], "ticks": [-1, 0, 1.0]},
expdef.SQUAD: {"lims": [10**-1.25, 10**0.5], "ticks": [-1, 0]},
}
ax.set_ylim(ylims_trainingloss[ds]["lims"])
if ds == expdef.PTB:
ax.set_yticks(ylims_trainingloss[ds]["ticks"])
ax.set_yticklabels(ylims_trainingloss[ds]["ticks"])
ax.set_yticklabels([], minor=True)
else:
ax.set_yticks([10**i for i in ylims_trainingloss[ds]["ticks"]])
# ax.set_yticks([], minor=True)
axes[0][0].set_ylabel("Training loss \n at comparable iter")
for i, ds in enumerate(dss):
ax = axes[0][i]
ax.set_title(plth.fdisplaynames(ds))
xmin, xmax = plth.get_min_max(axes[0][i], axis="x")
ax.set_xlim([xmin / 2, xmax * 2])
xlabel = "Batch size"
if ds == expdef.MNIST:
plth.make_xticks_pow10(ax, [10**2, 10**3, 10**4, 10**5])
xlabel = "" + xlabel + ""
if ds == expdef.CIFAR10:
plth.make_xticks_pow10(ax, [10**2, 10**3, 10**4, 10**5])
xlabel = " " + xlabel + ""
if ds == expdef.PTB:
plth.make_xticks_pow10(ax, [10**1, 10**2, 10**3, 10**4, 10**5])
xlabel = "" + xlabel + ""
if ds == expdef.WT2:
plth.make_xticks_pow10(ax, [10**1, 10**2, 10**3, 10**4])
xlabel = "" + xlabel + " "
if ds == expdef.SQUAD:
plth.make_xticks_pow10(ax, [10**1, 10**2, 10**3, 10**4, 10**5])
xlabel = "" + xlabel + ""
ax.set_xlabel(xlabel, labelpad=-4, fontsize="x-small")
if __name__ == "__main__":
settings(plt)
data = postprocess(load_data())
fig = plt.figure()
make_figure(fig, data, opts_to_plot="standard")
filename = Path(__file__).stem
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
fig = plt.figure()
make_figure(fig, data, opts_to_plot="normalized")
filename = Path(__file__).stem
plth.save(fig, name=os.path.join("output", filename + "_norm"))
plt.close(fig)
| 5,251 | 33.552632 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/table_of_comparable_iter.py
|
import explib.results.data as data_h
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from explib.results.cleanup import clean_data
def load_data():
import importlib
importlib.reload(data_cache)
importlib.reload(data_h)
importlib.reload(expdef)
do_end = False
importlib.reload(data_cache)
if do_end:
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_end()
else:
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_start_soft_increase()
data = [runs_at_last_epoch, best_runs]
for i in range(len(data)):
data[i] = data_h.add_stop_at_info(data[i], stop_at=expdef.EPOCH_CLIP_START_NEW)
return data
def postprocess(data):
return data
def settings(plt):
plt.rcParams.update(
plth.smaller_config(
nrows=1,
ncols=1,
height_to_width_ratio=1 / 1,
rel_width=0.1,
),
)
def make_figure(fig, data):
def make_table_of_iterations():
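        # Print a LaTeX tabular listing, for every dataset and batch size, the
        # number of epochs at the stopping point and the resulting number of
        # gradient updates (updates per epoch times epochs).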
subset = data[0][
[
"dataset",
"max_epoch",
"grad_updates_per_epoch",
"epoch_to_stop",
"eff_bs",
]
]
subset = subset.drop_duplicates()
subset = subset.sort_values(by=["dataset", "eff_bs"])
subset["updates_at_end"] = (
subset["grad_updates_per_epoch"] * subset["epoch_to_stop"]
)
print(data)
def makeline(items):
return " & ".join([f"{x:<30}" for x in items]) + r" \\"
print(r"\begin{tabular}{lrllll}")
print(r"\toprule")
print(
makeline(
[
"Dataset",
r"\multicolumn{2}{l}{Batch size}",
r"\# Epochs",
r"\# Iterations",
]
)
)
print(r"\midrule")
for ds in expdef.ALL_DS:
for i, bs in enumerate(expdef.ALL_BS):
data_ = data_h.new_select(
subset,
selections=[{"dataset": ds, "eff_bs": expdef.EFF_BS[ds][bs]}],
).to_dict("records")[0]
if i == 0:
print(
makeline(
[
data_["dataset"],
f"{bs:>10} & {data_['eff_bs']:<10}",
data_["epoch_to_stop"],
data_["updates_at_end"],
]
)
)
else:
print(
makeline(
[
"",
f"{bs:>10} & {data_['eff_bs']:<10}",
data_["epoch_to_stop"],
data_["updates_at_end"],
]
)
)
print(r"\bottomrule")
print(r"\end{tabular}")
make_table_of_iterations()
if __name__ == "__main__":
make_figure(plt.figure(), load_data())
plt.show()
| 3,385 | 26.088 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/grid_search.py
|
import importlib
import os
from pathlib import Path
import explib.results.cleanup as cleanh
import explib.results.data as data_h
import explib.results.data as datah
import explib.results.data_caching as data_cache
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import ticker
def load_data():
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
runs_at_last_epoch, best_runs = data_cache.gridsearch_all_end()
data = data_cache.load_filtered_data()
return {"data": data, "best_runs": best_runs, "last_epoch": runs_at_last_epoch}
def settings(plt):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
plt.rcParams.update(plth.iclr_config_2(nrows=3, ncols=5, height_to_width_ratio=1))
def make_figure(fig, data, dataset=expdef.SQUAD, opts_to_plot="normalized"):
importlib.reload(datah)
importlib.reload(cleanh)
importlib.reload(plth)
importlib.reload(expdef)
last_epoch = data["last_epoch"]
best_runs = data["best_runs"]
all_data = data["data"]
epoch_filter = [{"epoch": 0}]
all_data_at_start = data_h.new_select(all_data, epoch_filter)
if opts_to_plot == "standard":
opts = [expdef.SGD_NM, expdef.ADAM_NM, expdef.SGD_M, expdef.ADAM_M]
elif opts_to_plot == "normalized":
opts = [expdef.SIGN_NM, expdef.NORM_NM, expdef.SIGN_M, expdef.NORM_M]
else:
raise ValueError(f"Unknown opts {opts_to_plot}")
bss = expdef.ALL_BS
grid_type = "3x5"
metrics = ["training_loss", "training_perf", "validation_perf"]
metrics_for_ds = [
plth.metric_type_to_dset_to_metric[metric][dataset] for metric in metrics
]
axes = plth.make_grid_iclr(fig, grid_type=grid_type, tight=True)
def get_data(ds, bs, opt):
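        # Aggregate min/median/max of each metric at the last epoch as a
        # function of step-size, flag step-sizes whose runs diverged, and
        # return that table together with the selected best step-size and the
        # metric values at initialization.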
bs_filter = expdef.EXPERIMENTS[dataset][bs]
last_epoch_for_ds = data_h.new_select(last_epoch, bs_filter)
all_data_at_start_for_ds = data_h.new_select(all_data_at_start, bs_filter)
best_runs_for_ds = data_h.new_select(best_runs, bs_filter)
opt_filter = [expdef.OPTIMS[opt]]
last_epoch_ = data_h.new_select(last_epoch_for_ds, opt_filter)
best_runs_ = data_h.new_select(best_runs_for_ds, opt_filter)
all_data_at_start_ = data_h.new_select(all_data_at_start_for_ds, opt_filter)
step_size_perf = last_epoch_.groupby("opt.alpha")[metrics_for_ds].agg(
[min, max, "median"]
)
best_ss = best_runs_["opt.alpha"].unique()[0]
all_data_at_start_ = all_data_at_start_.copy()
all_data_at_start_["has_diverged"] = all_data_at_start_["status"] == "Diverged"
ss_diverging_status = (
all_data_at_start_[["opt.alpha", "has_diverged"]]
.groupby("opt.alpha")
.agg("any")
)
step_size_perf.columns = step_size_perf.columns.to_flat_index()
ss_perf = pd.merge(
step_size_perf,
ss_diverging_status,
left_index=True,
right_index=True,
how="outer",
)
for col in ss_perf.keys():
if "accuracy" in col[0] or "f1" in col[0]:
ss_perf[col] = ss_perf[col].fillna(value=0)
else:
ss_perf[col] = ss_perf[col].fillna(
value=(10**4) * ss_perf[col].median()
)
ss_perf = ss_perf.drop("has_diverged", axis=1)
return ss_perf, best_ss, all_data_at_start_
for i, bs in enumerate(bss):
for opt in opts:
step_size_perf, best_ss, at_start = get_data(ds=dataset, bs=bs, opt=opt)
for j, metric in enumerate(metrics_for_ds):
ax = axes[j][i]
linestyle = plth.linestyles[opt].copy()
ax.plot(
step_size_perf.index,
step_size_perf[(metric, "median")],
**linestyle,
)
ax.fill_between(
step_size_perf.index,
step_size_perf[(metric, "min")],
step_size_perf[(metric, "max")],
**plth.fillstyles[opt],
)
linestyle["marker"] = "*"
linestyle["markersize"] = "4"
linestyle["color"] = "k"
ax.plot(
best_ss,
step_size_perf[(metric, "median")][best_ss],
**linestyle,
zorder=10,
)
ax.axhline(
at_start[metric].median(),
linewidth=plth.linewidth_small,
color=plth.BASE_COLORS["gray"],
label="Init.",
zorder=-10,
)
ylims_trainingloss = {
expdef.MNIST: {
"lims": [10**-6, 10**2],
"ticks": [-5, -3, -1],
},
expdef.CIFAR10: {
"lims": [10**-7, 10**2],
"ticks": [-6, -3, 0],
},
expdef.PTB: {"lims": [1.0, 12], "ticks": [2, 4, 6]},
expdef.WT2: {"lims": [10**-1, 10**1.5], "ticks": [-1, 0, 1.0]},
expdef.SQUAD: {"lims": [10**-1.5, 10**1.0], "ticks": [-1, 0]},
}
ylims_PPL = {
expdef.PTB: {
"lims": {"train": [10**0, 10**4.5], "valid": [10**1, 10**4.5]}
},
expdef.WT2: {
"lims": {"train": [10**-1, 10**5], "valid": [10**1, 10**5]}
},
}
def get_ylims(metric, dataset):
if "accuracy" in metric or "f1" in metric:
return [0, 105]
elif metric == "training_loss":
return ylims_trainingloss[dataset]["lims"]
elif metric == "train_ppl":
return ylims_PPL[dataset]["lims"]["train"]
elif metric == "valid_ppl":
return ylims_PPL[dataset]["lims"]["valid"]
else:
            raise ValueError(f"Unknown metric: {metric}")
for i, bs in enumerate(bss):
for j, metric in enumerate(metrics_for_ds):
ax = axes[j][i]
ax.set_xscale("log")
ax.set_ylim(get_ylims(metric, dataset))
if plth.should_log(metric):
ax.set_yscale("log", base=10)
ax.tick_params(
axis="both", which="major", labelsize=plth.fontsizes["tiny"], pad=0
)
ax.yaxis.set_major_locator(ticker.LogLocator(numticks=5))
ax.yaxis.set_minor_locator(ticker.LogLocator(numticks=5))
else:
ax.yaxis.set_major_locator(ticker.MultipleLocator(base=50))
ax.yaxis.set_minor_locator(ticker.MultipleLocator(base=50))
for ax in axes[-1]:
ax.set_xlabel("Step-size", fontsize=plth.fontsizes["small"])
for bs, ax in zip(bss, axes[0]):
ax.set_title(plth.fdisplaynames(bs))
for j, metric in enumerate(metrics_for_ds):
axes[j][0].set_ylabel(plth.fdisplaynames(metric))
plth.same_xlims(*plth.flatten(axes))
if __name__ == "__main__":
settings(plt)
data = load_data()
for dataset in expdef.ALL_DS:
for opt_to_plot in ["standard", "normalized"]:
fig = plt.figure()
make_figure(fig, data, dataset=dataset, opts_to_plot=opt_to_plot)
filename = Path(__file__).stem + f"_{dataset}" + f"_{opt_to_plot}"
plth.save(fig, name=os.path.join("output", filename))
plt.close(fig)
| 7,557 | 33.669725 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/plots/no_dropout.py
|
import importlib
import os
from pathlib import Path
import explib.results.data as data_h
import explib.results.experiment_groups as expdef
import explib.results.plotting as plth
import matplotlib
import matplotlib.pyplot as plt
from explib.results import data_caching
def load_data():
runs_at_last_epoch, best_runs = data_caching.gridsearch_nd_all_end()
return {"best_runs": best_runs}
def settings(plt):
importlib.reload(plth)
plt.rcParams.update(
plth.iclr_config_2(
rel_width=1.0, nrows=2, ncols=2, height_to_width_ratio=1 / 2.0
)
)
def make_figure(fig, data):
importlib.reload(plth)
importlib.reload(expdef)
best_runs = data["best_runs"]
axes = plth.make_grid_iclr(fig, grid_type="2x2")
optss = [
[expdef.SGD_M, expdef.ADAM_M, expdef.SIGN_M, expdef.NORM_M],
[expdef.SGD_NM, expdef.ADAM_NM, expdef.SIGN_NM, expdef.NORM_NM],
]
dss = [expdef.PTB, expdef.WT2]
for i, ds in enumerate(dss):
results = data_h.new_select(
best_runs, selections=expdef.SPECIAL[expdef.NO_DROPOUT][ds][expdef.FULL]
)
results = results[results["epoch"].notna()]
results = results[results["epoch"] <= expdef.EPOCH_CLIP[ds][expdef.FULL]]
for j, opts in enumerate(optss):
ax = axes[j][i]
for opt in opts:
res_opt = data_h.new_select(results, selections=[expdef.OPTIMS[opt]])
agg = res_opt.groupby("epoch")["training_loss"].agg(
[min, max, "median"]
)
n_samples = 3200
ax.plot(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["median"], n_samples),
**plth.linestyles_nm[opt],
)
fillstyle = plth.fillstyles[opt]
ax.fill_between(
plth.subsample(agg.index, n_samples),
plth.subsample(agg["min"], n_samples),
plth.subsample(agg["max"], n_samples),
**fillstyle,
)
ylims_trainingloss = {
expdef.PTB: [10**-1.0, 10],
expdef.WT2: [10**-1.0, 10**1.3],
}
for i, ds in enumerate(dss):
for ax in [axes[0][i], axes[1][i]]:
ax.set_ylim(ylims_trainingloss[ds])
ax.set_yscale("log")
axes[0][i].set_title(plth.fdisplaynames(ds))
axes[1][i].set_xlabel("Epoch")
axes[0][0].set_ylabel("Training loss")
axes[1][0].set_ylabel("Training loss")
if __name__ == "__main__":
settings(plt)
fig = plt.figure()
make_figure(fig, load_data())
plth.save(fig, name=os.path.join("output", Path(__file__).stem))
| 2,749 | 29.555556 | 85 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/setup.py
|
"""Setup file for ExpLib."""
from setuptools import find_packages, setup
with open("requirements.txt") as f:
requirements = f.read().splitlines()
with open("requirements-nocc.txt") as f:
requirements += f.read().splitlines()
setup(
author="Author",
name="ExpLib",
version="0.1.0",
description="description",
long_description="long description",
long_description_content_type="text/markdown",
install_requires=requirements,
url="https://github.com/author/repo",
license="MIT",
packages=find_packages(),
zip_safe=False,
python_requires=">=3.7",
include_package_data=True,
)
| 634 | 25.458333 | 50 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/__main__.py
|
import json
import os
from explib import cli_helper
from .experiment import Experiment
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Experiment runner")
parser.add_argument(
"experiment_file",
type=str,
help="Experiment file",
default=None,
)
parser.add_argument(
"--disable_wandb",
action="store_true",
help="Debug mode, won't create wandb logs",
default=False,
)
parser.add_argument("--gpu", nargs="?", type=str, default="cuda", help="GPU name")
parser.add_argument(
"--dummy_run",
action="store_true",
help="Enable dummy run for tests - only runs one iteration per epoch",
default=False,
)
cli_helper.add_dotenv_option(parser)
args = parser.parse_args()
cli_helper.load_dotenv_if_required(args)
if args.experiment_file is None:
        raise ValueError("an experiment file is required")
with open(args.experiment_file, "r") as fp:
exp_dict = json.load(fp)
filename = os.path.split(args.experiment_file)[1]
basename, ext = os.path.splitext(filename)
slug, uuid = basename.rsplit("_", 1)
exp = Experiment(
exp_dict,
slug,
uuid,
args.disable_wandb,
args.gpu,
args.dummy_run,
)
exp.run()
| 1,383 | 25.113208 | 86 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/experiment.py
|
import time
import torch
import math
import os
import random
import datetime
from pathlib import Path
import numpy as np
from explib import config
from explib.expmaker.experiment_defs import make_wuuid, exp_dict_to_str
from . import logging, problem
class Experiment:
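    """A single training run: seeds the RNGs, sets up logging and the problem
    (model, dataset, optimizer), then alternates training and evaluation loops
    for `max_epoch` epochs, optionally computing gradient-noise norms, logging
    metrics, and saving the model."""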
def __init__(
self,
exp_dict,
slug,
exp_uuid,
disable_wandb,
gpu,
dummy_run=False,
):
"""Create an experiment"""
self.seed = exp_dict["seed"]
self.apply_seed()
cpu_only = exp_dict.get("cpu_only", False)
self.device = gpu if torch.cuda.is_available() and not cpu_only else "cpu"
self.data_logger = logging.init_logging_for_exp(
slug,
exp_uuid,
exp_dict,
disable_wandb,
additional_config={
"device": self.device,
"uuid": exp_uuid,
"wuuid": make_wuuid(exp_dict),
"exp_dict_str": exp_dict_to_str(exp_dict),
},
)
logging.info(f"Creating experiment. Received experiment dictionnary {exp_dict}")
self.max_epoch = exp_dict["max_epoch"]
self.fake_full_batch_mode = exp_dict.get("fake_full_batch_mode", False)
self.drop_last = exp_dict.get("drop_last", False)
self.trained_norms = exp_dict.get("trained_norms", False)
self.init_noise_norm = exp_dict.get("init_noise_norm", False)
exp_dict["device"] = self.device
exp_dict["trained_norms"] = self.trained_norms
exp_dict["exp_uuid"] = exp_uuid
exp_dict["dummy_run"] = dummy_run
self.problem = problem.init(exp_dict)
self.save_path = os.path.join(
config.get_workspace(), exp_dict["dataset"], exp_uuid
)
self.model_dir = os.path.join(self.save_path, "model")
Path(self.model_dir).mkdir(parents=True, exist_ok=True)
self.model_file = os.path.join(self.model_dir, "model.pt")
if not os.path.isfile(self.model_file):
self.model_file = os.path.join(
self.model_dir, "model_{}.pt".format(self.max_epoch)
)
def apply_seed(self):
np.random.seed(self.seed)
random.seed(self.seed)
torch.manual_seed(self.seed)
torch.cuda.manual_seed_all(self.seed)
def run(self):
"""Run the experiment"""
start_time = time.time()
# TODO: Allow to continue training from a nonzero epoch?
logging.info("Starting experiment run")
starting_epoch = 0
if self.init_noise_norm or self.trained_norms:
logging.info("Initial run to compute noise norms")
r_start = time.time()
self.problem.calc_norms(norm_epoch=0)
r_end = time.time()
logging.info(f"Norm computation time: {r_end - r_start}")
logging.info("Initial evaluation")
r_start = time.time()
initial_train_metrics = self.problem.eval_loop(is_validation=False)
print(initial_train_metrics)
initial_valid_metrics = self.problem.eval_loop(is_validation=True)
self.data_logger(initial_valid_metrics, commit=False)
self.data_logger(initial_train_metrics)
r_end = time.time()
logging.info(f"Initial evaluation time: {r_end - r_start}")
epochs_to_compute_noise_norm = [
1,
int(self.max_epoch * 0.1),
int(self.max_epoch * 0.25),
int(self.max_epoch * 0.5),
int(self.max_epoch * 0.75),
]
for epoch in range(starting_epoch, self.max_epoch):
logging.info(f"Epoch {epoch}/{self.max_epoch}")
if self.trained_norms and epoch in epochs_to_compute_noise_norm:
logging.info(f"Computing noise norms at epoch {epoch}")
self.problem.calculate_noise_norm(epoch=epoch)
# run training loop
epoch_begin_time = time.time()
train_loss, func_vals, gnorms_1, gnorms_2 = self.problem.train_loop()
epoch_end_time = time.time()
epoch_training_time = epoch_end_time - epoch_begin_time
if math.isnan(train_loss) or math.isinf(train_loss):
if math.isnan(train_loss):
self.data_logger({"training_error": "nan"})
else:
self.data_logger({"training_error": "inf"})
break
# run eval loop
logging.info(f"Running evaluation")
train_metrics = self.problem.eval_loop(is_validation=False)
self.data_logger(train_metrics, commit=False)
valid_metrics = self.problem.eval_loop(is_validation=True)
self.data_logger(valid_metrics, commit=False)
self.data_logger(
{
"epoch": epoch,
"average_training_loss": train_loss,
"function_values": func_vals,
"norm_squared_gradients": gnorms_2,
"norm_squared_gradients_l1": gnorms_1,
"epoch_training_time": epoch_training_time,
}
)
# save model
if not os.path.isfile(self.model_file):
with open(self.model_file, "wb") as f:
torch.save(self.problem.model.state_dict(), f)
if self.trained_norms:
self.problem.calculate_noise_norm(epoch=self.max_epoch)
end_time = time.time()
self.data_logger(
{"exp_runtime": str(datetime.timedelta(seconds=end_time - start_time))}
)
| 5,622 | 34.815287 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/cli_helper.py
|
from explib import config
def add_dotenv_option(parser):
parser.add_argument(
"--dotenv",
type=str,
help=".env file to override local environment variables (including workspace)",
default=None,
)
return parser
def load_dotenv_if_required(args):
if getattr(args, "dotenv", None) is not None:
config.load_dotenv_file(args["dotenv"])
| 391 | 22.058824 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/config.py
|
import os
from dotenv import load_dotenv
def load_dotenv_file(path=None):
"""Load a dotenv file from path (defaults to cwd if None)"""
if path is None:
load_dotenv(verbose=True, override=True)
else:
        load_dotenv(dotenv_path=path, verbose=True, override=True)
def get_workspace():
return os.path.realpath(os.environ["EXPLIB_WORKSPACE"])
def get_wandb_project():
return os.environ["EXPLIB_WANDB_PROJECT"]
def get_notification_email():
return os.environ.get("EXPLIB_NOTIFICATION_EMAIL", None)
def get_wandb_entity():
return os.environ["EXPLIB_WANDB_ENTITY"]
def get_slurm_partition():
return os.environ.get("EXPLIB_SLURM_PARTITION", None)
def get_conda_env():
return os.environ.get("EXPLIB_LOAD_CONDA", None)
def get_env_file_to_source():
return os.environ.get("EXPLIB_ENVFILE", None)
def get_slurm_account():
return os.environ.get("EXPLIB_SLURM_ACCOUNT", "def-schmidtm")
def get_console_logging_level():
return os.environ.get("EXPLIB_CONSOLE_LOGGING_LEVEL", "INFO")
def get_all():
return {
"workspace": get_workspace(),
"wandb_project": get_wandb_project(),
"wandb_entity": get_wandb_entity(),
"slurm_account": get_slurm_account(),
"notification_email": get_notification_email(),
"console_logging_level": get_console_logging_level(),
}
| 1,368 | 22.603448 | 65 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/util.py
|
import torch
import torch.nn as nn
def get_grads(model):
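    """Return the gradients of all trainable parameters, flattened and
    concatenated into a single 1-D tensor."""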
res = []
for p in model.parameters():
if p.requires_grad:
res.append(p.grad.view(-1))
grad_flat = torch.cat(res)
return grad_flat
INIT_STD = 0.02
PROJ_INIT_STD = 0.01
def init_weight(weight):
nn.init.normal_(weight, 0.0, INIT_STD)
def init_bias(bias):
nn.init.constant_(bias, 0.0)
def weights_init(m):
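    """Weight initialization for the Transformer-XL style modules (Linear,
    AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax, LayerNorm, TransformerLM),
    intended to be applied module-by-module, e.g. via `model.apply(weights_init)`."""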
classname = m.__class__.__name__
if classname.find("Linear") != -1:
if hasattr(m, "weight") and m.weight is not None:
init_weight(m.weight)
if hasattr(m, "bias") and m.bias is not None:
init_bias(m.bias)
elif classname.find("AdaptiveEmbedding") != -1:
if hasattr(m, "emb_projs"):
for i in range(len(m.emb_projs)):
if m.emb_projs[i] is not None:
nn.init.normal_(m.emb_projs[i], 0.0, PROJ_INIT_STD)
elif classname.find("Embedding") != -1:
if hasattr(m, "weight"):
init_weight(m.weight)
elif classname.find("ProjectedAdaptiveLogSoftmax") != -1:
if hasattr(m, "cluster_weight") and m.cluster_weight is not None:
init_weight(m.cluster_weight)
if hasattr(m, "cluster_bias") and m.cluster_bias is not None:
init_bias(m.cluster_bias)
if hasattr(m, "out_projs"):
for i in range(len(m.out_projs)):
if m.out_projs[i] is not None:
nn.init.normal_(m.out_projs[i], 0.0, PROJ_INIT_STD)
elif classname.find("LayerNorm") != -1:
if hasattr(m, "weight"):
nn.init.normal_(m.weight, 1.0, INIT_STD)
if hasattr(m, "bias") and m.bias is not None:
init_bias(m.bias)
elif classname.find("TransformerLM") != -1:
if hasattr(m, "r_emb"):
init_weight(m.r_emb)
if hasattr(m, "r_w_bias"):
init_weight(m.r_w_bias)
if hasattr(m, "r_r_bias"):
init_weight(m.r_r_bias)
if hasattr(m, "r_bias"):
init_bias(m.r_bias)
def disable_running_stats(m):
if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
m.track_running_stats = False
def enable_running_stats(m):
if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
m.track_running_stats = True
| 2,310 | 30.22973 | 73 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/__init__.py
|
"""ExpLib"""
import json
from . import dataset, expmaker, logging, model, optim
from .experiment import Experiment
| 116 | 18.5 | 54 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/language_loader.py
|
import os, sys
import glob
from collections import Counter, OrderedDict
import numpy as np
import torch
import subprocess
# Code copied from https://github.com/kimiyoung/transformer-xl
from explib import config
class Vocab(object):
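    """Word-level vocabulary: counts symbols from text files, builds
    symbol-to-index maps (optionally from a fixed vocab file), and encodes
    text into LongTensors of indices."""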
def __init__(
self,
special=[],
min_freq=0,
max_size=None,
lower_case=True,
delimiter=None,
vocab_file=None,
):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == "":
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ["<S>"] + symbols + ["<S>"]
elif add_eos:
return symbols + ["<eos>"]
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print("counting file {} ...".format(path))
assert os.path.exists(path)
sents = []
with open(path, "r", encoding="utf-8") as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(" line {}".format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print("counting {} sents ...".format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(" line {}".format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx["<UNK>"]
def build_vocab(self):
if self.vocab_file:
print("building vocab from {}".format(self.vocab_file))
self._build_from_file(self.vocab_file)
print("final vocab size {}".format(len(self)))
else:
print(
"building vocab with min_freq={}, max_size={}".format(
self.min_freq, self.max_size
)
)
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print(
"final vocab size {} from {} unique tokens".format(
len(self), len(self.counter)
)
)
def encode_file(
self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False
):
if verbose:
print("encoding file {} ...".format(path))
assert os.path.exists(path)
encoded = []
with open(path, "r", encoding="utf-8") as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(" line {}".format(idx))
symbols = self.tokenize(
line, add_eos=add_eos, add_double_eos=add_double_eos
)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print("encoding {} sents ...".format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(" line {}".format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, "{}_idx".format(sym.strip("<>")), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), "Index {} out of range".format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert "<eos>" not in sym
assert hasattr(self, "unk_idx")
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return " ".join([self.get_sym(idx) for idx in indices])
else:
return " ".join(
[self.get_sym(idx) for idx in indices if idx not in exclude]
)
def convert_to_sent_from_tensor(self, indices):
sents = []
for sent in indices:
sents.append(" ".join([self.get_sym(int(idx)) for idx in sent]))
return sents
def __len__(self):
return len(self.idx2sym)
class LMOrderedIterator(object):
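    """Iterator over an ordered token stream for language modelling: reshapes
    the stream into `bsz` parallel sequences (optionally removing outlier
    batches) and yields (data, target, seq_len) chunks of length `bptt`."""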
def __init__(
self,
data,
bsz,
bptt,
device="cpu",
ext_len=None,
drop_last=False,
outliers_filename=None,
):
"""
data -- LongTensor -- the LongTensor is strictly ordered
"""
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
# Work out how cleanly we can divide the dataset into bsz parts.
self.n_step = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, self.n_step * bsz)
if outliers_filename is not None:
outlier_indices = np.load(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"outliers",
outliers_filename,
)
)
outlier_indices = sorted(
list(map(int, np.ndarray.tolist(outlier_indices))), reverse=True
)
for idx in outlier_indices:
data = torch.cat(
[data[0 : idx * self.bptt], data[(idx + 1) * self.bptt :]]
)
self.n_step = data.size(0) // bsz
# Evenly divide the data across the bsz batches.
self.data = data.view(bsz, -1).t().contiguous().to(device)
# Number of mini-batches
self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
self.drop_last = drop_last
if self.drop_last and (self.n_step + self.bptt - 1) % self.bptt != 0:
self.n_batch = self.n_batch - 1
def __len__(self):
return self.n_batch
def get_batch(self, i, bptt=None):
if bptt is None:
bptt = self.bptt
seq_len = min(bptt, self.data.size(0) - 1 - i)
end_idx = i + seq_len
beg_idx = max(0, i - self.ext_len)
data = self.data[beg_idx:end_idx]
target = self.data[i + 1 : i + 1 + seq_len]
return data, target, seq_len
def get_fixlen_iter(self, start=0):
end = self.data.size(0) - 1
if self.drop_last:
end = self.data.size(0) - 1 - ((self.data.size(0) - 1) % self.bptt)
for i in range(start, end, self.bptt):
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
max_len = self.bptt + max_deviation * std
i = start
while True:
bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.0
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
data, target, seq_len = self.get_batch(i, bptt)
i += seq_len
yield data, target, seq_len
if i >= self.data.size(0) - 2:
break
def __iter__(self):
return self.get_fixlen_iter()
class LMShuffledIterator(object):
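    """Iterator over a list of (optionally shuffled) sentence tensors, packing
    them into fixed-size (bptt x bsz) batches of (data, target) tensors."""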
def __init__(self, data, bsz, bptt, device="cpu", ext_len=None, shuffle=False):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.bsz = bsz
self.bptt = bptt
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = (
np.random.permutation(len(self.data))
if self.shuffle
else np.array(range(len(self.data)))
)
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.bsz
data = torch.LongTensor(self.bptt, self.bsz)
target = torch.LongTensor(self.bptt, self.bsz)
n_retain = 0
while True:
# data : [n_retain+bptt x bsz]
# target : [bptt x bsz]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.bsz):
n_filled = 0
try:
while n_filled < self.bptt:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
# first n_retain tokens are retained from last batch
data[
n_retain + n_filled : n_retain + n_filled + n_new, i
] = streams[i][:n_new]
target[n_filled : n_filled + n_new, i] = streams[i][
1 : n_new + 1
]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data = data.to(self.device)
target = target.to(self.device)
yield data, target, self.bptt
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.bptt, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class Corpus(object):
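    """Builds the vocabulary for a dataset and encodes its train/valid/test
    splits; `get_iterator` constructs the matching batch iterators."""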
def __init__(self, path, dataset, *args, **kwargs):
self.dataset = dataset
self.vocab = Vocab(*args, **kwargs)
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
self.vocab.count_file(os.path.join(path, "train.txt"))
self.vocab.count_file(os.path.join(path, "valid.txt"))
self.vocab.count_file(os.path.join(path, "test.txt"))
elif self.dataset == "wt103":
self.vocab.count_file(os.path.join(path, "train.txt"))
elif self.dataset == "lm1b":
train_path_pattern = os.path.join(
path,
"1-billion-word-language-modeling-benchmark-r13output",
"training-monolingual.tokenized.shuffled",
"news.en-*",
)
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ["ptb", "wt2", "wt103"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True
)
elif self.dataset in ["enwik8", "text8"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True, add_eos=False
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True, add_eos=False
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True, add_eos=False
)
elif self.dataset == "lm1b":
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=False, add_double_eos=True
)
def get_iterator(self, split, *args, **kwargs):
if split == "train":
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif split in ["valid", "test"]:
data = self.valid if split == "valid" else self.test
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, "cache.pt")
if os.path.exists(fn):
print("Loading cached dataset...")
corpus = torch.load(fn)
else:
print("Producing dataset {}...".format(dataset))
kwargs = {}
if dataset in ["wt103", "wt2"]:
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = False
elif dataset == "ptb":
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = True
elif dataset == "lm1b":
kwargs["special"] = []
kwargs["lower_case"] = False
kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
elif dataset in ["enwik8", "text8"]:
pass
corpus = Corpus(datadir, dataset, **kwargs)
torch.save(corpus, fn)
return corpus
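# Minimal usage sketch (paths and hyperparameters are illustrative):
#   corpus = get_lm_corpus("/path/to/datasets/penn", "ptb")
#   tr_iter = corpus.get_iterator("train", 32, 70, device="cpu")
#   for data, target, seq_len in tr_iter:
#       ...  # data, target: LongTensors of shape (seq_len, batch_size)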
def ptb_loader(
batch_size,
device,
tgt_len,
drop_last=False,
outliers_filename=None,
):
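    """Download Penn Treebank into the workspace if needed and return
    (train_iter, test_iter, ntokens) language-modelling iterators."""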
datadir = os.path.join(config.get_workspace(), "datasets", "penn")
cwd = os.path.dirname(os.path.realpath(__file__))
if not os.path.isdir(datadir):
result = subprocess.run(
[
"sh",
"./get_ptb.sh",
os.path.abspath(os.path.join(config.get_workspace(), "datasets")),
],
check=True,
cwd=cwd,
capture_output=True,
text=True,
)
print("Shell get_ptb.sh: stdout")
print(result.stdout)
print("Shell get_ptb.sh: stderr")
print(result.stderr)
corpus = get_lm_corpus(datadir, "ptb")
ntokens = len(corpus.vocab)
tr_iter = corpus.get_iterator(
"train",
batch_size,
tgt_len,
device=device,
ext_len=0,
drop_last=drop_last,
outliers_filename=outliers_filename,
)
te_iter = corpus.get_iterator("test", batch_size, tgt_len, device=device, ext_len=0)
return tr_iter, te_iter, ntokens
def wikitext2_loader(
batch_size,
device,
tgt_len,
drop_last=False,
):
datadir = os.path.join(config.get_workspace(), "datasets", "wikitext-2")
cwd = os.path.dirname(os.path.realpath(__file__))
if not os.path.isdir(datadir):
result = subprocess.run(
[
"sh",
"./get_wikitext2.sh",
os.path.abspath(os.path.join(config.get_workspace(), "datasets")),
],
check=True,
cwd=cwd,
capture_output=True,
text=True,
)
print("Shell get_wikitext2.sh: stdout")
print(result.stdout)
print("Shell get_wikitext2.sh: stderr")
print(result.stderr)
corpus = get_lm_corpus(datadir, "wt2")
ntokens = len(corpus.vocab)
tr_iter = corpus.get_iterator(
"train",
batch_size,
tgt_len,
device=device,
ext_len=0,
drop_last=drop_last,
)
te_iter = corpus.get_iterator("test", batch_size, tgt_len, device=device, ext_len=0)
return tr_iter, te_iter, ntokens, corpus
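# --- Usage sketch (illustrative): a minimal way to drive the PTB loader above.
# Assumes config.get_workspace() points at a writable workspace so get_ptb.sh can
# fetch the data, and that LMOrderedIterator (defined earlier in this file) is
# iterable and yields training batches.
if __name__ == "__main__":
    _device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _tr_iter, _te_iter, _ntokens = ptb_loader(batch_size=32, device=_device, tgt_len=128)
    print("PTB vocabulary size:", _ntokens)
    for _batch in _tr_iter:
        # The exact batch structure depends on LMOrderedIterator.
        print("first training batch:", type(_batch))
        break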
| 17,819 | 31.459016 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/glue_loader.py
|
import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
MAX_LENGTH = 128
EVAL_BASE_BATCH_SIZE = 64
model_name = "bert-base-cased"
task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}
def glue_loader(task_name, accelerator, batch_size):
if task_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
"glue",
task_name,
cache_dir=os.path.join(config.get_workspace(), "datasets"),
)
# Labels
if task_name is not None:
is_regression = task_name == "stsb"
if not is_regression:
label_list = raw_datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
autoconfig = AutoConfig.from_pretrained(
model_name, num_labels=num_labels, finetuning_task=task_name
)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForSequenceClassification.from_pretrained(
model_name,
from_tf=False,
config=autoconfig,
)
# Preprocessing the datasets
if task_name is not None:
sentence1_key, sentence2_key = task_to_keys[task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [
name for name in raw_datasets["train"].column_names if name != "label"
]
if (
"sentence1" in non_label_column_names
and "sentence2" in non_label_column_names
):
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = {
i: label_name_to_id[label_list[i]] for i in range(num_labels)
}
else:
print(
"Your model seems to have been trained with labels, but they don't match the dataset: ",
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result.",
)
elif task_name is None:
label_to_id = {v: i for i, v in enumerate(label_list)}
if label_to_id is not None:
model.config.label2id = label_to_id
model.config.id2label = {id: label for label, id in autoconfig.label2id.items()}
elif task_name is not None and not is_regression:
model.config.label2id = {l: i for i, l in enumerate(label_list)}
model.config.id2label = {id: label for label, id in autoconfig.label2id.items()}
padding = False
def preprocess_function(examples):
# Tokenize the texts
texts = (
(examples[sentence1_key],)
if sentence2_key is None
else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(
*texts, padding=padding, max_length=MAX_LENGTH, truncation=True
)
if "label" in examples:
if label_to_id is not None:
# Map labels to IDs (not necessary for GLUE tasks)
result["labels"] = [label_to_id[l] for l in examples["label"]]
else:
# In all cases, rename the column to labels because the model will expect that.
result["labels"] = examples["label"]
return result
with accelerator.main_process_first():
processed_datasets = raw_datasets.map(
preprocess_function,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets[
"validation_matched" if task_name == "mnli" else "validation"
]
# for index in random.sample(range(len(train_dataset)), 3):
# print(f"Sample {index} of the training set: {train_dataset[index]}.")
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorWithPadding(
tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=batch_size
)
train_eval_dataloader = DataLoader(
train_dataset,
collate_fn=data_collator,
batch_size=max(EVAL_BASE_BATCH_SIZE, batch_size),
)
eval_dataloader = DataLoader(
eval_dataset,
collate_fn=data_collator,
batch_size=max(EVAL_BASE_BATCH_SIZE, batch_size),
)
return (
train_dataloader,
train_eval_dataloader,
eval_dataloader,
num_labels,
task_name,
is_regression,
)
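# --- Usage sketch (illustrative): how glue_loader might be called end-to-end.
# Assumes the `accelerate` package is installed and that downloading the GLUE
# "sst2" task from the HuggingFace hub into the workspace is acceptable.
if __name__ == "__main__":
    from accelerate import Accelerator
    _accelerator = Accelerator()
    (
        _train_dl,
        _train_eval_dl,
        _eval_dl,
        _num_labels,
        _task,
        _is_regression,
    ) = glue_loader("sst2", _accelerator, batch_size=32)
    print(f"Task {_task}: {_num_labels} labels, regression={_is_regression}")
    _batch = next(iter(_train_dl))
    print("Batch keys:", list(_batch.keys()))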
| 6,484 | 34.828729 | 117 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/torchvision_loader.py
|
import os
import torch
import torchvision
from explib import config
from torchvision import transforms
from torchvision.datasets import MNIST, USPS
def torchvision_loader(dataset_name, batch_size, drop_last=False, shuffle=True):
if dataset_name == "mnist":
loader = MNIST
elif dataset_name == "usps":
loader = USPS
else:
raise Exception("Dataset {} not available".format(dataset_name))
train_dataloader = torch.utils.data.DataLoader(
loader(
os.path.join(config.get_workspace(), "datasets"),
train=True,
download=True,
transform=transforms.Compose(
[
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
),
),
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
)
valid_dataloader = torch.utils.data.DataLoader(
loader(
os.path.join(config.get_workspace(), "datasets"),
train=False,
download=True,
transform=transforms.Compose(
[
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
),
),
batch_size=batch_size,
shuffle=False,
# drop_last=drop_last,
)
return train_dataloader, valid_dataloader
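# --- Usage sketch (illustrative): quick shape check for the MNIST loader above.
# Assumes the workspace directory is writable so torchvision can download MNIST.
if __name__ == "__main__":
    _train_dl, _valid_dl = torchvision_loader("mnist", batch_size=64)
    _images, _labels = next(iter(_train_dl))
    # Images are resized to 32x32 and normalized to roughly [-1, 1].
    print(_images.shape, _labels.shape)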
| 1,511 | 26.490909 | 80 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/squad_loader.py
|
import tokenize
import datasets
import os
from datasets import load_dataset
from accelerate import Accelerator
from explib import config
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoTokenizer,
DataCollatorWithPadding,
)
from torch.utils.data.dataloader import DataLoader
import numpy as np
BERT_BASE_PRETRAINED = "bert-base-uncased"
DISTILBERT = "distilbert-base-uncased"
def squad_loader(
dataset_name,
batch_size,
tgt_len,
doc_stride,
model_name,
drop_last=False,
fake_full_batch_mode=False,
shuffle=True,
outliers_filename=None,
):
split = "train"
if fake_full_batch_mode:
seed = np.random.get_state()[1][0] % 13
start = seed * 6144
end = (seed + 1) * 6144
split = "train[{}:{}]".format(start, end)
if dataset_name == "squad":
raw_datasets = load_dataset(
"squad",
cache_dir=os.path.join(config.get_workspace(), "datasets"),
split=split,
)
else:
raw_datasets = load_dataset(
"adversarial_qa",
"dbert",
cache_dir=os.path.join(config.get_workspace(), "datasets"),
split=split,
)
column_names = raw_datasets.column_names
question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]
if model_name == "bert_base_pretrained":
model_name = BERT_BASE_PRETRAINED
else:
model_name = DISTILBERT
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
max_seq_length = tokenizer.model_max_length
# Training preprocessing
def prepare_train_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding=False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# The offset mappings will give us a map from token to character position in the original context. This will
# help us compute the start_positions and end_positions.
offset_mapping = tokenized_examples.pop("offset_mapping")
# Let's label those examples!
tokenized_examples["start_positions"] = []
tokenized_examples["end_positions"] = []
for i, offsets in enumerate(offset_mapping):
# We will label impossible answers with the index of the CLS token.
input_ids = tokenized_examples["input_ids"][i]
cls_index = input_ids.index(tokenizer.cls_token_id)
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
answers = examples[answer_column_name][sample_index]
# If no answers are given, set the cls_index as answer.
if len(answers["answer_start"]) == 0:
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Start/end character index of the answer in the text.
start_char = answers["answer_start"][0]
end_char = start_char + len(answers["text"][0])
# Start token index of the current span in the text.
token_start_index = 0
while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
token_start_index += 1
# End token index of the current span in the text.
token_end_index = len(input_ids) - 1
while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
token_end_index -= 1
# Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
if not (
offsets[token_start_index][0] <= start_char
and offsets[token_end_index][1] >= end_char
):
tokenized_examples["start_positions"].append(cls_index)
tokenized_examples["end_positions"].append(cls_index)
else:
# Otherwise move the token_start_index and token_end_index to the two ends of the answer.
# Note: we could go after the last offset if the answer is the last word (edge case).
while (
token_start_index < len(offsets)
and offsets[token_start_index][0] <= start_char
):
token_start_index += 1
tokenized_examples["start_positions"].append(token_start_index - 1)
while offsets[token_end_index][1] >= end_char:
token_end_index -= 1
tokenized_examples["end_positions"].append(token_end_index + 1)
return tokenized_examples
# if "train" not in raw_datasets:
# raise ValueError("--do_train requires a train dataset")
train_examples = raw_datasets
# Create train feature from dataset
train_dataset = train_examples.map(
prepare_train_features,
batched=True,
num_proc=4,
remove_columns=column_names,
load_from_cache_file=True,
desc="Running tokenizer on train dataset",
)
# Validation preprocessing
def prepare_validation_features(examples):
        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
tokenized_examples = tokenizer(
examples[question_column_name if pad_on_right else context_column_name],
examples[context_column_name if pad_on_right else question_column_name],
truncation="only_second" if pad_on_right else "only_first",
max_length=max_seq_length,
stride=doc_stride,
return_overflowing_tokens=True,
return_offsets_mapping=True,
padding=False,
)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
tokenized_examples["example_id"] = []
for i in range(len(tokenized_examples["input_ids"])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
sequence_ids = tokenized_examples.sequence_ids(i)
context_index = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
tokenized_examples["offset_mapping"][i] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i])
]
return tokenized_examples
if dataset_name == "squad":
eval_examples = load_dataset(
"squad",
cache_dir=os.path.join(config.get_workspace(), "datasets"),
split="validation",
)
else:
eval_examples = load_dataset(
"adversarial_qa",
"dbert",
cache_dir=os.path.join(config.get_workspace(), "datasets"),
split="validation",
)
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_train_features,
batched=True,
num_proc=4,
remove_columns=column_names,
load_from_cache_file=True,
desc="Running tokenizer on validation dataset",
)
eval_dataset_valid = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=4,
remove_columns=column_names,
load_from_cache_file=True,
desc="Running tokenizer on validation dataset",
)
train_dataset_for_eval = train_examples.map(
prepare_validation_features,
batched=True,
num_proc=4,
remove_columns=column_names,
load_from_cache_file=True,
desc="Running tokenizer on train dataset for eval",
)
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=None)
if outliers_filename is not None:
outlier_indices = np.load(
os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"outliers",
outliers_filename,
)
)
outlier_indices = np.ndarray.tolist(outlier_indices)
indices = [
i for i in range(len(train_dataset)) if str(i) not in outlier_indices
]
train_dataset = train_dataset.select(indices)
train_dataset_for_eval = train_dataset_for_eval.select(indices)
train_dataloader = DataLoader(
train_dataset,
shuffle=shuffle,
collate_fn=data_collator,
batch_size=batch_size,
drop_last=drop_last,
)
# eval_dataset_for_model = eval_dataset_prepared.remove_columns(
# ["example_id", "offset_mapping"]
# )
eval_dataloader = DataLoader(
eval_dataset, collate_fn=data_collator, batch_size=batch_size
)
# train_dataset_eval_for_model = train_dataset_for_eval.remove_columns(
# ["example_id", "offset_mapping"]
# )
train_dataloader_for_eval = DataLoader(
train_dataset,
shuffle=False,
collate_fn=data_collator,
batch_size=batch_size,
drop_last=drop_last,
)
return (
train_dataloader,
train_dataloader_for_eval,
eval_dataloader,
eval_dataset_valid,
eval_examples,
train_dataset_for_eval,
train_examples,
tokenizer,
)
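# --- Usage sketch (illustrative): unpacking the return values of squad_loader.
# Assumes SQuAD and the pretrained tokenizer can be downloaded into the workspace;
# the batch size and sequence settings below are arbitrary.
if __name__ == "__main__":
    (
        _train_dl,
        _train_dl_for_eval,
        _eval_dl,
        _eval_dataset_valid,
        _eval_examples,
        _train_dataset_for_eval,
        _train_examples,
        _tokenizer,
    ) = squad_loader(
        "squad",
        batch_size=8,
        tgt_len=384,
        doc_stride=128,
        model_name="bert_base_pretrained",
    )
    _batch = next(iter(_train_dl))
    print("Keys in a training batch:", list(_batch.keys()))
    print("Tokenizer max length:", _tokenizer.model_max_length)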
| 11,801 | 38.209302 | 118 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/cifar_loader.py
|
import os
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from explib import config
def cifar_loader(
batch_size,
load_100=False,
drop_last=False,
fake_full_batch_mode=False,
shuffle=True,
):
data_class = "CIFAR100" if load_100 else "CIFAR10"
stats = (
{"mean": [0.5071, 0.4867, 0.4408], "std": [0.2675, 0.2565, 0.2761]}
if load_100
else {"mean": [0.491, 0.482, 0.447], "std": [0.247, 0.243, 0.262]}
)
trans = [
transforms.ToTensor(),
lambda t: t.type(torch.get_default_dtype()),
transforms.Normalize(**stats),
]
tr_data = getattr(datasets, data_class)(
root=os.path.join(config.get_workspace(), "datasets"),
train=True,
download=True,
transform=transforms.Compose(trans),
)
te_data = getattr(datasets, data_class)(
root=os.path.join(config.get_workspace(), "datasets"),
train=False,
download=True,
transform=transforms.Compose(trans),
)
if fake_full_batch_mode:
tr_data = torch.utils.data.Subset(tr_data, torch.randperm(batch_size))
shuffle = False
train_loader = torch.utils.data.DataLoader(
dataset=tr_data,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
)
val_loader = torch.utils.data.DataLoader(
dataset=te_data,
batch_size=batch_size,
shuffle=False,
# drop_last=drop_last,
)
return train_loader, val_loader
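# --- Usage sketch (illustrative): quick shape check for the CIFAR-10 loader above.
# Assumes the workspace directory is writable so torchvision can download the data.
if __name__ == "__main__":
    _train_loader, _val_loader = cifar_loader(batch_size=128)
    _images, _labels = next(iter(_train_loader))
    # Expected shapes: [128, 3, 32, 32] images and [128] integer labels.
    print(_images.shape, _labels.shape)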
| 1,561 | 23.40625 | 78 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/dataset/__init__.py
|
"""Datasets.
General interface to load a dataset
"""
import os
from pathlib import Path
from explib import config
from .cifar_loader import cifar_loader
from .glue_loader import glue_loader
from .language_loader import ptb_loader, wikitext2_loader
from .squad_loader import squad_loader
from .torchvision_loader import torchvision_loader
MNIST = "mnist"
WIKITEXT2 = "wikitext2"
CIFAR10 = "cifar10"
CIFAR100 = "cifar100"
PTB = "ptb"
SQUAD = "squad"
ADVERSARIAL_QA = "adversarial_qa"
GLUE = "glue"
AVAILABLE_DATASET = [
MNIST,
WIKITEXT2,
CIFAR10,
CIFAR100,
PTB,
SQUAD,
# GLUE,
]
def init(
dataset_name,
batch_size,
device,
extra_params=None,
drop_last=False,
fake_full_batch_mode=False,
accelerator=None,
shuffle=True,
outliers_filename=None,
):
extra_params = extra_params if extra_params is not None else {}
dataset_path = os.path.join(config.get_workspace(), "datasets")
Path(dataset_path).mkdir(parents=True, exist_ok=True)
if fake_full_batch_mode and dataset_name not in [CIFAR10, CIFAR100, SQUAD]:
raise NotImplementedError(
"Fake full batch mode not implemented for {dataset_name}"
)
if dataset_name == MNIST:
return torchvision_loader(
dataset_name, batch_size, drop_last=drop_last, shuffle=shuffle
)
elif dataset_name == WIKITEXT2:
return wikitext2_loader(
batch_size,
device,
extra_params.get("tgt_len", 128),
drop_last=drop_last,
)
elif dataset_name == CIFAR10:
return cifar_loader(
batch_size,
drop_last=drop_last,
fake_full_batch_mode=fake_full_batch_mode,
shuffle=shuffle,
)
elif dataset_name == CIFAR100:
return cifar_loader(
batch_size,
load_100=True,
drop_last=drop_last,
fake_full_batch_mode=fake_full_batch_mode,
)
elif dataset_name == PTB:
return ptb_loader(
batch_size,
device,
extra_params.get("tgt_len", 128),
drop_last=drop_last,
outliers_filename=outliers_filename,
)
elif dataset_name == SQUAD or dataset_name == ADVERSARIAL_QA:
return squad_loader(
dataset_name,
batch_size,
extra_params.get("tgt_len", 384),
extra_params.get("doc_stride", 128),
model_name=extra_params.get("model_name", "bert_base_pretrained"),
drop_last=drop_last,
fake_full_batch_mode=fake_full_batch_mode,
shuffle=shuffle,
outliers_filename=outliers_filename,
)
else:
raise Exception("Dataset {} not available".format(dataset_name))
| 2,805 | 26.242718 | 79 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/experiment_groups.py
|
"""Definition of "groups" of experiments mapping (dataset x batch size) to keys
to be selected from the experiment dataframe.
Simplified from explib/results/plotting.py
"""
##
# Dataset names
MNIST = "mnist"
WT2 = "wikitext2"
PTB = "ptb"
CIFAR10 = "cifar10"
SQUAD = "squad"
ALL_DS = [MNIST, CIFAR10, PTB, WT2, SQUAD]
##
# Batch size names
S = "S"
M = "M"
L = "L"
XL = "XL"
FULL = "Full"
ALL_BS = [S, M, L, XL, FULL]
##
# Experiment group names
NO_DROPOUT = "no-dropout"
NORM_ABLATION_FULL = "full-batch-training-normalized-optimizers"
FULL_BATCH = "full-batch-training"
INCREASING_BATCH_SIZE = "increasing-batch-size"
NORM_ABLATION = "normalization-ablation"
##
# Optimizer names
SGD_ANY = "SGD"
SGD_NM = "sgd-m"
SGD_M = "sgd+m"
ADAM_NM = "adam-m"
ADAM_M = "adam+m"
SIGN_NM = "SignDescent"
SIGN_M = "SignDescent+m"
BLOCK_NM = "BlockNormalizedGD"
BLOCK_M = "BlockNormalizedGD+m"
NORM_NM = "NormalizedGD"
NORM_M = "NormalizedGD+m"
RSD_NM = "RescaledSignDescent"
RSD_M = "RescaledSignDescent+m"
STANDARD_OPT = [SGD_M, SGD_NM, ADAM_M, ADAM_NM]
NORMALIZED_OPT = [
SIGN_M,
SIGN_NM,
NORM_NM,
NORM_M,
]
ALL_MAIN_OPT = STANDARD_OPT + NORMALIZED_OPT
##
# Helper functions to generate selection dictionaries
def no_dropout(ds, max, batch, acc):
return {
"group": NO_DROPOUT,
"dataset": ds,
"max_epoch": max,
"accumulate_steps": acc,
"batch_size": batch,
}
def norm_abl(ds, max, batch, acc=None):
acc_dict = {}
if acc is not None:
acc_dict["accumulate_steps"] = acc
return {
"group": NORM_ABLATION,
"dataset": ds,
"max_epoch": max,
"batch_size": batch,
**acc_dict,
}
def incr(ds, batch, max, acc=None):
acc_dict = {}
if acc is not None:
acc_dict["accumulate_steps"] = acc
return {
"group": INCREASING_BATCH_SIZE,
"dataset": ds,
"batch_size": batch,
"max_epoch": max,
**acc_dict,
}
def full(ds):
return {"group": FULL_BATCH, "dataset": ds}
def full_norm(ds, max):
return {"group": NORM_ABLATION_FULL, "dataset": ds, "max_epoch": max}
##
# Main experiment groups
#
# Experimental settings used for the main experiment
EXPERIMENTS = {
MNIST: {
S: [
norm_abl(ds=MNIST, batch=256, max=100),
incr(ds=MNIST, batch=256, max=100),
],
M: [
norm_abl(ds=MNIST, max=100, batch=1024),
incr(ds=MNIST, max=100, batch=1024),
],
L: [
norm_abl(ds=MNIST, max=200, batch=4096),
incr(ds=MNIST, max=200, batch=4096),
],
XL: [
norm_abl(ds=MNIST, max=800, batch=16384),
incr(ds=MNIST, max=800, batch=16384),
],
FULL: [
full(ds=MNIST),
full_norm(ds=MNIST, max=800),
],
},
CIFAR10: {
S: [
incr(ds=CIFAR10, max=100, batch=64),
norm_abl(ds=CIFAR10, max=100, batch=64),
],
M: [
incr(ds=CIFAR10, max=100, batch=256),
norm_abl(ds=CIFAR10, max=100, batch=256),
],
L: [
incr(ds=CIFAR10, max=200, batch=1024),
norm_abl(ds=CIFAR10, max=200, batch=1024),
],
XL: [
incr(ds=CIFAR10, max=800, batch=4096),
norm_abl(ds=CIFAR10, max=400, batch=4096),
],
FULL: [
full_norm(ds=CIFAR10, max=800),
full(ds=CIFAR10),
],
},
PTB: {
S: [
incr(ds=PTB, max=100, batch=16),
norm_abl(ds=PTB, max=100, batch=16),
],
M: [
incr(ds=PTB, max=100, batch=64),
norm_abl(ds=PTB, max=100, batch=64),
],
L: [
incr(ds=PTB, max=200, batch=256),
norm_abl(ds=PTB, max=200, batch=256),
],
XL: [
incr(ds=PTB, max=800, batch=1024),
norm_abl(ds=PTB, max=800, batch=1024),
],
FULL: [
full_norm(ds=PTB, max=3200),
full(ds=PTB),
],
},
WT2: {
S: [
incr(ds=WT2, batch=20, max=40, acc=1),
norm_abl(ds=WT2, batch=20, max=40, acc=1),
],
M: [
incr(ds=WT2, batch=80, max=40, acc=1),
norm_abl(ds=WT2, batch=80, max=40, acc=1),
],
L: [
incr(ds=WT2, batch=80, max=80, acc=4),
norm_abl(ds=WT2, batch=80, max=80, acc=4),
],
XL: [
incr(ds=WT2, batch=80, max=160, acc=16),
norm_abl(ds=WT2, batch=80, max=160, acc=16),
],
FULL: [
full(ds=WT2),
full_norm(ds=WT2, max=320),
],
},
SQUAD: {
S: [
incr(ds=SQUAD, batch=32, max=5, acc=1),
norm_abl(ds=SQUAD, max=10, batch=16, acc=2),
],
M: [
incr(ds=SQUAD, batch=32, max=5, acc=4),
norm_abl(ds=SQUAD, max=10, batch=16, acc=8),
],
L: [
incr(ds=SQUAD, batch=32, max=5, acc=16),
norm_abl(ds=SQUAD, max=10, batch=16, acc=32),
],
XL: [
incr(ds=SQUAD, batch=32, max=20, acc=64),
norm_abl(ds=SQUAD, max=10, batch=16, acc=128),
],
FULL: [
{
"group": "fix-full-batch-training-squad",
"max_epoch": 80,
"batch_size": 16,
"accumulate_steps": 1370 * 4,
}
],
},
}
EFF_BS = {
CIFAR10: {S: 64, M: 256, L: 1024, XL: 4096, FULL: 50000},
MNIST: {S: 256, M: 1024, L: 4096, XL: 16384, FULL: 60000},
PTB: {S: 16, M: 64, L: 256, XL: 1024, FULL: 26520},
WT2: {S: 20, M: 80, L: 320, XL: 1280, FULL: 16240},
SQUAD: {S: 32, M: 128, L: 512, XL: 2048, FULL: 87680},
}
##
# Sanity check experiments
SPECIAL = {
NO_DROPOUT: {
PTB: {FULL: [no_dropout(ds=PTB, max=3200, acc=20, batch=1326)]},
WT2: {FULL: [no_dropout(ds=WT2, max=320, batch=80, acc=203)]},
}
}
##
# At what epoch to truncate the runs
EPOCH_CLIP = {
MNIST: {S: 100, M: 100, L: 200, XL: 400, FULL: 800},
CIFAR10: {S: 100, M: 100, L: 100, XL: 200, FULL: 800},
PTB: {S: 100, M: 100, L: 200, XL: 400, FULL: 3200},
WT2: {S: 40, M: 40, L: 40, XL: 80, FULL: 320},
SQUAD: {S: 5, M: 5, L: 5, XL: 8, FULL: 60},
}
EPOCH_CLIP_START = {
MNIST: {S: 4, M: 14, L: 58, XL: 267, FULL: 800},
CIFAR10: {S: 1, M: 4, L: 16, XL: 64, FULL: 768},
PTB: {S: 2, M: 8, L: 32, XL: 128, FULL: 3200},
WT2: {S: 1, M: 2, L: 7, XL: 27, FULL: 320},
SQUAD: {S: 1, M: 1, L: 1, XL: 2, FULL: 60},
}
EPOCH_CLIP_START_NEW = {
MNIST: {S: 4, M: 14, L: 58, XL: 267, FULL: 800},
CIFAR10: {S: 1, M: 4, L: 16, XL: 64, FULL: 768},
PTB: {S: 2, M: 8, L: 32, XL: 128, FULL: 3200},
WT2: {S: 1, M: 3, L: 10, XL: 34, FULL: 320},
SQUAD: {S: 1, M: 2, L: 3, XL: 4, FULL: 60},
}
EPOCH_CLIP_START_IGNORE_S = {
MNIST: {S: 4, M: 14, L: 58, XL: 267, FULL: 800},
CIFAR10: {S: 1, M: 4, L: 16, XL: 64, FULL: 768},
PTB: {S: 2, M: 8, L: 32, XL: 128, FULL: 3200},
WT2: {S: 1, M: 2, L: 7, XL: 28, FULL: 320},
SQUAD: {S: 1, M: 1, L: 1, XL: 2, FULL: 60},
}
##
# Named optimizer selections
OPTIMS = {
SGD_ANY: {"opt.name": "SGD"},
SGD_NM: {"opt.name": "SGD", "opt.momentum": 0.0},
SGD_M: {"opt.name": "SGD", "opt.momentum": 0.9},
ADAM_NM: {"opt.name": "Adam", "opt.b1": 0.0},
ADAM_M: {"opt.name": "Adam", "opt.b1": 0.9},
SIGN_NM: {"opt.name": "SignDescent", "opt.momentum": 0.0},
SIGN_M: {"opt.name": "SignDescent", "opt.momentum": 0.9},
NORM_NM: {"opt.name": "NormalizedGD", "opt.momentum": 0.0},
NORM_M: {"opt.name": "NormalizedGD", "opt.momentum": 0.9},
BLOCK_NM: {"opt.name": "BlockNormalizedGD", "opt.momentum": 0.0},
BLOCK_M: {"opt.name": "BlockNormalizedGD", "opt.momentum": 0.9},
RSD_NM: {"opt.name": "RescaledSignDescent", "opt.momentum": 0.0, "opt.norm": 1.0},
RSD_M: {"opt.name": "RescaledSignDescent", "opt.momentum": 0.9, "opt.norm": 1.0},
}
| 8,066 | 25.800664 | 86 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/__main__.py
|
import json
import os
import sys
import explib.results.wandb_cleanups
from explib.results import data
from explib import cli_helper
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Tools to download results")
parser.add_argument(
"--download",
action="store_true",
help="Download all the data from wandb",
default=False,
)
parser.add_argument(
"--download-new",
action="store_true",
help="Download all the data from wandb",
default=False,
)
parser.add_argument(
"--summary",
action="store_true",
help="Download the summary of all runs from wandb",
default=False,
)
parser.add_argument(
"--concat",
action="store_true",
help="Concatenate all runs as one file for faster loading",
default=False,
)
parser.add_argument(
"--download-group",
default=None,
type=str,
help="Download all the data from wandb for a specific group only",
)
parser.add_argument(
"--checkup",
action="store_true",
help="Checks if the runs have finished successfully and flag in wandb",
default=False,
)
parser.add_argument(
"--checkup-group",
default=None,
type=str,
help="Checks if the runs have finished successfully and flag in wandb (for a group only)",
)
cli_helper.add_dotenv_option(parser)
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
args = parser.parse_args()
cli_helper.load_dotenv_if_required(args)
if args.summary:
data.download_summary(download_runs=False)
elif args.download:
data.download_summary(download_runs=True)
elif args.download_new:
data.download_summary(download_runs=True, only_new=True)
elif args.download_group is not None:
        data.download_summary(download_runs=True, group=args.download_group)
elif args.concat:
data.concatenate_all_runs()
elif args.checkup:
explib.results.wandb_cleanups.checkup()
elif args.checkup_group is not None:
explib.results.wandb_cleanups.checkup(group=args.checkup_group)
| 2,251 | 28.246753 | 98 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/plotting.py
|
import os
import pickle
import warnings
from datetime import datetime
from math import atan2, degrees
from pathlib import Path
import numpy
import numpy as np
import pandas as pd
# Label line with line2D label data
from explib import config
from explib.results import cleanup as datacleaning
from explib.results import data as data_h
from explib.results import experiment_groups as expdef
from matplotlib import ticker
from matplotlib.dates import date2num
def subsample_idx(length, n, log=False):
"""Returns a n-subset of [0,length-1]"""
if log:
log_grid = np.logspace(start=0, stop=np.log10(length - 1), num=n - 1)
idx = [0] + list(log_grid.astype(int))
else:
lin_grid = np.linspace(start=0, stop=length - 1, num=n)
idx = list(lin_grid.astype(int))
idx = sorted(list(set(idx)))
return idx
def subsample(xs, n=100, log=False):
aslist = list(xs)
    return [aslist[i] for i in subsample_idx(len(aslist), n=n, log=log)]
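# --- Illustration (not part of the original helpers): subsample_idx picks n
# indices from [0, length - 1], linearly or log-spaced (always keeping index 0),
# and subsample applies it to any sequence.
def _subsample_example():
    assert subsample_idx(1000, 5) == [0, 249, 499, 749, 999]
    assert subsample(range(10), n=5) == [0, 2, 4, 6, 9]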
def labelLine(
line,
x,
label=None,
align=True,
drop_label=False,
manual_rotation=0,
ydiff=0.0,
**kwargs,
):
"""Label a single matplotlib line at position x.
Parameters
----------
line : matplotlib.lines.Line
The line holding the label
x : number
The location in data unit of the label
label : string, optional
The label to set. This is inferred from the line by default
drop_label : bool, optional
If True, the label is consumed by the function so that subsequent calls to e.g. legend
do not use it anymore.
kwargs : dict, optional
Optional arguments passed to ax.text
"""
ax = line.axes
xdata = line.get_xdata()
ydata = line.get_ydata()
mask = np.isfinite(ydata)
if mask.sum() == 0:
raise Exception("The line %s only contains nan!" % line)
# Find first segment of xdata containing x
if len(xdata) == 2:
i = 0
xa = min(xdata)
xb = max(xdata)
else:
for i, (xa, xb) in enumerate(zip(xdata[:-1], xdata[1:])):
if min(xa, xb) <= x <= max(xa, xb):
break
else:
raise Exception("x label location is outside data range!")
def x_to_float(x):
"""Make sure datetime values are properly converted to floats."""
return date2num(x) if isinstance(x, datetime) else x
xfa = x_to_float(xa)
xfb = x_to_float(xb)
ya = ydata[i]
yb = ydata[i + 1]
y = ya + (yb - ya) * (x_to_float(x) - xfa) / (xfb - xfa)
if not (np.isfinite(ya) and np.isfinite(yb)):
warnings.warn(
(
"%s could not be annotated due to `nans` values. "
"Consider using another location via the `x` argument."
)
% line,
UserWarning,
)
return
if not label:
label = line.get_label()
if drop_label:
line.set_label(None)
if align:
# Compute the slope and label rotation
screen_dx, screen_dy = ax.transData.transform(
(xfa, ya)
) - ax.transData.transform((xfb, yb))
rotation = (degrees(atan2(screen_dy, screen_dx)) + 90) % 180 - 90
else:
rotation = manual_rotation
# Set a bunch of keyword arguments
if "color" not in kwargs:
kwargs["color"] = line.get_color()
if ("horizontalalignment" not in kwargs) and ("ha" not in kwargs):
kwargs["ha"] = "center"
if ("verticalalignment" not in kwargs) and ("va" not in kwargs):
kwargs["va"] = "center"
if "backgroundcolor" not in kwargs:
kwargs["backgroundcolor"] = ax.get_facecolor()
if "clip_on" not in kwargs:
kwargs["clip_on"] = True
if "zorder" not in kwargs:
kwargs["zorder"] = 2.5
ax.text(x, y + ydiff, label, rotation=rotation, **kwargs)
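# --- Usage sketch (illustrative): labelLine places a text label directly on a
# curve instead of in a legend. A minimal, self-contained example:
def _label_line_example():
    import matplotlib.pyplot as plt
    xs = np.linspace(1, 10, 50)
    fig, ax = plt.subplots()
    (line,) = ax.plot(xs, xs**2, label="quadratic")
    labelLine(line, x=5)  # annotate the curve near x = 5
    return fig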
def rgb_to_unit(xs):
"""Convert a list of RGB numbers [1, 255] to a list of unit [0, 1]"""
return [x / 255.0 for x in xs]
COLORS = {
"Google Blue": {
"color": "#4184f3",
"active": "#3a53c5",
"disabled": "#cad8fc",
},
"Google Red": {
"color": "#db4437",
"active": "#8f2a0c",
"disabled": "#e8c6c1",
},
"Google Yellow": {
"color": "#f4b400",
"active": "#db9200",
"disabled": "#f7e8b0",
},
"Google Green": {
"color": "#0f9d58",
"active": "#488046",
"disabled": "#c2e1cc",
},
"Purple": {
"color": "#aa46bb",
"active": "#5c1398",
"disabled": "#d7bce6",
},
"Teal": {
"color": "#00abc0",
"active": "#47828e",
"disabled": "#c2eaf2",
},
"Deep Orange": {
"color": "#ff6f42",
"active": "#ca4a06",
"disabled": "#f2cbba",
},
"Lime": {
"color": "#9d9c23",
"active": "#7f771d",
"disabled": "#f1f4c2",
},
"Indigo": {
"color": "#5b6abf",
"active": "#3e47a9",
"disabled": "#c5c8e8",
},
"Pink": {
"color": "#ef6191",
"active": "#ca1c60",
"disabled": "#e9b9ce",
},
"Deep Teal": {
"color": "#00786a",
"active": "#2b4f43",
"disabled": "#bededa",
},
"Deep Pink": {
"color": "#c1175a",
"active": "#75084f",
"disabled": "#de8cae",
},
"Gray": {
"color": "#9E9E9E",
"active": "#424242",
"disabled": "F5F5F5",
},
"VB": {
"blue": rgb_to_unit([0, 119, 187]),
"red": rgb_to_unit([204, 51, 17]),
"orange": rgb_to_unit([238, 119, 51]),
"cyan": rgb_to_unit([51, 187, 238]),
"teal": rgb_to_unit([0, 153, 136]),
"magenta": rgb_to_unit([238, 51, 119]),
"grey": rgb_to_unit([187, 187, 187]),
},
"PTyellow": rgb_to_unit([221, 170, 51]),
"PTred": rgb_to_unit([187, 85, 102]),
"PTblue": rgb_to_unit([0, 68, 136]),
"PTMC": {
"lightyellow": "#EECC66",
"lightred": "#EE99AA",
"lightblue": "#6699CC",
"darkyellow": "#997700",
"darkred": "#994455",
"darkblue": "#004488",
},
}
GOOGLE_STYLE_COLORS = {
"b1": COLORS["Google Blue"]["color"],
"b2": COLORS["Google Blue"]["active"],
"g1": COLORS["Google Green"]["color"],
"g2": COLORS["Google Green"]["active"],
"t1": COLORS["Google Yellow"]["color"],
"t2": COLORS["Google Yellow"]["active"],
"gr1": COLORS["Gray"]["color"],
"gr2": COLORS["Gray"]["active"],
"p1": COLORS["Deep Pink"]["color"],
"p2": COLORS["Deep Pink"]["active"],
"r1": COLORS["VB"]["orange"],
"r2": COLORS["VB"]["red"],
"gray": "#808080",
"black": "#000000",
}
BASE_COLORS = {
"b1": [0, 0, 0], # COLORS["PTblue"],
"b2": [0, 0, 0], # COLORS["PTblue"],
"r1": COLORS["PTred"],
"r2": COLORS["PTred"],
"t1": COLORS["PTyellow"],
"t2": COLORS["PTyellow"],
"gr1": COLORS["Gray"]["color"],
"gr2": COLORS["Gray"]["active"],
"g1": COLORS["PTblue"],
"g2": COLORS["PTblue"],
"p1": COLORS["Deep Pink"]["color"],
"p2": COLORS["Deep Pink"]["active"],
"gray": "#808080",
"black": "#000000",
}
# Magic constants
_stroke_width = 0.5
_xtick_width = 0.8
_GOLDEN_RATIO = (5.0**0.5 - 1.0) / 2.0
def base_font(*, family="sans-serif"):
# ptmx replacement
fontset = "stix" if family == "serif" else "stixsans"
return {
"text.usetex": False,
"font.sans-serif": ["TeX Gyre Heros"],
"font.serif": ["Times New Roman"],
"mathtext.fontset": fontset,
"mathtext.rm": "Times New Roman",
"mathtext.it": "Times New Roman:italic",
"mathtext.bf": "Times New Roman:bold",
"font.family": family,
}
fontsizes = {
"normal": 9,
"small": 7,
"tiny": 6,
}
def base_fontsize(*, base=10):
fontsizes = {
"normal": base - 1,
"small": base - 3,
"tiny": base - 4,
}
return {
"font.size": fontsizes["normal"],
"axes.titlesize": fontsizes["normal"],
"axes.labelsize": fontsizes["small"],
"legend.fontsize": fontsizes["small"],
"xtick.labelsize": fontsizes["tiny"],
"ytick.labelsize": fontsizes["tiny"],
}
def base_layout(
*,
rel_width=1.0,
nrows=1,
ncols=2,
constrained_layout=False,
tight_layout=False,
height_to_width_ratio=_GOLDEN_RATIO,
base_width_in=5.5,
):
width_in = base_width_in * rel_width
subplot_width_in = width_in / ncols
subplot_height_in = height_to_width_ratio * subplot_width_in
height_in = subplot_height_in * nrows
figsize = (width_in, height_in)
return {
"figure.dpi": 250,
"figure.figsize": figsize,
"figure.constrained_layout.use": constrained_layout,
"figure.autolayout": tight_layout,
        # Padding around axes objects, in inches (matplotlib's default is 3/72 in, i.e. 3 points).
        "figure.constrained_layout.h_pad": 1 / 72,
        "figure.constrained_layout.w_pad": 1 / 72,
        # Space between subplot groups, as a fraction of the subplot widths being separated.
        "figure.constrained_layout.hspace": 0.00,
        "figure.constrained_layout.wspace": 0.00,
}
def base_style():
grid_color = BASE_COLORS["gray"]
text_color = BASE_COLORS["black"]
return {
"text.color": text_color,
"axes.labelcolor": text_color,
"axes.labelpad": 2,
# "axes.spines.left": False,
# "axes.spines.bottom": False,
"axes.spines.top": False,
"axes.spines.right": False,
# # "ytick.minor.left": False,
# # Axes aren't used in this theme, but still set some properties in case the user
# # decides to turn them on.
"axes.edgecolor": grid_color,
"axes.linewidth": _stroke_width,
# default is "line", i.e., below lines but above patches (bars)
# "axes.axisbelow": True,
# #
# "ytick.right": False,
# "ytick.color": grid_color,
# "ytick.labelcolor": text_color,
# "ytick.major.width": _stroke_width,
# "xtick.minor.top": False,
# "xtick.minor.bottom": False,
# "xtick.color": grid_color,
# "xtick.labelcolor": text_color,
# "xtick.major.width": _xtick_width,
# "axes.grid": True,
# "axes.grid.axis": "y",
"ytick.major.pad": 1,
"xtick.major.pad": 1,
"grid.color": grid_color,
# Choose the line width such that it's very subtle, but still serves as a guide.
"grid.linewidth": _stroke_width,
"axes.xmargin": 0,
"axes.ymargin": 0,
"axes.titlepad": 3,
}
def smaller_style():
return {
"axes.labelpad": 2,
"axes.spines.top": False,
"axes.spines.right": False,
"ytick.major.pad": 1,
"xtick.major.pad": 1,
"axes.xmargin": 0,
"axes.ymargin": 0,
"axes.titlepad": 3,
}
def base_config(
*,
rel_width=1.0,
nrows=1,
ncols=4,
family="serif",
height_to_width_ratio=_GOLDEN_RATIO,
):
font_config = base_font(family=family)
fonsize_config = base_fontsize(base=10)
layout_config = base_layout(
rel_width=rel_width,
nrows=nrows,
ncols=ncols,
height_to_width_ratio=height_to_width_ratio,
)
style_config = base_style()
return {**font_config, **fonsize_config, **layout_config, **style_config}
def smaller_config(
*,
rel_width=1.0,
nrows=1,
ncols=4,
family="sans-serif",
height_to_width_ratio=_GOLDEN_RATIO,
):
font_config = base_font(family=family)
fonsize_config = base_fontsize(base=10)
layout_config = base_layout(
rel_width=rel_width,
nrows=nrows,
ncols=ncols,
height_to_width_ratio=height_to_width_ratio,
)
style_config = smaller_style()
return {**font_config, **fonsize_config, **layout_config, **style_config}
def iclr_config_2(
*,
rel_width=1.0,
nrows=1,
ncols=4,
family="sans-serif",
height_to_width_ratio=_GOLDEN_RATIO,
):
font_config = base_font(family=family)
fonsize_config = base_fontsize(base=11)
layout_config = base_layout(
rel_width=rel_width,
nrows=nrows,
ncols=ncols,
height_to_width_ratio=height_to_width_ratio,
base_width_in=5.5,
)
style_config = smaller_style()
return {**font_config, **fonsize_config, **layout_config, **style_config}
def icml_config(
*,
rel_width=1.0,
nrows=1,
ncols=4,
family="sans-serif",
height_to_width_ratio=_GOLDEN_RATIO,
):
font_config = base_font(family=family)
fonsize_config = base_fontsize(base=11)
layout_config = base_layout(
rel_width=rel_width,
nrows=nrows,
ncols=ncols,
height_to_width_ratio=height_to_width_ratio,
base_width_in=6.75,
)
style_config = smaller_style()
return {**font_config, **fonsize_config, **layout_config, **style_config}
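# --- Usage sketch (illustrative): the *_config helpers above return plain
# rcParams dictionaries; one way to apply them is through matplotlib's
# rc_context so the settings stay local to a figure.
def _config_usage_example():
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    with mpl.rc_context(rc=icml_config(nrows=1, ncols=2)):
        fig, axes = plt.subplots(1, 2)
    return fig, axes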
def save(fig, name, tight=False, transparent=False):
print(f"Saving figure {name}.pdf")
fig.savefig(
f"{name}.pdf",
bbox_inches="tight" if tight else None,
transparent=transparent,
)
def hide_frame(*axes, top=True, right=True, left=False, bottom=False):
for ax in axes:
ax.spines["top"].set_visible(not top)
ax.spines["right"].set_visible(not right)
ax.spines["left"].set_visible(not left)
ax.spines["bottom"].set_visible(not bottom)
def hide_all_frame(*axes):
hide_frame(*axes, top=True, right=True, left=True, bottom=True)
def hide_ticklabels(*axes, x=True, y=True):
for ax in axes:
if x:
ax.set_xticklabels([], minor=True)
ax.set_xticklabels([], minor=False)
if y:
ax.set_yticklabels([], minor=True)
ax.set_yticklabels([], minor=False)
def hide_ticks(*axes, x=True, y=True):
for ax in axes:
if x:
ax.set_xticks([], minor=True)
ax.set_xticks([], minor=False)
if y:
ax.set_yticks([], minor=True)
ax.set_yticks([], minor=False)
def flatten(t):
return [item for sublist in t for item in sublist]
##
#
displaynames = {
"small": "S",
"medium": "M",
"large": "L",
"larger": "XL",
"full": "Full",
"resnet18": "ResNet18",
"transformer_encoder": "Transformer",
"transformer_xl": "Transformer XL",
"distilbert_base_pretrained": "DistilBert",
"mnist": "MNIST",
"cifar10": "CIFAR-10",
"ptb": "PTB",
"wikitext2": "WikiText-2",
"squad": "SQuAD",
"sgd": "SGD",
"adam": "Adam",
"sgd+m": "SGD($+$m)",
"adam+m": "Adam($+$m)",
"sgd-m": "SGD($-$m)",
"adam-m": "Adam($-$m)",
"training_loss": "Train Loss",
"training_perf": "Train Perf.",
"validation_perf": "Val. Perf.",
"PlainSGD": "GD",
"SGD": "SGD",
"NormalizedGD": "Norm. GD($-$m)",
"NormalizedGD+m": "Norm. GD($+$m)",
"BlockNormalizedGD": "Block-Normalized",
"SignDescent": "Sign descent($-$m)",
"SignDescent+m": "Sign descent($+$m)",
"RescaledSignDescent": "Rescaled SD($-$m)",
"RescaledSignDescent+m": "Rescaled SD($+$m)",
"train_accuracy": "Train Acc.",
"train_ppl": "Train PPL",
"train_exact_f1": "Train F1",
"valid_accuracy": "Valid Acc.",
"valid_ppl": "Valid PPL",
"valid_exact_f1": "Valid F1",
}
abbrevs_ = {
"sgd": "SGD",
"adam": "Adam",
"sgd+m": "SGD($+$m)",
"adam+m": "Adam($+$m)",
"sgd-m": "SGD($-$m)",
"adam-m": "Adam($-$m)",
"NormalizedGD": "Norm.($-$m)",
"NormalizedGD+m": "Norm.($+$m)",
"BlockNormalizedGD": "Block",
"SignDescent": "Sign($-$m)",
"SignDescent+m": "Sign($+$m)",
"accuracy": "Acc.",
"ppl": "PPL",
"f1": "F1",
"train_accuracy": "Acc.",
"train_ppl": "PPL",
"train_exact_f1": "F1",
"valid_accuracy": "Acc.",
"valid_ppl": "PPL",
"valid_exact_f1": "F1",
}
def abbrev(key):
return abbrevs_.get(key, key)
def fdisplaynames(key):
if key in displaynames:
return displaynames[key]
else:
return key
markersize_small = 3
linewidth_small = 1
linestyles = {
"sgd+m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "-",
"color": BASE_COLORS["b2"],
},
"sgd-m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "--",
"dashes": (4, 5),
"color": BASE_COLORS["b1"],
},
"adam+m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "-",
"color": BASE_COLORS["r2"],
},
"adam-m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "--",
"dashes": (4, 5),
"color": BASE_COLORS["r1"],
},
"NormalizedGD": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "--",
"dashes": (4, 5),
"color": BASE_COLORS["t1"],
},
"BlockNormalizedGD": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "--",
"color": BASE_COLORS["t2"],
},
"SignDescent": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "--",
"dashes": (4, 5),
"color": BASE_COLORS["g2"],
},
"RescaledSignDescent": {
"marker": ".",
"linewidth": linewidth_small,
"markersize": markersize_small,
"linestyle": "--",
"color": BASE_COLORS["g1"],
},
"NormalizedGD+m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "-",
"color": BASE_COLORS["t1"],
},
"BlockNormalizedGD+m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "-",
"color": BASE_COLORS["t2"],
},
"SignDescent+m": {
"marker": ".",
"markersize": markersize_small,
"linewidth": linewidth_small,
"linestyle": "-",
"color": BASE_COLORS["g2"],
},
"RescaledSignDescent+m": {
"marker": ".",
"linewidth": linewidth_small,
"markersize": markersize_small,
"linestyle": "-",
"color": BASE_COLORS["g1"],
},
}
linestyles = {
**linestyles,
"adam+m+bc": linestyles["sgd+m"],
"adam-m+bc": linestyles["sgd-m"],
"adam+m-bc": linestyles["adam+m"],
"adam-m-bc": linestyles["adam-m"],
"SGD": linestyles["sgd-m"],
}
for b2 in [".9", ".6", ".3", ".1", ".0"]:
linestyles[f"adam+m+bc_b2={b2}"] = linestyles["adam+m+bc"]
linestyles[f"adam-m+bc_b2={b2}"] = linestyles["adam-m+bc"]
linestyles[f"adam+m-bc_b2={b2}"] = linestyles["adam+m-bc"]
linestyles[f"adam-m-bc_b2={b2}"] = linestyles["adam-m-bc"]
linestyles_nm = {}
for key, style in linestyles.items():
style_nm = {**style}
style_nm["marker"] = None
linestyles_nm[key] = style_nm
fillstyles = {
k: {"color": linestyles[k]["color"], "alpha": 0.2} for k in linestyles.keys()
}
fillstyles = {
**fillstyles,
"adam+m+bc": fillstyles["sgd+m"],
"adam-m+bc": fillstyles["sgd-m"],
"adam+m-bc": fillstyles["adam+m"],
"adam-m-bc": fillstyles["adam-m"],
"SGD": fillstyles["sgd-m"],
# "NormalizedGD": fillstyles["sgd+m"],
# "BlockNormalizedGD": fillstyles["sgd-m"],
# "SignDescent": fillstyles["adam+m"],
# "RescaledSignDescent": fillstyles["adam-m"],
}
for b2 in [".9", ".6", ".3", ".1", ".0"]:
fillstyles[f"adam+m+bc_b2={b2}"] = fillstyles["adam+m+bc"]
fillstyles[f"adam-m+bc_b2={b2}"] = fillstyles["adam-m+bc"]
fillstyles[f"adam+m-bc_b2={b2}"] = fillstyles["adam+m-bc"]
fillstyles[f"adam-m-bc_b2={b2}"] = fillstyles["adam-m-bc"]
opt_names = ["sgd-m", "sgd+m", "adam-m", "adam+m"]
normalized_opt_names = [
"SGD",
"NormalizedGD",
"BlockNormalizedGD",
"SignDescent",
"RescaledSignDescent",
]
opt_names_normonly = [
"NormalizedGD",
"SignDescent",
"RescaledSignDescent",
"NormalizedGD+m",
"SignDescent+m",
"RescaledSignDescent+m",
]
opt_filters = {
"sgd-m": {"opt.name": "SGD", "opt.momentum": 0.0},
"sgd+m": {"opt.name": "SGD", "opt.momentum": 0.9},
"adam+m": {"opt.name": "Adam", "opt.b1": 0.9},
"adam-m": {"opt.name": "Adam", "opt.b1": 0.0},
"SGD": {"opt.name": "SGD"},
"SignDescent": {"opt.name": "SignDescent", "opt.momentum": 0.0},
"NormalizedGD": {"opt.name": "NormalizedGD", "opt.momentum": 0.0},
"BlockNormalizedGD": {"opt.name": "BlockNormalizedGD", "opt.momentum": 0.0},
"RescaledSignDescent": {
"opt.name": "RescaledSignDescent",
"opt.momentum": 0.0,
"opt.norm": 1.0,
},
"SignDescent+m": {"opt.name": "SignDescent", "opt.momentum": 0.9},
"NormalizedGD+m": {"opt.name": "NormalizedGD", "opt.momentum": 0.9},
"BlockNormalizedGD+m": {"opt.name": "BlockNormalizedGD", "opt.momentum": 0.9},
"RescaledSignDescent+m": {
"opt.name": "RescaledSignDescent",
"opt.momentum": 0.9,
"opt.norm": 1.0,
},
}
problems = {
"LEN": {"model": "lenet5", "dataset": "mnist"},
"RES": {"model": "resnet18", "dataset": "cifar10"},
"TEC": {"model": "transformer_encoder", "dataset": "ptb"},
"TXL": {"model": "transformer_xl", "dataset": "wikitext2"},
"BRT": {"model": "distilbert_base_pretrained", "dataset": "squad"},
}
models_datasets = [
("lenet5", "mnist"),
("resnet18", "cifar10"),
("transformer_encoder", "ptb"),
("distilbert_base_pretrained", "squad"),
]
def make_textbf(str):
return r"$\mathrm{\bf" + str.replace(" ", r"\,") + r"}}$"
def compute_limits(data_min, data_max, margin_p=0.05, logspace=True):
    def get_limits(data_min, data_max, margin_p=0.05):
        """
        Given data min and max, returns limits lim- and lim+
        such that [min, max] is centered in [lim-, lim+]
        and covers a (1 - 2*margin_p) fraction of the interval, i.e.
        lim- + (1 - margin_p) * (lim+ - lim-) = data_max
        lim- + margin_p * (lim+ - lim-) = data_min
        """
        dp = margin_p
W = np.array([[1 - dp, dp], [dp, 1 - dp]]).reshape((2, 2))
data_values = np.array([data_min, data_max]).reshape((-1, 1))
limits = np.linalg.solve(W, data_values)
assert np.allclose(limits[0] + (limits[1] - limits[0]) * (1 - dp), data_max)
assert np.allclose(limits[0] + (limits[1] - limits[0]) * dp, data_min)
return limits[0][0], limits[1][0]
if not logspace:
return get_limits(data_min, data_max, margin_p)
else:
x0, x1 = np.log(data_min), np.log(data_max)
z0, z1 = get_limits(x0, x1, margin_p)
return np.exp(z0), np.exp(z1)
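# --- Illustration (not part of the original helpers): compute_limits pads
# [data_min, data_max] so the data occupies the central (1 - 2 * margin_p)
# fraction of the axis, in linear or log space.
def _compute_limits_example():
    lo, hi = compute_limits(1.0, 100.0, margin_p=0.05, logspace=True)
    # lo ~ 0.77 and hi ~ 129: the data spans the central 90% of a log axis.
    return lo, hi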
def find_best_stepsize(rundata, at_epoch=None, by_key="training_loss_runs"):
if at_epoch is not None:
rundata = data_h.df_select(rundata, epoch_runs=at_epoch)
medians, mins, maxs, xs = data_h.median_min_max_by(
rundata, key="opt.alpha", metric_name=by_key
)
best_alpha_idx = np.nanargmin(maxs)
best_alpha = xs[best_alpha_idx]
return best_alpha
def plot_metric_by_key(ax, metric_name, optname, rundata, key="opt.alpha"):
medians, mins, maxs, xs = data_h.median_min_max_by(
rundata, key=key, metric_name=metric_name
)
ax.plot(
xs,
medians,
**linestyles[optname],
label=displaynames[optname],
)
ax.fill_between(
xs,
mins,
maxs,
**fillstyles[optname],
)
def plot_optim_by_stepsize(ax, metric_name, optname, rundata):
medians, mins, maxs, xs = data_h.median_min_max_by(
rundata, key="opt.alpha", metric_name=metric_name
)
ax.plot(
xs,
medians,
**linestyles[optname],
label=fdisplaynames(optname),
)
ax.fill_between(
xs,
mins,
maxs,
**fillstyles[optname],
)
best_alpha = find_best_stepsize(rundata)
best_alpha_idx = np.where(xs == best_alpha)[0][0]
ax.plot(
xs[best_alpha_idx],
medians[best_alpha_idx],
color="k",
marker="*",
markersize=3,
zorder=5,
)
def higher_is_better(metric_name):
higher = ["accuracy", "f1"]
lower = ["ppl", "loss"]
if any([x in metric_name for x in higher]):
return True
if any([x in metric_name for x in lower]):
return False
raise NotImplementedError(
f"Metric {metric_name} unknown, can't determine if higher is better."
+ f"Only know of higher: {higher}, lower: {lower}"
)
def should_log(metric_name):
to_log = ["ppl", "loss"]
not_log = ["accuracy", "f1"]
if any([x in metric_name for x in to_log]):
return True
if any([x in metric_name for x in not_log]):
return False
raise NotImplementedError(
f"Metric {metric_name} unknown, can't determine if log scale."
+ f"Only know of to_log: {to_log}, lower: {not_log}"
)
def get_metric_at_start_for_dataset(runs, summaries, dataset, metric):
runs_at_start = datacleaning.filter_merge(
summaries, runs, summary_filter={"dataset": dataset}, runs_filter={"epoch": 0}
)
return runs_at_start[f"{metric}_runs"].mean()
def get_metric_limits_for_dataset(runs, summaries, dataset, metric, margin_p=0.2):
if "accuracy" in metric or "f1" in metric:
return [0, 105.0]
all_runs = datacleaning.filter_merge(
summaries, runs, summary_filter={"dataset": dataset}, runs_filter={}
)
start_val = get_metric_at_start_for_dataset(runs, summaries, dataset, metric)
# Replace values of training loss == 0 by nan so we get the second-smallest elem
all_runs["training_loss_runs"] = all_runs["training_loss_runs"].where(
all_runs["training_loss_runs"] != 0.0, np.nan
)
min_val = all_runs[f"{metric}_runs"].min()
max_val = all_runs[f"{metric}_runs"].max()
try:
if higher_is_better(metric):
return compute_limits(start_val, max_val, margin_p=margin_p)
else:
return compute_limits(min_val, start_val, margin_p=margin_p)
except AssertionError:
import pdb
pdb.set_trace()
def accumulate_fitler(accumulate):
return {} if accumulate is None else {"accumulate_steps": accumulate}
def eff_bs(bs, accum):
return bs * (accum if accum is not None else 1)
def clip_metric_at(df_, key, ylims):
scaling = 2 if should_log(key) else 1.05
if higher_is_better(key):
val_ = (1 / scaling) * ylims[0]
df_[key] = np.where(df_[key].isna(), val_, df_[key])
df_[key] = np.where(df_[key] < val_, val_, df_[key])
else:
val_ = scaling * ylims[1]
df_[key] = np.where(df_[key].isna(), val_, df_[key])
df_[key] = np.where(df_[key] > val_, val_, df_[key])
if "accuracy" in key or "f1" in key:
df_[key] = np.where(df_[key] < 0, 0, df_[key])
df_[key] = np.where(df_[key] > 100, 100, df_[key])
return df_
metric_type_to_dset_to_metric = {
"training_loss": {
"mnist": "training_loss",
"cifar10": "training_loss",
"ptb": "training_loss",
"wikitext2": "training_loss",
"squad": "training_loss",
},
"training_perf": {
"mnist": "train_accuracy",
"cifar10": "train_accuracy",
"ptb": "train_ppl",
"wikitext2": "train_ppl",
"squad": "train_exact_f1",
},
"validation_perf": {
"mnist": "valid_accuracy",
"cifar10": "valid_accuracy",
"ptb": "valid_ppl",
"wikitext2": "valid_ppl",
"squad": "valid_exact_f1",
},
}
experiment_settings = {
"fix-full-batch-training-squad": {
"squad": {
"full": {
"clip_epoch": 60,
"max_epoch": 80,
"batch_size": 16,
"accumulate_steps": 1370 * 4,
},
},
},
"no-dropout": {
"wikitext2": {
"full": {
"clip_epoch": 320,
"max_epoch": 320,
"batch_size": 80,
"accumulate_steps": 203,
},
},
"ptb": {
"full": {
"clip_epoch": 800 * 4,
"max_epoch": 800 * 4,
"batch_size": 1326,
"accumulate_steps": 20,
},
},
},
"full_batch": {
"clip_epoch": {
"mnist": 800,
"cifar10": 800,
"ptb": 3200,
"wikitext2": 320,
"squad": 60,
}
},
"norm-ablation": {
"mnist": {
"medium": {"clip_epoch": 100, "max_epoch": 100, "batch_size": 1024},
"large": {"clip_epoch": 200, "max_epoch": 200, "batch_size": 4096},
"larger": {"clip_epoch": 400, "max_epoch": 800, "batch_size": 16384},
},
"cifar10": {
"medium": {"clip_epoch": 100, "max_epoch": 100, "batch_size": 256},
"large": {"clip_epoch": 100, "max_epoch": 200, "batch_size": 1024},
"larger": {"clip_epoch": 200, "max_epoch": 400, "batch_size": 4096},
},
"ptb": {
"medium": {"clip_epoch": 100, "max_epoch": 100, "batch_size": 64},
"large": {"clip_epoch": 200, "max_epoch": 200, "batch_size": 256},
"larger": {"clip_epoch": 400, "max_epoch": 800, "batch_size": 1024},
},
"wikitext2": {
"medium": {
"clip_epoch": 40,
"max_epoch": 40,
"batch_size": 80,
"accumulate_steps": 1,
},
"large": {
"clip_epoch": 40,
"max_epoch": 80,
"batch_size": 80,
"accumulate_steps": 4,
},
"larger": {
"clip_epoch": 80,
"max_epoch": 160,
"batch_size": 80,
"accumulate_steps": 16,
},
},
"squad": {
"medium": {
"clip_epoch": 5,
"max_epoch": 10,
"batch_size": 16,
"accumulate_steps": 2,
},
"large": {
"clip_epoch": 5,
"max_epoch": 10,
"batch_size": 16,
"accumulate_steps": 32,
},
"larger": {
"clip_epoch": 5,
"max_epoch": 10,
"batch_size": 16,
"accumulate_steps": 128,
},
},
},
"norm-ablation-full": {
"clip_epoch": {
"mnist": 800,
"cifar10": 800,
"ptb": 3200,
"wikitext2": 320,
"squad": 60,
},
"max_epoch": {
"mnist": 800,
"cifar10": 800,
"ptb": 3200,
"wikitext2": 320,
"squad": 80,
},
},
"increasing_batch_size": {
"problem_filters": {
"mnist": [
{"batch_size": 256, "max_epoch": 100},
{"batch_size": 1024, "max_epoch": 100},
{"batch_size": 4096, "max_epoch": 200},
{"batch_size": 16384, "max_epoch": 800},
],
"cifar10": [
{"batch_size": 64, "max_epoch": 100},
{"batch_size": 256, "max_epoch": 100},
{"batch_size": 1024, "max_epoch": 200},
{"batch_size": 4096, "max_epoch": 800},
],
"ptb": [
{"batch_size": 16, "max_epoch": 100},
{"batch_size": 64, "max_epoch": 100},
{"batch_size": 256, "max_epoch": 200},
{"batch_size": 1024, "max_epoch": 800},
],
"wikitext2": [
{"batch_size": 20, "max_epoch": 40, "accumulate_steps": 1},
{"batch_size": 80, "max_epoch": 40, "accumulate_steps": 1},
{"batch_size": 80, "max_epoch": 80, "accumulate_steps": 4},
{"batch_size": 80, "max_epoch": 160, "accumulate_steps": 16},
],
"squad": [
{"batch_size": 32, "max_epoch": 5, "accumulate_steps": 1},
{"batch_size": 32, "max_epoch": 5, "accumulate_steps": 4},
{"batch_size": 32, "max_epoch": 5, "accumulate_steps": 16},
{"batch_size": 32, "max_epoch": 20, "accumulate_steps": 64},
],
},
"run_filters": {
"mnist": [
{"epoch": 100},
{"epoch": 100},
{"epoch": 200},
{"epoch": 400},
],
"cifar10": [
{"epoch": 100},
{"epoch": 100},
{"epoch": 100},
{"epoch": 200},
],
"ptb": [
{"epoch": 100},
{"epoch": 100},
{"epoch": 200},
{"epoch": 400},
],
"wikitext2": [
{"epoch": 40},
{"epoch": 40},
{"epoch": 40},
{"epoch": 80},
],
"squad": [
{"epoch": 5},
{"epoch": 5},
{"epoch": 5},
{"epoch": 5},
],
},
},
}
def normalize_y_axis(*axes):
miny, maxy = np.inf, -np.inf
for ax in axes:
y1, y2 = ax.get_ylim()
miny = np.min([miny, y1])
maxy = np.max([maxy, y2])
for ax in axes:
ax.set_ylim([miny, maxy])
def same_xlims(*axes):
minx, maxx = np.inf, -np.inf
for ax in axes:
y1, y2 = ax.get_xlim()
minx = np.min([minx, y1])
maxx = np.max([maxx, y2])
for ax in axes:
ax.set_xlim([minx, maxx])
def make_yaxis_scale_and_ticks(ax, metric_type, dataset, data_ylim, special=None):
metric = metric_type_to_dset_to_metric[metric_type][dataset]
if should_log(metric):
ax.set_yscale("log", base=10)
ax.set_ylim(data_ylim)
ax.yaxis.set_major_locator(ticker.LogLocator(numticks=4))
ax.yaxis.set_minor_locator(ticker.LogLocator(numticks=4))
if dataset == "ptb":
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_ticks([4, 8], major=True)
ax.yaxis.set_ticks([3, 4, 5, 6, 7, 8, 9], minor=True)
if dataset == "squad":
ax.yaxis.set_minor_formatter(ticker.NullFormatter())
ax.yaxis.set_major_formatter(ticker.ScalarFormatter())
ax.yaxis.set_ticks([2, 4], major=True)
ax.yaxis.set_ticks([2, 3, 4, 5, 6], minor=True)
if special == "full batch":
if dataset == "cifar10":
ax.set_ylim([10**-5, 10**2])
# %%
# Dataloader shortcuts
def load_gradnorms(seed=0):
files = {
"mnist_1": f"norm_lenet5_mnist_1_{seed}_Adam_0.npy",
"mnist_256": f"norm_lenet5_mnist_256_{seed}_Adam_0.npy",
"cifar10_1": f"norm_resnet18_cifar10_2_{seed}_Adam_0.npy",
"cifar10_64": f"norm_resnet18_cifar10_64_{seed}_Adam_0.npy",
"ptb_1": f"norm_transformer_encoder_ptb_1_{seed}_Adam_0.npy",
"ptb_16": f"norm_transformer_encoder_ptb_16_{seed}_Adam_0.npy",
"wt2_1": f"norm_transformer_xl_wikitext2_1_{seed}_Adam_0.npy",
"wt2_16": f"norm_transformer_xl_wikitext2_16_{seed}_Adam_0.npy",
"squad_1": f"norm_distilbert_base_pretrained_squad_1_{seed}_SGD_0.npy",
"squad_16": f"norm_distilbert_base_pretrained_squad_16_{seed}_SGD_0.npy",
}
data_dir = os.path.join(config.get_workspace(), "norms_and_text_data", "norms")
return {
dset: np.load(os.path.join(data_dir, dset, file))
for dset, file in files.items()
}
def preprocessed_file(filename):
preprocessed_di = os.path.join(
config.get_workspace(), "preprocessed_results_for_plotting"
)
Path(preprocessed_di).mkdir(parents=True, exist_ok=True)
return os.path.join(preprocessed_di, filename)
def save_preprocessed(data, filename):
with open(preprocessed_file(filename), "wb") as fh:
pickle.dump(data, fh)
def load_preprocessed(filename):
with open(preprocessed_file(filename), "rb") as fh:
return pickle.load(fh)
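# Hedged usage sketch (illustration only): the three helpers above cache
# intermediate plotting data as pickles under the workspace, e.g.
#   save_preprocessed({"xs": [1, 2, 3]}, "example.pk")
#   data = load_preprocessed("example.pk")
# The filename "example.pk" is hypothetical; callers pick their own names.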
markers_loglog = ["o", "s"]
YLIM_TRAINLOSS_LOG = {
expdef.MNIST: [10**-6, 10**1],
expdef.CIFAR10: [10**-7, 10**2],
expdef.PTB: [1.7, 7],
expdef.WT2: [10**-1, 10**1.1],
expdef.SQUAD: [10**-1, 10**1.1],
}
YLIM_TRAINPERF_LOG = {
expdef.MNIST: [0, 110],
expdef.CIFAR10: [0, 110],
expdef.PTB: [10**0, 10**4.5],
expdef.WT2: [10**-0.5, 10**5],
expdef.SQUAD: [0, 110],
}
INIT_LOSSES = {
expdef.MNIST: 2.306351,
expdef.CIFAR10: 6.976949,
expdef.PTB: 9.270155,
expdef.SQUAD: 5.533700,
expdef.WT2: 11.182902,
}
MIN_LOSSES = {
expdef.MNIST: 6.762069e-07,
expdef.CIFAR10: 8.835905e-09,
expdef.PTB: 1.922758e00,
expdef.SQUAD: 8.793967e-03,
expdef.WT2: 4.584191e-02,
}
def get_min_max(ax, axis="x"):
"""Returns the min and max values of the "x" or "y" axis for lines in
ax."""
vals = [v for line in ax.lines for v in (line._x if axis == "x" else line._y)]
return np.min(vals), np.max(vals)
def make_grid(fig, grid_type="2-3"):
from matplotlib import gridspec
if grid_type == "2x2-3":
gs_base = fig.add_gridspec(
nrows=1,
ncols=2,
width_ratios=(2, 3 + 1 / 8),
left=0.06,
right=0.99,
bottom=0.085,
top=0.925,
wspace=0.175,
hspace=0.5,
)
gs_left = gridspec.GridSpecFromSubplotSpec(
2, 2, subplot_spec=gs_base[0], wspace=0.375, hspace=0.425
)
gs_right = gridspec.GridSpecFromSubplotSpec(
2, 3, subplot_spec=gs_base[1], wspace=0.375, hspace=0.425
)
axes = [
[
fig.add_subplot(gs_left[0, 0]),
fig.add_subplot(gs_left[0, 1]),
fig.add_subplot(gs_right[0, 0]),
fig.add_subplot(gs_right[0, 1]),
fig.add_subplot(gs_right[0, 2]),
],
[
fig.add_subplot(gs_left[1, 0]),
fig.add_subplot(gs_left[1, 1]),
fig.add_subplot(gs_right[1, 0]),
fig.add_subplot(gs_right[1, 1]),
fig.add_subplot(gs_right[1, 2]),
],
]
if grid_type == "2-3":
gs_base = fig.add_gridspec(
nrows=1,
ncols=2,
width_ratios=(2, 3 + 1 / 8 + 1 / 32),
left=0.0775,
right=0.99,
bottom=0.19,
top=0.84,
wspace=0.15,
hspace=0.10,
)
gs_left = gridspec.GridSpecFromSubplotSpec(
1, 2, subplot_spec=gs_base[0], wspace=0.375, hspace=0.7
)
gs_right = gridspec.GridSpecFromSubplotSpec(
1, 3, subplot_spec=gs_base[1], wspace=0.375, hspace=0.7
)
axes = [
[
fig.add_subplot(gs_left[0, 0]),
fig.add_subplot(gs_left[0, 1]),
fig.add_subplot(gs_right[0, 0]),
fig.add_subplot(gs_right[0, 1]),
fig.add_subplot(gs_right[0, 2]),
]
]
return axes
def make_grid_iclr(fig, grid_type="2-3", tight=False):
from matplotlib import gridspec
axes = None
if grid_type == "2x2":
gs_base = fig.add_gridspec(
nrows=2,
ncols=2,
left=0.08,
right=0.98,
bottom=0.12,
top=0.94,
wspace=0.2,
hspace=0.3,
)
axes = [
[
fig.add_subplot(gs_base[0, 0]),
fig.add_subplot(gs_base[0, 1]),
],
[
fig.add_subplot(gs_base[1, 0]),
fig.add_subplot(gs_base[1, 1]),
],
]
elif grid_type == "2x3":
gs_base = fig.add_gridspec(
nrows=2,
ncols=3,
left=0.07,
right=0.98,
bottom=0.09,
top=0.94,
wspace=0.3,
hspace=0.3,
)
axes = [
[
fig.add_subplot(gs_base[0, 0]),
fig.add_subplot(gs_base[0, 1]),
fig.add_subplot(gs_base[0, 2]),
],
[
fig.add_subplot(gs_base[1, 0]),
fig.add_subplot(gs_base[1, 1]),
fig.add_subplot(gs_base[1, 2]),
],
]
elif grid_type == "3x5":
gs_base = fig.add_gridspec(
nrows=3,
ncols=5,
left=0.07,
right=0.98,
bottom=0.09,
top=0.94,
wspace=0.3,
hspace=0.3,
)
axes = [[fig.add_subplot(gs_base[i, j]) for j in range(5)] for i in range(3)]
elif grid_type == "2x2-3":
gs_base = fig.add_gridspec(
nrows=1,
ncols=2,
width_ratios=(2, 3 + 1 / 8 + 1 / 16),
left=0.075,
right=0.98,
bottom=0.095,
top=0.915,
wspace=0.175,
hspace=0.5,
)
hspace = 0.3 if tight else 0.6
gs_left = gridspec.GridSpecFromSubplotSpec(
2, 2, subplot_spec=gs_base[0], wspace=0.4, hspace=hspace
)
gs_right = gridspec.GridSpecFromSubplotSpec(
2, 3, subplot_spec=gs_base[1], wspace=0.4, hspace=hspace
)
axes = [
[
fig.add_subplot(gs_left[0, 0]),
fig.add_subplot(gs_left[0, 1]),
fig.add_subplot(gs_right[0, 0]),
fig.add_subplot(gs_right[0, 1]),
fig.add_subplot(gs_right[0, 2]),
],
[
fig.add_subplot(gs_left[1, 0]),
fig.add_subplot(gs_left[1, 1]),
fig.add_subplot(gs_right[1, 0]),
fig.add_subplot(gs_right[1, 1]),
fig.add_subplot(gs_right[1, 2]),
],
]
elif grid_type == "2-3":
gs_base = fig.add_gridspec(
nrows=1,
ncols=2,
width_ratios=(2, 3 + 1 / 8 + 1 / 32),
left=0.095,
right=0.99,
# bottom=0.085,
bottom=0.18,
# top=0.915,
top=0.84,
wspace=0.15,
# wspace=0.15,
hspace=0.5,
# hspace=0.10,
)
gs_left = gridspec.GridSpecFromSubplotSpec(
1, 2, subplot_spec=gs_base[0], wspace=0.4, hspace=0.7
)
gs_right = gridspec.GridSpecFromSubplotSpec(
1, 3, subplot_spec=gs_base[1], wspace=0.4, hspace=0.7
)
axes = [
[
fig.add_subplot(gs_left[0, 0]),
fig.add_subplot(gs_left[0, 1]),
fig.add_subplot(gs_right[0, 0]),
fig.add_subplot(gs_right[0, 1]),
fig.add_subplot(gs_right[0, 2]),
]
]
# print(
# f"Width of plots on the left and right should be equal. "
# + f"Currently ["
# + f"{(axes[0][0]._position.x1 - axes[0][0]._position.x0):.3f}"
# + ", "
# + f"{(axes[0][2]._position.x1 - axes[0][2]._position.x0):.3f}"
# + ")"
# )
return axes
def make_xticks_pow10(ax, xs):
ax.set_xticks(xs)
def format_pow(power):
return "$\\mathdefault{10^" + str(int(power)) + "}$"
labels = (
[format_pow(np.log10(xs[0]))]
+ ["" for i in range(len(xs) - 2)]
+ [format_pow((np.log10(xs[-1])))]
)
ax.set_xticklabels(labels)
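# Hedged usage sketch (illustration only): label only the first and last tick
# as powers of ten. For example, make_xticks_pow10(ax, [1e0, 1e1, 1e2, 1e3])
# labels the leftmost tick "10^0", the rightmost "10^3", and leaves the middle
# ticks unlabeled.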
| 44,397 | 27.961513 | 97 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/cleanup.py
|
import json
import re
from datetime import datetime, timedelta
import explib.results.data as data_h
import numpy as np
import warnings
import pandas as pd
from explib import logging
def clean_data(summary, runs):
"""
All the data cleanup such that the summary and run data can be plotted.
Expects summary and runs to be dataframes,
returns cleaned dataframes.
"""
#
summary, runs = convert_runtimes_to_seconds(summary, runs)
summary, runs = fill_runtime_from_missing_summary(summary, runs)
#
summary, runs = process_tags(summary, runs)
summary, runs = drop_bad_tags(summary, runs)
summary, runs = remove_crashes(summary, runs)
summary, runs = remove_duplicates(summary, runs)
#
summary, runs = bump_epoch_counter(summary, runs)
#
summary, runs = fill_accumulate_steps(summary, runs)
summary, runs = add_dataset_size(summary, runs)
summary, runs = rescale_training_loss_for_squad_with_accumulation(summary, runs)
#
summary, runs = add_update_count(summary, runs)
summary, runs = rescale_accuracy(summary, runs)
#
summary, runs = fill_in_defaults(summary, runs)
return summary, runs
def fill_accumulate_steps(summary, runs):
"""If accumulate steps wasn't set, uses 1 by default"""
summary["accumulate_steps"] = np.where(
summary["accumulate_steps"].isna(), 1, summary["accumulate_steps"]
)
return summary, runs
def add_dataset_size(summary, runs):
"""Add in dataset size and numbers related to it;
the number of minibatches per epoch
and the number of gradient updates per epoch
"""
ds_to_size = {
"mnist": 60000,
"cifar10": 50000,
"ptb": 26560,
"wikitext2": 16317,
"squad": 87714,
}
summary["ds_size"] = np.nan
for k in ds_to_size.keys():
summary["ds_size"] = np.where(
summary["dataset"] == k, ds_to_size[k], summary["ds_size"]
)
summary["grad_updates_per_epoch"] = np.floor(
summary["ds_size"] / (summary["batch_size"] * summary["accumulate_steps"])
)
# max(summary["grad_updates_per_epoch"], 1)
summary["grad_updates_per_epoch"] = np.where(
summary["grad_updates_per_epoch"] == 0, 1, summary["grad_updates_per_epoch"]
)
summary["minibatches_per_epoch"] = np.floor(
summary["ds_size"] / (summary["batch_size"])
)
runs = pd.merge(
left=runs,
right=summary[["dataset", "grad_updates_per_epoch", "minibatches_per_epoch"]],
left_on="id",
right_index=True,
how="left",
)
return summary, runs
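def _example_dataset_size_counts():
    """Hedged worked example (illustration only, not used by the pipeline).
    For wikitext2 with batch_size=80 and accumulate_steps=16, the counts
    computed by add_dataset_size are:
        minibatches_per_epoch  = floor(16317 / 80)        = 203
        grad_updates_per_epoch = floor(16317 / (80 * 16)) = 12
    """
    minibatches_per_epoch = np.floor(16317 / 80)
    grad_updates_per_epoch = np.floor(16317 / (80 * 16))
    return minibatches_per_epoch, grad_updates_per_epoch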
def add_update_count(summary, runs):
"""Adds a column counting the number of parameter updates rather than epochs"""
runs["update_count"] = runs["epoch"] * runs["grad_updates_per_epoch"]
return summary, runs
def rescale_accuracy(summary, runs):
summary["train_accuracy"] = 100 * summary["train_accuracy"]
summary["valid_accuracy"] = 100 * summary["valid_accuracy"]
runs["train_accuracy"] = 100 * runs["train_accuracy"]
runs["valid_accuracy"] = 100 * runs["valid_accuracy"]
return summary, runs
def rescale_training_loss_for_squad_with_accumulation(summary, runs):
"""
In experiment code, we messed up the computation of the loss
by a constant factor due to the use of gradient accumulation.
This fixes it.
    Instead of dividing by the number of gradient computations (minibatches)
    to obtain the average (of the average loss over datapoints) over batches,
    we divided by the number of effective gradient steps.
    This leads to an obvious issue where multiplying the number of accumulation
    steps by 4 would lead to a 4-fold increase in the loss.
    To fix this, we multiply by the number of effective gradient steps per epoch
    and divide by the number of minibatches per epoch;
    the two counts are off by a factor of (roughly) the number of accumulation steps.
"""
summary["training_loss"] = np.where(
summary["dataset"] == "squad",
summary["training_loss"]
* summary["grad_updates_per_epoch"]
/ summary["minibatches_per_epoch"],
summary["training_loss"],
)
runs["training_loss"] = np.where(
runs["dataset"] == "squad",
runs["training_loss"]
* runs["grad_updates_per_epoch"]
/ runs["minibatches_per_epoch"],
runs["training_loss"],
)
return summary, runs
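def _example_squad_loss_correction_factor():
    """Hedged worked example (illustration only, not used by the pipeline).
    For squad with batch_size=32 and accumulate_steps=4:
        minibatches_per_epoch  = floor(87714 / 32)       = 2741
        grad_updates_per_epoch = floor(87714 / (32 * 4)) = 685
    so the reported loss is multiplied by 685 / 2741 ~= 0.25,
    i.e. divided by (roughly) accumulate_steps.
    """
    return np.floor(87714 / (32 * 4)) / np.floor(87714 / 32)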
def bump_epoch_counter(summary, runs):
runs["epoch"] = runs["epoch"] + 1
runs["epoch"] = np.where(runs["step"] == 0, 0, runs["epoch"])
return summary, runs
def remove_crashes(summary, runs):
summary = data_h.df_unselect(summary, status="OOM")
summary = data_h.df_unselect(summary, status="Crash")
summary = data_h.df_unselect(summary, status="NoLogs")
return summary, runs
def process_tags(summary, runs):
summary["tags"] = summary["tags"].apply(lambda x: json.loads(x.replace("'", '"')))
summary["bad_tag"] = summary["tags"].apply(
lambda tags: any([tag.startswith("bad-") for tag in tags])
)
return summary, runs
def drop_bad_tags(summary, runs):
summary = summary.loc[~summary["bad_tag"]]
return summary, runs
def remove_duplicates(summary, runs):
summary["is_duplicate"] = summary["tags"].apply(lambda x: "duplicate" in x)
summary = data_h.df_select(summary, is_duplicate=False)
summary = summary.drop_duplicates(subset="wuuid", keep="last")
return summary, runs
def convert_runtimes_to_seconds(summary, runs):
def exp_runtime_to_seconds(timestring):
if type(timestring) != str: # is most likely nan
return timestring
elif timestring == "":
return np.nan
hours_correction = 0
if "days" in timestring:
            match = re.search(r"([0-9])\sdays,\s(.*)", timestring)
n_days = int(match.group(1))
hours_correction = n_days * 24
time_bit = match.group(2)
t = datetime.strptime(time_bit, "%H:%M:%S.%f")
elif "day" in timestring:
            match = re.search(r"(1)\sday,\s(.*)", timestring)
hours_correction = 24
time_bit = match.group(2)
t = datetime.strptime(time_bit, "%H:%M:%S.%f")
else:
t = datetime.strptime(timestring, "%H:%M:%S.%f")
delta = timedelta(
hours=t.hour + hours_correction, minutes=t.minute, seconds=t.second
)
return delta.total_seconds()
try:
summary["exp_runtime_s"] = summary["exp_runtime"].apply(exp_runtime_to_seconds)
runs["exp_runtime_s"] = runs["exp_runtime"].apply(exp_runtime_to_seconds)
except:
import pdb
pdb.set_trace()
return summary, runs
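# Hedged examples of the conversion above (illustration only):
#   "0:01:30.250000"         -> 90.0 seconds (fractional seconds are dropped;
#                               only hours, minutes and seconds are kept)
#   "1 day, 2:03:04.500000"  -> 24*3600 + 2*3600 + 3*60 + 4 = 93784.0 seconds
#   "2 days, 0:00:10.000000" -> 48*3600 + 10 = 172810.0 seconds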
def fill_runtime_from_missing_summary(summary, runs):
"""Fill missing values in summary.
Currently only fills in the exp_runtime
"""
missing_runtime = summary[summary["exp_runtime_s"].isna()]
missing_summary = summary[summary["status"] == "Finished with no summary"]
if missing_runtime.shape[0] != missing_summary.shape[0]:
logging.debug(
(
f"The number of runs with missing summary ({missing_summary.shape[0]}) "
+ f"does not match the number of runs with missing runtime ({missing_runtime.shape[0]}). "
+ "This might indicate data issues on wandb."
)
)
runs_runtime = runs.groupby("id")["exp_runtime_s"].max()
merged = pd.merge(left=summary, right=runs_runtime, on="id", how="left")
merged = merged.drop(columns=["exp_runtime_s_x", "exp_runtime"])
merged = merged.rename(columns={"exp_runtime_s_y": "exp_runtime"})
n_missing = merged[merged["exp_runtime"].isna()].shape[0]
if n_missing > 0:
logging.debug(
(
"Filling of runtime failed (?). "
+ f"There are still {n_missing} missing values."
)
)
return summary, runs
def fill_in_defaults(summary, runs):
summary = summary.fillna(value={"opt.momentum": 0.0})
return summary, runs
def sanity_check_number_of_runs(bs, dataset, max_epoch, model, runs_df):
if any(runs_df.groupby("opt.alpha").count()["id"] < 3):
print(
f"Issue with {model} {dataset} {max_epoch} {bs}: Too few runs for one of the step-sizes!"
)
if any(runs_df.groupby("opt.alpha").count()["id"] > 3):
print(
f"Issue with {model} {dataset} {max_epoch} {bs}: Too many runs for one of the step-sizes!"
)
print(runs_df.groupby("opt.alpha").count())
def filter_merge(summary, runs, summary_filter, runs_filter):
summary_filtered = data_h.df_select(summary, **summary_filter)
runs_filtered = data_h.df_select(runs, **runs_filter)
runs_filtered = runs_filtered.loc[runs_filtered["id"].isin(summary_filtered.index)]
merged = runs_filtered.join(
other=summary_filtered,
on="id",
how="right",
lsuffix="_runs",
rsuffix="_summary",
)
return merged
| 9,029 | 31.135231 | 106 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/data.py
|
import json
import os
import warnings
from pathlib import Path
from typing import Any, Dict, List
import numpy as np
import pandas as pd
import wandb
from explib import config
from explib.results import experiment_groups as expdef
from tqdm import tqdm
class WandbAPI:
"""Static class to provide a singleton handler to the wandb api."""
api_handler = None
@staticmethod
def get_handler():
if WandbAPI.api_handler is None:
WandbAPI.api_handler = wandb.Api()
return WandbAPI.api_handler
##
#
def flatten_dict(x):
return pd.io.json._normalize.nested_to_record(x)
##
# Folders and magic strings
def get_results_folder():
path = os.path.join(config.get_workspace(), "results")
Path(path).mkdir(parents=True, exist_ok=True)
return path
def get_runs_folder():
path = os.path.join(config.get_workspace(), "results", "runs")
Path(path).mkdir(parents=True, exist_ok=True)
return path
def get_run_path(id):
return os.path.join(get_runs_folder(), f"{id}.csv")
SUMMARY_FILE = os.path.join(get_results_folder(), "summary.csv")
ALLRUNS_FILE = os.path.join(get_results_folder(), "all_runs.csv")
##
# Load data from disk, download if not exist
def list_converter(as_str):
try:
if len(as_str) == 0:
return []
as_str = as_str.replace("'Infinity'", "NaN")
as_str = as_str.replace("'NaN'", "NaN")
return json.loads(as_str)
except:
import pdb
pdb.set_trace()
def get_summary(ignore_per_iter_data=False):
"""Returns a dataframe with summary information about all runs.
Uses the last downloaded .csv if it exists, downloads a new one otherwise
Can be updated manually with ``python -m explib.result --download-summary``
or ``explib.results.data.download_summary``
"""
if not os.path.isfile(SUMMARY_FILE):
download_summary()
def string_bool_table(default=True):
DEFAULT = ""
return {"True": True, "False": False, DEFAULT: default}
mappings = {
"shuffle": string_bool_table(default=True),
"drop_last": string_bool_table(default=False),
"opt.use_bias_correction": string_bool_table(default=True),
}
summary_converters = {
**{k: lambda x: mappings[k][x] for k in mappings.keys()},
"norm_squared_gradients": list_converter,
"norm_squared_gradients_l1": list_converter,
"function_values": list_converter,
}
extra_args = {}
if ignore_per_iter_data:
cols = list(pd.read_csv(SUMMARY_FILE, nrows=1))
cols_to_drop = [
"norm_squared_gradients",
"norm_squared_gradients_l1",
"function_values",
]
usecols = [col for col in cols if col not in cols_to_drop]
extra_args = {"usecols": usecols}
return pd.read_csv(
SUMMARY_FILE,
header=0,
index_col="id",
converters=summary_converters,
**extra_args,
)
def get_run(id):
"""Returns a dataframe of all info for a run.
Returns cached .csv if exists, otherwise downloads from wandb
Force re-download with ``explib.results.data.download_run_by_id``
"""
file_path = get_run_path(id)
if not os.path.isfile(file_path):
download_run_by_id(id)
df = pd.read_csv(file_path, header=0, low_memory=False)
return df
def get_all_runs(ignore_per_iter_data=False):
"""Returns a dataframe with info for all runs.
Uses cached concatenated .csv file if it exists, create it otherwise
Can be updated manually with ``python -m explib.result --concat-runs``
or explib.results.data.concatenate_all_runs()
"""
if not os.path.isfile(ALLRUNS_FILE):
concatenate_all_runs()
summary_converters = {
"norm_squared_gradients": list_converter,
"norm_squared_gradients_l1": list_converter,
"function_values": list_converter,
}
extra_params = {}
if ignore_per_iter_data:
cols = list(pd.read_csv(ALLRUNS_FILE, nrows=1))
cols_to_drop = [
"norm_squared_gradients",
"norm_squared_gradients_l1",
"function_values",
]
usecols = [col for col in cols if col not in cols_to_drop]
extra_params = {"usecols": usecols}
return pd.read_csv(
ALLRUNS_FILE,
header=0,
converters=summary_converters,
dtype={"exp_runtime": "str"},
**extra_params,
)
##
# Data download
def download_run_by_id(id):
"""See `download_run`"""
run = WandbAPI.get_handler().run(config.get_wandb_project() + "/" + id)
download_run(run)
def download_run(arun: wandb.apis.public.Run):
"""Given a Wandb Run, download the full history."""
df = arun.history(samples=arun._attrs["historyLineCount"], pandas=(True))
df.to_csv(get_run_path(arun.id))
def download_summary(download_runs=False, group=None, only_new=False):
"""Download a summary of all runs on the wandb project."""
filters = {"group": group} if group is not None else {}
runs = WandbAPI.get_handler().runs(
config.get_wandb_project(), filters=filters, per_page=1000
)
summaries = []
configs = []
systems = []
miscs = []
for run in tqdm(runs):
summaries.append(flatten_dict(run.summary._json_dict))
configs.append(flatten_dict(run.config))
systems.append(flatten_dict(run._attrs["systemMetrics"]))
miscs.append(
{
"name": run.name,
"id": run.id,
"group": run.group,
"state": run.state,
"tags": run.tags,
"histLineCount": run._attrs["historyLineCount"],
}
)
if download_runs:
if only_new:
run_exists = os.path.isfile(get_run_path(run.id))
if not run_exists:
download_run(run)
else:
download_run(run)
misc_df = pd.DataFrame.from_records(miscs)
summary_df = pd.DataFrame.from_records(summaries)
config_df = pd.DataFrame.from_records(configs)
system_df = pd.DataFrame.from_records(systems)
all_df = pd.concat([misc_df, config_df, summary_df, system_df], axis=1)
all_df.to_csv(SUMMARY_FILE)
##
# Data pre-processing
def concatenate_all_runs():
"""Concatenates all run .csv files into one file."""
summary_df = get_summary()
dfs = []
ids = list(summary_df.index)
for id in tqdm(ids):
dfs.append(get_run(id))
concat_df = pd.concat(dfs, keys=ids)
concat_df.index = concat_df.index.set_names(names=["id", "step"])
concat_df.to_csv(ALLRUNS_FILE)
def filter_out_tags(df, fromlist=None):
"""Removes lines from a dataframe if they are tagged.
Filters out all tags by default. If fromlist is a list of strings,
filters out only those tags.
"""
new_df = df
def filter_any(taglist):
return len(taglist) > 0
def filter_fromlist(taglist):
return any([tag_to_filter in taglist for tag_to_filter in fromlist])
filter = filter_any if fromlist is None else filter_fromlist
def should_filter_out(row):
s_ = str(row["tags"])
s_ = s_.replace("'", '"')
as_list = json.loads(s_)
return filter(as_list)
should_filter = new_df.apply(func=should_filter_out, axis=1)
return df[~should_filter]
##
# Helper functions
def df_foreach(df, column, sortfunc=sorted):
"""Iterates through subsets of df for each unique value of column."""
for unique_value in sortfunc(df[column].unique()):
yield unique_value, df[df[column] == unique_value]
def df_select(df, **kwargs):
"""Select subsets of the dataframe by key/value pairs in kwargs."""
if len(kwargs) == 0:
return df
def makemask(column, value):
if value is None:
return column.isna()
elif isinstance(value, float):
if 0 < value and value < 1e-5:
return np.isclose(column, value, atol=0)
else:
return np.isclose(column, value)
else:
return column == value
selection_masks = [makemask(df[k], v) for k, v in kwargs.items()]
mask = selection_masks[0]
for newmask in selection_masks[1:]:
mask &= newmask
return df[mask]
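def _example_df_select():
    """Hedged usage sketch (illustration only, not used elsewhere). The toy
    dataframe and its values are made up for the example."""
    toy = pd.DataFrame({"dataset": ["mnist", "ptb"], "opt.alpha": [1e-3, 1e-4]})
    mnist_rows = df_select(toy, dataset="mnist")
    # Floats are matched with np.isclose rather than exact equality:
    small_alpha_rows = df_select(toy, **{"opt.alpha": 1e-4})
    return mnist_rows, small_alpha_rows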
def df_unselect(df, **kwargs):
"""Select subsets of the dataframe by key/value pairs in kwargs."""
if len(kwargs) == 0:
return df
    def makemask(column, value):
        if value is None:
            return ~column.isna()
        elif isinstance(value, float):
            return ~np.isclose(column, value)
        else:
            return column != value
selection_masks = [makemask(df[k], v) for k, v in kwargs.items()]
mask = selection_masks[0]
for newmask in selection_masks[1:]:
mask &= newmask
return df[mask]
def median_min_max(df, key):
return df[key].median(), df[key].min(), df[key].max()
def median_min_max_by(dataframe, key, metric_name):
sub_df = dataframe[[key, metric_name]]
groupby = sub_df.groupby(key)
transforms_df = groupby.agg(["min", "max", "median"])
medians = np.array(transforms_df[metric_name]["median"])
mins = np.array(transforms_df[metric_name]["min"])
maxs = np.array(transforms_df[metric_name]["max"])
xs = np.array(transforms_df.index)
return medians, mins, maxs, xs
def make_mask(df, selections: List[Dict[str, Any]]):
def makemask(column, value):
if value is None:
return column.isna()
elif isinstance(value, float):
if 0 < value and value < 1e-5:
return np.isclose(column, value, atol=0)
else:
return np.isclose(column, value)
else:
return column == value
def make_mask_for_dict(selection_dict: Dict[str, Any]):
selection_masks = [makemask(df[k], v) for k, v in selection_dict.items()]
mask = selection_masks[0]
for newmask in selection_masks[1:]:
mask &= newmask
return mask
selection_mask = make_mask_for_dict(selections[0])
for new_selection_mask in [make_mask_for_dict(_) for _ in selections[1:]]:
selection_mask |= new_selection_mask
return selection_mask
def new_select(df, selections: List[Dict[str, Any]]):
"""Select subsets of the dataframe by key/value pairs.
selections is a list of dictionaries.
The dictionaries are ORed while their elements are ANDed.
Example:
selection = [
{"dataset": "mnist", "optim": "SGD"},
{"dataset": "mnist", "optim": "Adam"}
]
selects for
(
(dataset == mnist and optim == SGD)
or (dataset == mnist and optim == Adam)
)
"""
if len(selections) == 0:
return df
return df[make_mask(df, selections)]
def new_filter_and_merge(summary, runs, summary_filter, runs_filter):
summary_filtered = new_select(summary, summary_filter)
runs_filtered = new_select(runs, runs_filter)
runs_filtered = runs_filtered.loc[runs_filtered["id"].isin(summary_filtered.index)]
##
# Drop duplicate columns
run_columns_to_drop = [
"grad_updates_per_epoch",
"minibatches_per_epoch",
"exp_runtime_s",
"dataset",
]
runs_filtered = runs_filtered.drop(labels=run_columns_to_drop, axis=1)
merged = runs_filtered.join(
other=summary_filtered, on="id", how="right", rsuffix="_end"
)
return merged
def flatten(t):
return [item for sublist in t for item in sublist]
def grid_search(data, setting, opt, epoch, metric, key):
"""Grid search.
    For runs matching (setting AND opt AND epoch), find the best value of `key`,
    where "best" is the minimum, over values of `key`, of the maximum `metric`
    across runs sharing that value.
    Returns two dataframes:
        all runs matching (setting AND opt) at `epoch`
        best runs matching (setting AND opt AND key=best), for all epochs
"""
setting_mask = make_mask(data, setting)
opt_mask = make_mask(data, [opt])
epoch_mask = make_mask(data, [{"epoch": epoch}])
all_runs_at_epoch = data[setting_mask & opt_mask & epoch_mask]
meds, mins, maxs, xs = median_min_max_by(
all_runs_at_epoch, key=key, metric_name=metric
)
best_value = xs[np.nanargmin(maxs)]
best_value_mask = make_mask(data, [{key: best_value}])
best_runs_all_epochs = data[setting_mask & opt_mask & best_value_mask]
return all_runs_at_epoch, best_runs_all_epochs
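# Hedged usage sketch for grid_search (illustration only; `data` and the chosen
# dataset/batch-size/optimizer keys come from the caller, as in gridsearch_for below):
#   runs_at_epoch, best_runs = grid_search(
#       data,
#       setting=experiments[ds][bs],
#       opt=expdef.OPTIMS[opt],
#       epoch=epoch_clip[ds][bs],
#       metric="training_loss",
#       key="opt.alpha",
#   )
# The "best" step-size is the one whose worst run (max across runs with that
# step-size) has the lowest training loss at the given epoch.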
def gridsearch_for(data, dss, bss, opts, epoch_clip, experiments=expdef.EXPERIMENTS):
runs_at_last_epoch_list = []
best_runs_list = []
for ds in dss:
for bs in bss:
for opt in opts:
runs_at_last_epoch_, best_runs_ = grid_search(
data,
setting=experiments[ds][bs],
opt=expdef.OPTIMS[opt],
epoch=epoch_clip[ds][bs],
metric="training_loss",
key="opt.alpha",
)
runs_at_last_epoch_list.append(runs_at_last_epoch_)
best_runs_list.append(best_runs_)
runs_at_last_epoch = pd.concat(runs_at_last_epoch_list)
best_runs = pd.concat(best_runs_list)
return runs_at_last_epoch, best_runs
def add_stop_at_info(dataframe, stop_at):
if any(["epoch_to_stop" == key for key in list(dataframe.keys())]):
return dataframe
dataframe["eff_bs"] = dataframe["batch_size"] * dataframe["accumulate_steps"]
epoch_stop_table = pd.DataFrame(
[
{
"dataset": ds,
"eff_bs": expdef.EFF_BS[ds][bs],
"epoch_to_stop": stop_at[ds][bs],
}
for ds in [
expdef.MNIST,
expdef.CIFAR10,
expdef.PTB,
expdef.WT2,
expdef.SQUAD,
]
for bs in [expdef.S, expdef.M, expdef.L, expdef.XL, expdef.FULL]
]
)
return pd.merge(left=dataframe, right=epoch_stop_table, on=["dataset", "eff_bs"])
| 14,209 | 26.917485 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/data_caching.py
|
import os.path
import pickle as pk
from pathlib import Path
import explib.results.data as data_h
from explib import config
from explib.results import experiment_groups as expdef
from explib.results.cleanup import clean_data
from explib.results.data import get_all_runs, get_summary, gridsearch_for
CACHE_DIR = os.path.join(config.get_workspace(), "cache")
VERSION = 6
def cached_call(base_filename, function):
filename = f"{base_filename}_{VERSION}.pk"
filepath = os.path.join(CACHE_DIR, filename)
Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)
if os.path.isfile(filepath):
print(f"Loading {base_filename} from cache {filepath}")
with open(filepath, "rb") as handle:
return pk.load(handle)
else:
print(f"No cache hit for {base_filename} ({function} at {filepath}). Build...")
results = function()
print(f"Saving {base_filename} from cache {filepath}")
with open(filepath, "wb") as handle:
pk.dump(results, handle, protocol=pk.HIGHEST_PROTOCOL)
return results
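def _example_cached_call():
    """Hedged usage sketch (illustration only): any zero-argument callable can
    be cached. The first call builds the result and pickles it; later calls with
    the same base filename (and VERSION) load the pickle instead of rebuilding."""
    def _build():
        return {"answer": 42}
    return cached_call("example_answer", _build)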
def load_cleaned_data():
def _load_cleaned_data():
return clean_data(get_summary(), get_all_runs())
return cached_call("cleaned_data", _load_cleaned_data)
def load_filtered_data():
def _load_filtered_data():
summary, runs = load_cleaned_data()
summary_filter = data_h.flatten(
[
expdef.EXPERIMENTS[ds][bs]
for ds in [
expdef.MNIST,
expdef.CIFAR10,
expdef.PTB,
expdef.WT2,
expdef.SQUAD,
]
for bs in [expdef.S, expdef.M, expdef.L, expdef.XL, expdef.FULL]
]
)
return data_h.new_filter_and_merge(
summary, runs, summary_filter, runs_filter=[]
)
return cached_call("filtered_data", _load_filtered_data)
def load_nd_filtered_data():
def _load_nd_filtered_data():
summary, runs = load_cleaned_data()
summary_filter = data_h.flatten(
[
expdef.SPECIAL[expdef.NO_DROPOUT][ds][bs]
for ds in [
expdef.PTB,
expdef.WT2,
]
for bs in [expdef.FULL]
]
)
return data_h.new_filter_and_merge(
summary, runs, summary_filter, runs_filter=[]
)
return cached_call("filtered_nd_data", _load_nd_filtered_data)
def gridsearch_nd_all_end():
def _gridsearch_nd_all_end():
data = load_nd_filtered_data()
dss, bss, opts = [expdef.PTB, expdef.WT2], [expdef.FULL], expdef.ALL_MAIN_OPT
return gridsearch_for(
data,
dss=dss,
bss=bss,
opts=opts,
epoch_clip=expdef.EPOCH_CLIP,
experiments=expdef.SPECIAL[expdef.NO_DROPOUT],
)
return cached_call("gridsearch_nd_all_end", _gridsearch_nd_all_end)
def gridsearch_all_end():
def _gridsearch_all_end():
data = load_filtered_data()
dss, bss, opts = expdef.ALL_DS, expdef.ALL_BS, expdef.ALL_MAIN_OPT
return gridsearch_for(
data, dss=dss, bss=bss, opts=opts, epoch_clip=expdef.EPOCH_CLIP
)
return cached_call("gridsearch_all_end", _gridsearch_all_end)
def gridsearch_all_start():
def _gridsearch_all_start():
data = load_filtered_data()
dss, bss, opts = expdef.ALL_DS, expdef.ALL_BS, expdef.ALL_MAIN_OPT
return gridsearch_for(
data, dss=dss, bss=bss, opts=opts, epoch_clip=expdef.EPOCH_CLIP_START
)
return cached_call("gridsearch_all_start", _gridsearch_all_start)
def gridsearch_all_start_soft_increase():
def _gridsearch_all_start_soft_increase():
data = load_filtered_data()
dss, bss, opts = expdef.ALL_DS, expdef.ALL_BS, expdef.ALL_MAIN_OPT
return gridsearch_for(
data, dss=dss, bss=bss, opts=opts, epoch_clip=expdef.EPOCH_CLIP_START_NEW
)
return cached_call("gridsearch_all_start_new", _gridsearch_all_start_soft_increase)
def gridsearch_all_start_ignore_S():
def _gridsearch_all_start_ignore_S():
data = load_filtered_data()
dss, bss, opts = expdef.ALL_DS, expdef.ALL_BS, expdef.ALL_MAIN_OPT
return gridsearch_for(
data,
dss=dss,
bss=bss,
opts=opts,
epoch_clip=expdef.EPOCH_CLIP_START_IGNORE_S,
)
return cached_call("gridsearch_all_ignore_S", _gridsearch_all_start_ignore_S)
| 4,602 | 29.686667 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/__init__.py
|
from . import cleanup
from . import data
from . import plotting
from . import wandb_cleanups
| 93 | 17.8 | 28 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/results/wandb_cleanups.py
|
import pdb
from explib import config
from explib.expmaker.experiment_defs import (
exp_dict_from_str,
exp_dict_to_str,
make_uuid,
make_wuuid,
)
from explib.results.data import WandbAPI
from tqdm import tqdm
##
# Helper functions
def get_logs(run):
"""Downloads the .log file for the run and returns its content as a str."""
files = run.files()
for file in files:
if file.name == "output.log":
fh = file.download(replace=True)
logs = fh.read()
return logs
return False
def get_exp_dict_str(arun):
"""Extract the string representation of exp_dict from the run's logs."""
logs = get_logs(arun)
if logs == False:
return False
firstline = logs.split("\n")[0]
if firstline.startswith("Downloading"):
firstline = logs.split("\n")[2]
dictionary_parts = firstline.split("dictionnary ")
if len(dictionary_parts) != 2:
        print(f"Error: {len(dictionary_parts)} dictionary parts instead of 2.")
print("Don't know what to do. Here's a debugger:")
pdb.set_trace()
dictionary_part = dictionary_parts[1]
exp_dict_str = dictionary_part.replace("'", '"')
return exp_dict_str
##
# Updating the wuuid and expconfig_str for experiments that ran on older
# versions of explib
def update_wuuid(arun):
"""Migrates to the new WUUID system.
For runs that do not have a WUUID, creates it and updates the run.
Also adds in the field expconfig_str.
Context:
    We used to have only one UUID per experiment; now we have two (UUID and WUUID).
    The UUID depends on the entire exp_dict, including slurm_config,
    and cannot be used to check for finished jobs across clusters with different
    slurm configs. The WUUID is the same as the UUID but independent of the slurm config.
"""
if "wuuid" in arun.config:
return
exp_dict_str = get_exp_dict_str(arun)
if exp_dict_str == False:
return
exp_dict = exp_dict_from_str(exp_dict_str)
predicted_uuid = make_uuid(exp_dict)
if arun.config["uuid"] != predicted_uuid:
print("Error: predicted uuid doesn't match stored uuid.")
print("Don't know what to do. Here's a debugger:")
pdb.set_trace()
arun.config["wuuid"] = make_wuuid(exp_dict)
arun.config["exp_dict_str"] = exp_dict_to_str(exp_dict)
print(f"UPDATE_WUUID: Updated wuuid and exp_dict_str for {arun.id}")
arun.update()
##
# Flag run status
# Rules and how to apply them
def _rule_success(run):
if "max_epoch" not in run.config:
return False
if "epoch" not in run.summary:
return False
return run.config["max_epoch"] == run.summary["epoch"] + 1
def _rule_diverged(run):
return "training_error" in run.summary
def _rule_diverged_from_logs(run):
logs = get_logs(run)
if logs:
if "{'training_error': 'nan'}" in logs:
return True
return False
def _rule_OOM(run):
logs = get_logs(run)
if logs:
if "CUDA error: out of memory" in logs:
return True
return False
def _rule_finished_with_no_summary(run):
if dict(run.summary) == {} or dict(run.summary) == {
"status": "Finished with no summary"
}:
if len(run.history(samples=1, keys=["exp_runtime"])) == 1:
return True
return False
def _rule_uncaught_exception(run):
logs = get_logs(run)
if logs == False:
return False
if "Uncaught exception" in logs:
return True
return False
def _rule_no_logs(run):
if get_logs(run) == False:
return True
return False
_flags_and_rules = [
("Success", _rule_success),
("Finished with no summary", _rule_finished_with_no_summary),
("Diverged", _rule_diverged),
("NoLogs", _rule_no_logs),
("OOM", _rule_OOM),
("Crash", _rule_uncaught_exception),
("Diverged", _rule_diverged_from_logs),
]
def flag_status(arun):
if "status" in arun.summary:
return
status_found = False
for (flag, rule) in _flags_and_rules:
if rule(arun):
arun.summary["status"] = flag
print(f"FLAG_STATUS: Marking run {arun.id} as {flag}")
arun.update()
status_found = True
break
if not status_found:
print(
"FLAG_STATUS: Unknown run status! Don't know what to do. Here's a debugger:"
)
pdb.set_trace()
##
# Unify expdict_str and exp_dict_str
def expdict_str_rename(arun):
if "expdict_str" in arun.config:
arun.config["exp_dict_str"] = arun.config["expdict_str"]
arun.config.pop("expdict_str", None)
print(f"EXPDICT_STR_RENAME: Renamed expdict_str to exp_dict_str for {arun.id}")
arun.update()
##
#
##
#
def correct_experiment_definition():
"""Fix an issue in some experiment definitions.
Problem:
The full batch experiments were intended to run in full batch
(using ``drop_last = True, shuffle = False``, effectively dropping the last incomplete batch)
but did not, because ``shuffle`` was never passed to the dataset ``__init__``.
    - The experiments using ``MNIST`` and ``CIFAR10`` were not affected: there was
      no incomplete batch to drop, so ``shuffle`` did not affect what gets dropped.
    - It did not affect ``PTB`` and ``WikiText2`` because ``shuffle`` was not
      implemented; the code from the Transformer-XL paper used an ordered iterator
      rather than a shuffled one.
    - The only dataset affected in the full batch experiments was ``SQuAD``.
      The runs are still valid, but ``shuffle=True`` should be turned to ``False``.
Solution:
This script updates runs from wandb as follows:
If the run has ``shuffle = False``,
remove the ``shuffle`` key and update the exp dict and the unique ids.
"""
runs = WandbAPI.get_handler().runs(config.get_wandb_project(), per_page=1000)
for arun in tqdm(runs):
if "shuffle" in arun.config and arun.config["shuffle"] == False:
exp_dict = exp_dict_from_str(arun.config["exp_dict_str"])
predicted_uuid = make_uuid(exp_dict)
predicted_wuuid = make_wuuid(exp_dict)
if arun.config["shuffle"] != exp_dict["shuffle"]:
pdb.set_trace(
header="Error: exp_dict and wandb shuffle don't match. Don't know what to do, here's a debugger:"
)
if arun.config["uuid"] != predicted_uuid:
pdb.set_trace(
header="Error: predicted and stored uuid don't match. Don't know what to do, here's a debugger:"
)
if arun.config["wuuid"] != predicted_wuuid:
pdb.set_trace(
header="Error: predicted and stored wuuid don't match. Don't know what to do, here's a debugger:"
)
exp_dict.pop("shuffle")
arun.config.pop("shuffle")
arun.config["exp_dict_str"] = exp_dict_to_str(exp_dict)
arun.config["wuuid"] = make_wuuid(exp_dict)
arun.config["uuid"] = make_uuid(exp_dict)
arun.tags.append("RemovedFalseShuffle")
print(f"FIX_SHUFFLE: RemovedFalseShuffle for {arun.id}")
arun.update()
def checkup(group=None):
filters = {"group": group} if group is not None else {}
runs = WandbAPI.get_handler().runs(
config.get_wandb_project(), filters=filters, per_page=1000
)
for arun in tqdm(runs):
flag_status(arun)
update_wuuid(arun)
expdict_str_rename(arun)
| 7,692 | 28.250951 | 161 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/logging/__init__.py
|
import logging
import os
from pathlib import Path
import wandb
from wandb.util import generate_id
from dotenv import load_dotenv
import sys
import datetime
from explib import config
base_logger = None
wandb_is_enabled = True
def log_data(dict, commit=True):
if wandb_is_enabled:
wandb.log(dict, commit=commit)
base_logger.info(dict)
def init_logging_stdout(level=None):
global base_logger
logging.basicConfig(
level=config.get_console_logging_level() if level is None else level,
format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
datefmt="%Y-%m-%dT%H:%M:%S",
)
base_logger = logging.getLogger(__name__)
base_logger.info("Explib env/configuration: {}".format(config.get_all()))
def init_logging_for_exp(
slug, exp_uuid, exp_dict, disable_wandb, additional_config=None
):
"""Initialize the logging"""
load_dotenv()
logs_path = os.path.join(config.get_workspace(), slug, "logs")
Path(logs_path).mkdir(parents=True, exist_ok=True)
timestring = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S%z")
log_filename = f"{slug}_{exp_uuid}_{timestring}.log"
file_path = os.path.join(logs_path, log_filename)
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s",
datefmt="%a, %d %b %Y %H:%M:%S",
filename=file_path,
filemode="a+",
)
global base_logger
base_logger = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s"
)
console.setFormatter(formatter)
base_logger.addHandler(console)
global wandb_is_enabled
if disable_wandb:
wandb_is_enabled = False
else:
if additional_config is None:
additional_config = {}
wandb.init(
project=config.get_wandb_project(),
id=generate_id(16),
entity=config.get_wandb_entity(),
dir=logs_path,
config={**exp_dict, **additional_config},
group=slug,
force=True,
)
wandb_is_enabled = True
def error_handler(exctype, value, tb):
base_logger.error("Uncaught exception", exc_info=(exctype, value, tb))
sys.excepthook = error_handler
return log_data
def info(*args, **kwargs):
base_logger.info(*args, **kwargs)
def warn(*args, **kwargs):
base_logger.warn(*args, **kwargs)
def debug(*args, **kwargs):
base_logger.debug(*args, **kwargs)
def full_stack():
"""https://stackoverflow.com/a/16589622"""
import traceback, sys
exc = sys.exc_info()[0]
stack = traceback.extract_stack()[:-1] # last one would be full_stack()
if exc is not None: # i.e. an exception is present
del stack[-1] # remove call of full_stack, the printed exception
# will contain the caught exception caller instead
trc = "Traceback (most recent call last):\n"
stackstr = trc + "".join(traceback.format_list(stack))
if exc is not None:
stackstr += " " + traceback.format_exc().lstrip(trc)
return stackstr
| 3,276 | 27.008547 | 85 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/optim/signum.py
|
import torch
from torch.optim import Optimizer
class Signum(Optimizer):
r"""
Code taken from https://github.com/jiaweizzhao/Signum_pytorch/blob/master/Example/signum.py
Implements Signum optimizer that takes the sign of gradient or momentum.
    See details in the original paper at: https://arxiv.org/abs/1711.05101
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0.9)
weight_decay (float, optional): weight decay (default: 0)
Example:
>>> optimizer = signum.Signum(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
.. note::
The optimizer updates the weight by:
buf = momentum * buf + (1-momentum)*rescaled_grad
weight = (1 - lr * weight_decay) * weight - lr * sign(buf)
        In the specific case with momentum, the Signum update can be written as
        .. math::
            \begin{split}g_t = \nabla J(W_{t-1})\\
            m_t = \beta m_{t-1} + (1 - \beta) g_t\\
            W_t = W_{t-1} - \eta_t \text{sign}(m_t)\end{split}
        where :math:`W`, :math:`g`, :math:`m`, :math:`\beta` and :math:`\eta_t` denote
        the parameters, gradient, momentum buffer, momentum coefficient,
        and learning rate respectively.
        Without momentum, the SignSGD update can be written as
        .. math::
            g_t = \nabla J(W_{t-1})\\
            W_t = W_{t-1} - \eta_t \text{sign}(g_t)
"""
def __init__(self, params, lr=0.01, momentum=0, weight_decay=0, **kwargs):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= momentum:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super(Signum, self).__init__(params, defaults)
def __setstate__(self, state):
super(Signum, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
                    d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
# signum
param_state = self.state[p]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.zeros_like(p.data)
else:
buf = param_state["momentum_buffer"]
                    buf.mul_(momentum).add_(d_p, alpha=1 - momentum)
d_p = torch.sign(buf)
else: # signsgd
d_p = torch.sign(d_p)
p.data.add_(d_p, alpha=-group["lr"])
return loss
| 3,282 | 36.735632 | 95 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/optim/normalized_gd.py
|
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class CopyOfSGD(Optimizer):
def __init__(
self,
params,
lr=required,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
*,
maximize=False,
):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
maximize=maximize,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(CopyOfSGD, self).__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
group.setdefault("maximize", False)
@torch.no_grad()
def _step_with_direction(self, closure=None, direction_func=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
# grad norm comp
for group in self.param_groups:
params_with_grad = []
d_p_list = []
momentum_buffer_list = []
has_sparse_grad = False
for p in group["params"]:
if p.grad is not None:
params_with_grad.append(p)
# norm decent
d_p_list.append(direction_func(p.grad))
if p.grad.is_sparse:
has_sparse_grad = True
state = self.state[p]
if "momentum_buffer" not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state["momentum_buffer"])
sgd(
params_with_grad,
d_p_list,
momentum_buffer_list,
weight_decay=group["weight_decay"],
momentum=group["momentum"],
lr=group["lr"],
dampening=group["dampening"],
nesterov=group["nesterov"],
maximize=group["maximize"],
has_sparse_grad=has_sparse_grad,
)
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state["momentum_buffer"] = momentum_buffer
return loss
def _eval_closure(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
return loss
def _total_grad_norm(self):
total_grad = p2v(
[
p.grad if p.grad is not None else torch.zeros_like(p)
for group in self.param_groups
for p in group["params"]
]
)
return total_grad.norm(self.norm)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = self._eval_closure(closure)
self._step_with_direction(closure, lambda g: g)
return loss
def sgd(
params: List[Tensor],
d_p_list: List[Tensor],
momentum_buffer_list: List[Optional[Tensor]],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
has_sparse_grad: bool = None,
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
maximize: bool,
):
for i, param in enumerate(params):
d_p = d_p_list[i]
if weight_decay != 0:
d_p = d_p.add(param, alpha=weight_decay)
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(d_p).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
alpha = lr if maximize else -lr
param.add_(d_p, alpha=alpha)
class PlainSGD(CopyOfSGD):
def __init__(self, params, lr=required, momentum=0):
super(PlainSGD, self).__init__(
params=params,
lr=lr,
momentum=momentum,
dampening=0,
weight_decay=0,
nesterov=False,
)
class BlockNormalizedSGD(PlainSGD):
"""Change the magnitude and direction, but by block rather than coordinate"""
def __init__(self, params, lr=required, momentum=0, norm=1):
assert norm > 0
self.norm = norm
super(BlockNormalizedSGD, self).__init__(params, momentum=momentum, lr=lr)
@torch.no_grad()
def step(self, closure=None):
loss = self._eval_closure(closure)
self._step_with_direction(closure, lambda g: g / g.norm(self.norm))
return loss
class RescaledSignDescent(PlainSGD):
"""
Change the direction using the sign but keep the magnitude
"""
def __init__(self, params, lr=required, momentum=0, norm=1):
assert norm > 0
self.norm = norm
super(RescaledSignDescent, self).__init__(params, lr=lr, momentum=momentum)
@torch.no_grad()
def step(self, closure=None):
loss = self._eval_closure(closure)
total_grad_norm = self._total_grad_norm()
self._step_with_direction(closure, lambda g: torch.sign(g) * total_grad_norm)
return loss
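# Hedged sketch of the RescaledSignDescent update above (illustration only):
# absent momentum and with norm=1, each step is
#   w <- w - lr * sign(g) * ||g||_1
# where sign(g) is taken elementwise and ||g||_1 is the L1 norm of the full
# gradient vector across all parameter groups.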
class NormalizedSGD(PlainSGD):
"""
Change the magnitude but keep the direction
"""
def __init__(self, params, lr=required, momentum=0, norm=2):
assert norm > 0
self.norm = norm
super(NormalizedSGD, self).__init__(params, lr=lr, momentum=momentum)
@torch.no_grad()
def step(self, closure=None):
loss = self._eval_closure(closure)
total_grad_norm = self._total_grad_norm()
self._step_with_direction(closure, lambda g: g / total_grad_norm)
return loss
class SignSGD(PlainSGD):
"""
Change the magnitude and direction
"""
def __init__(self, params, lr=required, momentum=0):
super(SignSGD, self).__init__(params, lr=lr, momentum=momentum)
@torch.no_grad()
def step(self, closure=None):
loss = self._eval_closure(closure)
self._step_with_direction(closure, lambda g: torch.sign(g))
return loss
| 7,502 | 30.004132 | 101 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/optim/modified_adam.py
|
from torch.optim import Optimizer
import math
import torch
from torch import Tensor
from typing import List, Optional
def f_modifiedadam(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
max_exp_avg_sqs: List[Tensor],
state_steps: List[int],
*,
amsgrad: bool,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
use_bias_correction: bool,
eps: float
):
r"""Functional API that performs Adam algorithm computation."""
for i, param in enumerate(params):
grad = grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step = state_steps[i]
bias_correction1 = 1 - beta1**step if use_bias_correction else 1
bias_correction2 = 1 - beta2**step if use_bias_correction else 1
print(use_bias_correction, bias_correction1, bias_correction2, end="")
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sqs[i], exp_avg_sq, out=max_exp_avg_sqs[i])
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sqs[i].sqrt() / math.sqrt(bias_correction2)).add_(eps)
else:
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
step_size = lr / bias_correction1
param.addcdiv_(exp_avg, denom, value=-step_size)
class ModifiedAdam(Optimizer):
r"""Modified Adam Implementation for ablation."""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-8,
weight_decay=0,
use_bias_correction=True,
amsgrad=False,
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
amsgrad=amsgrad,
use_bias_correction=use_bias_correction,
)
super(ModifiedAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(ModifiedAdam, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("amsgrad", False)
group.setdefault("use_bias_correction", True)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps = []
beta1, beta2 = group["betas"]
for p in group["params"]:
if p.grad is not None:
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
if group["amsgrad"]:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if group["amsgrad"]:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
# update the steps for each param group update
state["step"] += 1
# record the step after step update
state_steps.append(state["step"])
f_modifiedadam(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=group["amsgrad"],
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
use_bias_correction=group["use_bias_correction"],
eps=group["eps"],
)
return loss
| 6,017 | 34.192982 | 104 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/optim/__init__.py
|
"""Optimizers
Generic interface to build optimizers by name,
possibly interfacing with pytorch
"""
import json
import torch
from .signum import Signum
from .modified_adam import ModifiedAdam
from .normalized_gd import (
PlainSGD,
NormalizedSGD,
BlockNormalizedSGD,
SignSGD,
RescaledSignDescent,
)
from .clipped_sgd import ClippedGD
SGD = "SGD"
ADAM = "Adam"
ADAM_ABLATION = "AdamAblation"
SIGNUM = "Signum"
PLAIN_SGD = "PlainSGD"
NORMALIZED_GD = "NormalizedGD"
BLOCK_NORMALIZED_GD = "BlockNormalizedGD"
SIGN_D = "SignDescent"
RESCALED_SIGN_D = "RescaledSignDescent"
CLIPPED_SGD = "ClippedGD"
AVAILABLE_OPTIMIZERS = [
SGD,
ADAM,
SIGNUM,
ADAM_ABLATION,
NORMALIZED_GD,
BLOCK_NORMALIZED_GD,
SIGN_D,
RESCALED_SIGN_D,
CLIPPED_SGD,
]
def init(params, model):
name = params["name"]
momentum = params["momentum"] if "momentum" in params else 0
if name not in AVAILABLE_OPTIMIZERS:
raise Exception("Optimizer {} not available".format(name))
if name == SGD:
return torch.optim.SGD(
model.parameters(), lr=params["alpha"], momentum=momentum
)
if name == ADAM:
return torch.optim.Adam(
model.parameters(),
lr=params["alpha"],
betas=(params["b1"], params["b2"]),
)
if name == ADAM_ABLATION:
params_ = json.loads(json.dumps(params))
lr = params_.get("alpha")
betas = (params_.get("b1"), params_.get("b2"))
params_.pop("name")
params_.pop("alpha")
params_.pop("b1")
params_.pop("b2")
return ModifiedAdam(model.parameters(), lr=lr, betas=betas, **params_)
if name == SIGNUM:
return Signum(model.parameters(), lr=params["alpha"], momentum=momentum)
if name == PLAIN_SGD:
return PlainSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
if name == NORMALIZED_GD:
return NormalizedSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
if name == BLOCK_NORMALIZED_GD:
return BlockNormalizedSGD(
model.parameters(), lr=params["alpha"], momentum=momentum
)
if name == SIGN_D:
return SignSGD(model.parameters(), lr=params["alpha"], momentum=momentum)
if name == RESCALED_SIGN_D:
return RescaledSignDescent(
model.parameters(), lr=params["alpha"], momentum=momentum
)
if name == CLIPPED_SGD:
return ClippedGD(
model.parameters(),
lr=params["alpha"],
momentum=momentum,
clipat=params.get("clipat", 0.5),
)
| 2,628 | 24.77451 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/optim/clipped_sgd.py
|
import itertools
import torch
from torch import Tensor
from torch.optim import SGD
from torch.optim.optimizer import Optimizer, required
from torch.nn.utils import parameters_to_vector as p2v
from typing import List, Optional
class ClippedGD(SGD):
def __init__(
self,
params,
lr=required,
clipat=0.5,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
):
if clipat < 0.0:
raise ValueError("Invalid clipat value: {}".format(clipat))
self._clipat = clipat
self.params = params
super().__init__(
params,
lr,
momentum,
dampening,
weight_decay,
nesterov,
)
def step(self, closure=None):
"""Clips the gradients and takes a step of GD. Changes the values of the gradients."""
torch.nn.utils.clip_grad_norm_(
itertools.chain(*[group["params"] for group in self.param_groups]),
max_norm=self._clipat,
)
super().step(closure=closure)
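def _example_clipped_gd_step():
    """Hedged usage sketch (illustration only, not used by explib): the global
    gradient norm is clipped to `clipat` in-place before a plain SGD step."""
    model = torch.nn.Linear(2, 1)
    opt = ClippedGD(model.parameters(), lr=0.1, clipat=0.5)
    opt.zero_grad()
    loss = model(torch.ones(1, 2)).sum()
    loss.backward()
    opt.step()  # gradients now have norm <= 0.5; the SGD update uses the clipped grads
    return model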
| 1,091 | 24.395349 | 94 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/problem.py
|
import torch
from torch.nn.utils import parameters_to_vector as p2v
from abc import ABCMeta, abstractmethod
from explib import config
from ..util import get_grads, enable_running_stats, disable_running_stats
import os
import numpy as np
from pathlib import Path
import csv
from ..dataset import *
class Problem(metaclass=ABCMeta):
def __init__(self, exp_dict):
self.model_name = exp_dict["model"]
self.batch_size = exp_dict["batch_size"]
self.seed = exp_dict["seed"]
self.fake_full_batch_mode = (
"fake_full_batch_mode" in exp_dict and exp_dict["fake_full_batch_mode"]
)
self.drop_last = "drop_last" in exp_dict and exp_dict["drop_last"]
self.device = exp_dict["device"]
self.dataset_name = exp_dict["dataset"]
self.optim_name = exp_dict["opt"]["name"]
self.init_noise_norm = (
"init_noise_norm" in exp_dict and exp_dict["init_noise_norm"]
)
self.save_path = os.path.join(
config.get_workspace(), exp_dict["dataset"], exp_dict["exp_uuid"]
)
self.trained_norms = exp_dict["trained_norms"]
self.save_norm_samples = (
"save_norm_samples" in exp_dict and exp_dict["save_norm_samples"]
)
self.dummy_run = exp_dict["dummy_run"]
if "loss_func" in exp_dict:
self.loss_func = self.get_loss_function(exp_dict["loss_func"])
# Gradient accumulation for noise norm calculation
if "accumulate_steps" in exp_dict:
self.accumulate_steps = exp_dict["accumulate_steps"]
self.grad_accumulate = True
else:
self.accumulate_steps = 1
self.grad_accumulate = False
self.exp_uuid = exp_dict["exp_uuid"]
@abstractmethod
def calculate_loss(self, data):
pass
@abstractmethod
def eval_loop(self, is_validation=False):
pass
def train_loop(self):
"""Train for one epoch"""
self.model.train()
self.model.to(self.device)
self.optim.zero_grad()
epoch_loss = 0.0
iteration_counter = 0
accumulation_counter = 0
fvals, gnorms_1, gnorms_2 = [], [], []
for (step, *data) in enumerate(self.train_dataloader):
loss = self.calculate_loss(data)
if self.grad_accumulate:
loss = loss / self.accumulate_steps
loss.backward()
iteration_counter += 1
if (
not self.grad_accumulate
or iteration_counter % self.accumulate_steps == 0
):
fvals.append(loss.item())
gnorms_1.append(grad_norm_squared(self.optim, p=1).item())
gnorms_2.append(grad_norm_squared(self.optim, p=2).item())
self.optim.step()
self.optim.zero_grad()
accumulation_counter += 1
epoch_loss += loss.item()
if self.fake_full_batch_mode and accumulation_counter == 1:
break
if self.dummy_run:
accumulation_counter = 1
break
epoch_loss = epoch_loss / accumulation_counter
return epoch_loss, fvals, gnorms_1, gnorms_2
def calc_norms(self, norm_epoch, mean_grad=None):
"""
Calculate noise norms. If mean_grad is None, will calculate
the gradient mean first. If not None, will calculate the norms and save them
"""
self.model.train()
self.model.to(self.device)
self.optim.zero_grad()
iteration_counter = 0
accumulation_counter = 0
calc_total_grad = mean_grad is None
self.model.apply(disable_running_stats)
if calc_total_grad:
logs_path = os.path.join(self.save_path, "noise")
Path(logs_path).mkdir(parents=True, exist_ok=True)
grads = None
else:
# calc norms
noise_norms = []
for (step, *data) in enumerate(self.train_dataloader):
loss = self.calculate_loss(data)
if self.grad_accumulate:
loss = loss / self.accumulate_steps
loss.backward()
iteration_counter += 1
if (
not self.grad_accumulate
or iteration_counter % self.accumulate_steps == 0
):
if calc_total_grad:
grad = get_grads(self.model).cpu()
grads = grad if grads is None else grads + grad
else:
# calc norms
grad = get_grads(self.model).cpu()
noise_norm = (grad - mean_grad).norm().item() ** 2
noise_norms.append(noise_norm)
self.optim.zero_grad()
accumulation_counter += 1
if self.fake_full_batch_mode and accumulation_counter == 1:
break
if self.dummy_run:
break
if calc_total_grad:
torch.save(
grads,
self.save_path
+ "/noise/grad_{}_{}".format(accumulation_counter, norm_epoch),
)
self.calc_norms(
norm_epoch=norm_epoch, mean_grad=grads / accumulation_counter
)
self.model.apply(enable_running_stats)
return
else:
# calc norms
final_noise_norms = np.asarray(noise_norms)
np.save(
self.save_path
+ "/noise/norm_{}_{}_{}_{}_{}_{}".format(
self.model_name,
self.dataset_name,
self.batch_size * self.accumulate_steps,
self.seed,
self.optim_name,
norm_epoch,
),
final_noise_norms,
)
if self.save_norm_samples:
if self.dataset_name in [PTB, WIKITEXT2, SQUAD]:
self.get_outliers_helper(final_noise_norms)
def logLoss(self, predicted, actual):
criterion = torch.nn.CrossEntropyLoss()
return criterion(predicted, actual.long())
def get_loss_function(self, function_name):
if function_name == "logloss":
criterion = self.logLoss
elif function_name == "mse":
criterion = torch.nn.MSELoss()
else:
raise Exception("unsupported loss function: " + function_name)
return criterion
@torch.no_grad()
def grad_norm_squared(optim, p=2):
v = p2v(
[
p.grad
for group in optim.param_groups
for p in group["params"]
if p.grad is not None
]
)
return v.norm(p=p) ** 2
| 6,790 | 32.78607 | 84 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/bert_squad_prob.py
|
import csv
import torch
from accelerate import Accelerator
from datasets import load_metric
from .. import dataset, model, optim
from .problem import Problem
class BertSquadProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
(
self.train_dataloader,
self.train_dataloader_for_eval,
self.valid_dataloader,
self.valid_dataset,
self.valid_examples,
self.train_dataset,
self.train_examples,
self.tokenizer,
) = dataset.init(
exp_dict["dataset"],
self.batch_size,
self.device,
extra_params={**exp_dict["model_args"], "model_name": self.model_name},
drop_last=self.drop_last,
fake_full_batch_mode=self.fake_full_batch_mode,
shuffle=False if self.save_norm_samples else exp_dict.get("shuffle", True),
outliers_filename=exp_dict.get("outliers_filename", None),
)
self.model = model.init(
exp_dict["model"],
model_args=exp_dict["model_args"],
)
self.model.to(self.device)
self.optim = optim.init(
exp_dict["opt"],
self.model,
)
self.accelerator = Accelerator()
(
self.model,
self.optim,
self.train_dataloader,
self.valid_dataloader,
) = self.accelerator.prepare(
self.model, self.optim, self.train_dataloader, self.valid_dataloader
)
self.train_dataloader_for_eval = self.accelerator.prepare(
self.train_dataloader_for_eval
)
self.metric = load_metric("squad")
def calculate_loss(self, data):
return self.model(**data[0]).loss
@torch.no_grad()
def eval_loop(self, is_validation=False):
if self.dummy_run:
results = {}
if not is_validation:
results["training_loss"] = float("nan")
results["train_exact_f1"] = float("nan")
results["train_exact_match"] = float("nan")
else:
results["valid_exact_match"] = float("nan")
results["valid_exact_f1"] = float("nan")
return results
if is_validation:
dataloader = self.valid_dataloader
dataset = self.valid_dataset
examples = self.valid_examples
else:
dataloader = self.train_dataloader_for_eval
dataset = self.train_dataset
examples = self.train_examples
# TODO: merge the loss and metrics calculations here into one loop
# loss = model.bert_base_pretrained.eval_loss(
# self, self.model, self.train_dataloader
# )
metrics, loss = model.bert_base_pretrained.evaluate(
self,
self.model,
dataloader,
self.accelerator,
dataset,
examples,
self.metric,
)
results = {}
if not is_validation:
results["training_loss"] = loss
results["train_exact_f1"] = metrics["f1"]
results["train_exact_match"] = metrics["exact_match"]
else:
results["valid_loss"] = loss
results["valid_exact_match"] = metrics["exact_match"]
results["valid_exact_f1"] = metrics["f1"]
return results
def get_outliers_helper(self, final_noise_norms):
with open(
self.save_path + "/noise/outliers_{}.csv".format(self.exp_uuid),
"w",
) as fw:
writer = csv.writer(fw, delimiter=",")
writer.writerow(["index", "norm", "question", "context"])
rows = []
for (step, *data) in enumerate(self.train_dataloader):
noise = final_noise_norms[step]
input_ids = data[0]["input_ids"].tolist()
questions, contexts = self.norm_helper(input_ids)
row = [step, noise, questions, contexts]
rows.append(row)
rows = sorted(rows, key=lambda x: x[1], reverse=True)
writer.writerows(rows)
def norm_helper(self, input_ids):
decoded = self.tokenizer.batch_decode(input_ids)
questions, contexts = [], []
for x in decoded:
x = x.split("[SEP]")
questions.append(x[0])
contexts.append(x[1])
return questions, contexts
| 4,505 | 31.185714 | 87 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/image_prob.py
|
import torch
import torch.nn.functional as F
from .. import dataset, model, optim
from .problem import Problem
class ImageProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
self.train_dataloader, self.valid_dataloader = dataset.init(
self.dataset_name,
self.batch_size,
self.device,
drop_last=self.drop_last,
shuffle=(
False if self.fake_full_batch_mode else exp_dict.get("shuffle", True)
),
fake_full_batch_mode=self.fake_full_batch_mode,
)
if "model_args" not in exp_dict and exp_dict["dataset"] == "mnist":
exp_dict["model_args"] = {}
exp_dict["model_args"]["in_channels"] = 1
self.model = model.init(
exp_dict["model"],
model_args=exp_dict["model_args"] if "model_args" in exp_dict else None,
)
self.model.to(self.device)
self.optim = optim.init(
exp_dict["opt"],
self.model,
)
def calculate_loss(self, data):
labels = data[0][1:][0].to(self.device).float()
X = data[0][0]
X = X.to(self.device)
y = self.model(X.float())
return self.loss_func(y, labels)
@torch.no_grad()
def eval_loop(self, is_validation=False):
dataloader = self.valid_dataloader if is_validation else self.train_dataloader
correct = torch.zeros(1).to(self.device)
epoch_loss = 0
images_counter = 0
accumulation_counter = 0
iteration_counter = 0
self.model.eval()
self.model.to(self.device)
for (X, labels) in dataloader:
X = X.to(self.device)
labels = labels.to(self.device).float()
y = self.model(X)
predicted = F.softmax(y, dim=1)
_, predicted_labels = torch.max(predicted, 1)
images_counter += labels.size(0)
correct += (predicted_labels == labels).sum()
loss = self.loss_func(y, labels)
if self.grad_accumulate:
loss = loss / self.accumulate_steps
epoch_loss += loss.item()
iteration_counter += 1
if (
not self.grad_accumulate
or iteration_counter % self.accumulate_steps == 0
):
accumulation_counter += 1
if self.dummy_run:
accumulation_counter = 1
break
results = {}
accuracy = correct.item() / images_counter
if is_validation:
results["valid_accuracy"] = accuracy
else:
results["train_accuracy"] = accuracy
results["training_loss"] = epoch_loss / max(accumulation_counter, 1)
return results
| 2,823 | 29.042553 | 86 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/simple_prob.py
|
from .problem import Problem
from .. import dataset, model, optim
import torch
class SimpleProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
self.train_dataloader, self.valid_dataloader = dataset.init(
self.dataset_name,
self.batch_size,
self.device,
drop_last=self.drop_last,
shuffle=exp_dict.get("shuffle", True),
)
features_dim = next(iter(self.train_dataloader))[0].shape[1]
self.model = model.init(
exp_dict["model"],
features_dim=features_dim,
)
self.model.to(self.device)
self.optim = optim.init(
exp_dict["opt"],
self.model,
)
def calculate_loss(self, data):
labels = data[0][1:][0].to(self.device).float()
X = data[0][0]
X = X.to(self.device)
y = self.model(X.float())
return self.loss_func(y, labels)
@torch.no_grad()
    def eval_loop(self, is_validation=False):
dataloader = self.valid_dataloader if is_validation else self.train_dataloader
self.model.eval()
self.model.to(self.device)
epoch_loss = 0.0
iteration_counter = 0
accumulation_counter = 0
for (X, labels) in dataloader:
labels = labels.to(self.device).float()
y = self.model(X.float())
loss = self.loss_func(y, labels)
if self.grad_accumulate:
loss = loss / self.accumulate_steps
iteration_counter += 1
if (
not self.grad_accumulate
or iteration_counter % self.accumulate_steps == 0
):
accumulation_counter += 1
epoch_loss += loss.item()
if self.fake_full_batch_mode and accumulation_counter == 1:
break
if self.dummy_run:
accumulation_counter = 1
break
epoch_loss = epoch_loss / max(accumulation_counter, 1)
results = {}
if is_validation:
results["valid_mse"] = epoch_loss
else:
results["train_mse"] = epoch_loss
results["training_loss"] = epoch_loss
return results
| 2,272 | 28.141026 | 86 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/transformer_prob.py
|
import csv
import math
import torch
from .. import dataset, model, optim
from .problem import Problem
class TransformerProb(Problem):
def __init__(self, exp_dict):
super().__init__(exp_dict)
init_outputs = dataset.init(
self.dataset_name,
self.batch_size,
self.device,
extra_params=exp_dict.get("model_args", None),
drop_last=self.drop_last,
shuffle=exp_dict.get("shuffle", False),
outliers_filename=exp_dict.get("outliers_filename", None),
)
if len(init_outputs) == 3:
(
self.train_dataloader,
self.valid_dataloader,
transformer_len,
) = init_outputs
elif len(init_outputs) == 4:
(
self.train_dataloader,
self.valid_dataloader,
transformer_len,
self.corpus,
) = init_outputs
else:
raise ValueError(
"Don't know how to process this number of dataset.init output values"
)
self.model = model.init(
exp_dict["model"],
model_args=exp_dict["model_args"],
transformer_len=transformer_len,
)
self.model.to(self.device)
self.optim = optim.init(
exp_dict["opt"],
self.model,
)
def calculate_loss(self, data):
labels_seq_len = data[0][1:]
X = data[0][0]
X = X.to(self.device)
labels, seq_len = labels_seq_len[0], labels_seq_len[1]
return self.loss_helper(X, labels, seq_len)
def transformer_xl_loss(self, data, target):
mems = tuple()
ret = self.model(data, target, *mems)
loss, mems = ret[0], ret[1:]
return loss.float().mean().type_as(loss)
def transformer_encoder_loss(self, data, target, seq_len):
src_mask = self.model.generate_square_subsequent_mask(seq_len).to(self.device)
output = self.model(data, src_mask)
output_flat = output.view(-1, self.model.ntoken)
return self.loss_func(output_flat, target.view(-1))
@torch.no_grad()
def eval_loop(self, is_validation=False):
dataloader = self.valid_dataloader if is_validation else self.train_dataloader
self.model.eval()
self.model.to(self.device)
self.optim.zero_grad()
epoch_loss = 0.0
ppl_loss = 0.0
total_len = 0
iteration_counter = 0
accumulation_counter = 0
for (X, labels, seq_len) in dataloader:
loss = self.loss_helper(X, labels, seq_len)
ppl_loss += seq_len * loss
total_len += seq_len
if self.grad_accumulate:
loss = loss / self.accumulate_steps
epoch_loss += loss
iteration_counter += 1
if (
not self.grad_accumulate
or iteration_counter % self.accumulate_steps == 0
):
accumulation_counter += 1
if (
self.fake_full_batch_mode
and accumulation_counter == 1
and not is_validation
):
break
if self.dummy_run:
accumulation_counter = 1
break
results = {}
ppl_loss = ppl_loss / total_len
try:
ppl = math.exp(ppl_loss)
except OverflowError:
ppl = float("inf")
if is_validation:
results["valid_ppl"] = ppl
else:
results["train_ppl"] = ppl
results["training_loss"] = epoch_loss / max(accumulation_counter, 1)
return results
def loss_helper(self, X, labels, seq_len):
if self.model_name in [model.TRANSFORMER_XL, model.TRANSFORMER_XL_DET]:
loss = self.transformer_xl_loss(X, labels)
elif self.model_name in [
model.TRANSFORMER_ENCODER,
model.TRANSFORMER_ENCODER_DET,
]:
loss = self.transformer_encoder_loss(X, labels, seq_len)
else:
raise Exception("Transformer not supported!")
return loss
def get_outliers_helper(self, final_noise_norms):
with open(
self.save_path + "/noise/outliers_{}.csv".format(self.exp_uuid),
"w",
encoding="utf-8",
) as fw:
writer = csv.writer(fw, delimiter=",")
writer.writerow(["index", "norm", "text"])
rows = []
for (step, *data) in enumerate(self.train_dataloader):
noise = final_noise_norms[step]
X = data[0][0]
X = X.to(self.device)
sentences = self.corpus.vocab.convert_to_sent_from_tensor(X)
row = [step, noise, sentences]
rows.append(row)
rows = sorted(rows, key=lambda x: x[1], reverse=True)
writer.writerows(rows)
| 4,995 | 31.025641 | 86 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/problem/__init__.py
|
from ..model import *
from .bert_squad_prob import BertSquadProb
from .image_prob import ImageProb
from .simple_prob import SimpleProb
from .transformer_prob import TransformerProb
image_models = [
LENET5,
RESNET18,
RESNET34,
RESNET50,
RESNET101,
]
simple_models = [
LIN_REG,
LOG_REG,
FULLY_CONNECTED,
]
transformer_models = [
TRANSFORMER_ENCODER,
TRANSFORMER_ENCODER_DET,
TRANSFORMER_XL,
TRANSFORMER_XL_DET,
]
bert_squad = [BERT_BASE, DISTILBERT]
bert_glue = [BERT_GLUE]
def init(exp_dict):
model_name = exp_dict["model"]
if model_name in simple_models:
return SimpleProb(exp_dict)
elif model_name in image_models:
return ImageProb(exp_dict)
elif model_name in transformer_models:
return TransformerProb(exp_dict)
elif model_name in bert_squad:
return BertSquadProb(exp_dict)
raise Exception("Model {} not available".format(model_name))
| 950 | 20.133333 | 64 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/full_connected.py
|
import torch
from torch import nn
import copy
class FullyConnected(nn.Module):
def __init__(self, input_dim=3 * 32 * 32, width=100, depth=3, num_classes=10):
super(FullyConnected, self).__init__()
self.input_dim = input_dim
self.width = width
self.depth = depth
self.num_classes = num_classes
layers = self.get_layers()
self.fc = nn.Sequential(
nn.Linear(self.input_dim, self.width, bias=False),
nn.ReLU(inplace=True),
*layers,
nn.Linear(self.width, self.num_classes, bias=False),
)
def get_layers(self):
layers = []
for i in range(self.depth - 2):
layers.append(nn.Linear(self.width, self.width, bias=False))
layers.append(nn.ReLU())
return layers
def forward(self, x):
x = x.view(x.size(0), self.input_dim)
x = self.fc(x)
return x
| 935 | 26.529412 | 82 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/transformer_xl.py
|
import math
import functools
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
##### layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
##### residual connection
output = core_out + inp
else:
##### positionwise feed-forward
core_out = self.CoreNet(inp)
##### residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.q_net = nn.Linear(d_model, n_head * d_head, bias=False)
self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head**0.5)
self.pre_lnorm = pre_lnorm
def forward(self, h, attn_mask=None, mems=None):
##### multihead attention
# [hlen x bsz x n_head x d_head]
if mems is not None:
c = torch.cat([mems, h], 0)
else:
c = h
if self.pre_lnorm:
##### layer normalization
c = self.layer_norm(c)
head_q = self.q_net(h)
head_k, head_v = torch.chunk(self.kv_net(c), 2, -1)
head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head)
head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head)
head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head)
# [qlen x klen x bsz x n_head]
attn_score = torch.einsum("ibnd,jbnd->ijbn", (head_q, head_k))
attn_score.mul_(self.scale)
if attn_mask is not None and attn_mask.any().item():
attn_mask = attn_mask.bool()
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float("inf"))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float("inf"))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
# [qlen x klen x bsz x n_head] + [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head]
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, head_v))
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = h + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(h + attn_out)
return output
class RelMultiHeadAttn(nn.Module):
def __init__(
self,
n_head,
d_model,
d_head,
dropout,
dropatt=0,
tgt_len=None,
ext_len=None,
mem_len=None,
pre_lnorm=False,
):
super(RelMultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.dropout = dropout
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
self.scale = 1 / (d_head**0.5)
self.pre_lnorm = pre_lnorm
def _parallelogram_mask(self, h, w, left=False):
mask = torch.ones((h, w)).byte()
m = min(h, w)
mask[:m, :m] = torch.triu(mask[:m, :m])
mask[-m:, -m:] = torch.tril(mask[-m:, -m:])
if left:
return mask
else:
return mask.flip(0)
def _shift(self, x, qlen, klen, mask, left=False):
if qlen > 1:
zero_pad = torch.zeros(
(x.size(0), qlen - 1, x.size(2), x.size(3)),
device=x.device,
dtype=x.dtype,
)
else:
zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype)
if left:
mask = mask.flip(1)
x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1)
else:
x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1)
x = x_padded.masked_select(mask[:, :, None, None]).view(
qlen, klen, x.size(2), x.size(3)
)
return x
def _rel_shift(self, x, zero_triu=False):
zero_pad = torch.zeros(
(x.size(0), 1, *x.size()[2:]), device=x.device, dtype=x.dtype
)
x_padded = torch.cat([zero_pad, x], dim=1)
x_padded = x_padded.view(x.size(1) + 1, x.size(0), *x.size()[2:])
x = x_padded[1:].view_as(x)
if zero_triu:
ones = torch.ones((x.size(0), x.size(1)))
x = x * torch.tril(ones, x.size(1) - x.size(0))[:, :, None, None]
return x
def forward(self, w, r, attn_mask=None, mems=None):
raise NotImplementedError
class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False)
def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None):
qlen, rlen, bsz = w.size(0), r.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
r_head_k = self.r_net(r)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(
qlen, bsz, self.n_head, self.d_head
) # qlen x bsz x n_head x d_head
        w_head_k = w_head_k.view(
            klen, bsz, self.n_head, self.d_head
        )  # klen x bsz x n_head x d_head
        w_head_v = w_head_v.view(
            klen, bsz, self.n_head, self.d_head
        )  # klen x bsz x n_head x d_head
        r_head_k = r_head_k.view(
            rlen, self.n_head, self.d_head
        )  # rlen x n_head x d_head
#### compute attention score
rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head
AC = torch.einsum(
"ibnd,jbnd->ijbn", (rw_head_q, w_head_k)
) # qlen x klen x bsz x n_head
rr_head_q = w_head_q + r_r_bias
BD = torch.einsum(
"ibnd,jnd->ijbn", (rr_head_q, r_head_k)
) # qlen x klen x bsz x n_head
BD = self._rel_shift(BD)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
attn_mask = attn_mask.bool()
if attn_mask.dim() == 2:
attn_score = (
attn_score.float()
.masked_fill(attn_mask[None, :, :, None], -float("inf"))
.type_as(attn_score)
)
elif attn_mask.dim() == 3:
attn_score = (
attn_score.float()
.masked_fill(attn_mask[:, :, :, None], -float("inf"))
.type_as(attn_score)
)
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class RelLearnableMultiHeadAttn(RelMultiHeadAttn):
def __init__(self, *args, **kwargs):
super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs)
def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None):
# r_emb: [klen, n_head, d_head], used for term B
# r_w_bias: [n_head, d_head], used for term C
# r_bias: [klen, n_head], used for term D
qlen, bsz = w.size(0), w.size(1)
if mems is not None:
cat = torch.cat([mems, w], 0)
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(cat))
else:
w_heads = self.qkv_net(cat)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
w_head_q = w_head_q[-qlen:]
else:
if self.pre_lnorm:
w_heads = self.qkv_net(self.layer_norm(w))
else:
w_heads = self.qkv_net(w)
w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1)
klen = w_head_k.size(0)
w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head)
w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head)
w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head)
if klen > r_emb.size(0):
r_emb_pad = r_emb[0:1].expand(klen - r_emb.size(0), -1, -1)
r_emb = torch.cat([r_emb_pad, r_emb], 0)
r_bias_pad = r_bias[0:1].expand(klen - r_bias.size(0), -1)
r_bias = torch.cat([r_bias_pad, r_bias], 0)
else:
r_emb = r_emb[-klen:]
r_bias = r_bias[-klen:]
#### compute attention score
rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head
AC = torch.einsum(
"ibnd,jbnd->ijbn", (rw_head_q, w_head_k)
) # qlen x klen x bsz x n_head
B_ = torch.einsum(
"ibnd,jnd->ijbn", (w_head_q, r_emb)
) # qlen x klen x bsz x n_head
D_ = r_bias[None, :, None] # 1 x klen x 1 x n_head
BD = self._rel_shift(B_ + D_)
# [qlen x klen x bsz x n_head]
attn_score = AC + BD
attn_score.mul_(self.scale)
#### compute attention probability
if attn_mask is not None and attn_mask.any().item():
attn_mask = attn_mask.bool()
if attn_mask.dim() == 2:
attn_score.masked_fill_(attn_mask[None, :, :, None], -float("inf"))
elif attn_mask.dim() == 3:
attn_score.masked_fill_(attn_mask[:, :, :, None], -float("inf"))
# [qlen x klen x bsz x n_head]
attn_prob = F.softmax(attn_score, dim=1)
attn_prob = self.dropatt(attn_prob)
#### compute attention vector
attn_vec = torch.einsum("ijbn,jbnd->ibnd", (attn_prob, w_head_v))
# [qlen x bsz x n_head x d_head]
attn_vec = attn_vec.contiguous().view(
attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head
)
##### linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
##### residual connection
output = w + attn_out
else:
##### residual connection + layer normalization
output = self.layer_norm(w + attn_out)
return output
class DecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(DecoderLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
)
def forward(self, dec_inp, dec_attn_mask=None, mems=None):
output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems)
output = self.pos_ff(output)
return output
class RelLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(RelLearnableDecoderLayer, self).__init__()
self.dec_attn = RelLearnableMultiHeadAttn(
n_head, d_model, d_head, dropout, **kwargs
)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
)
def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(
dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems
)
output = self.pos_ff(output)
return output
class RelPartialLearnableDecoderLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs):
super(RelPartialLearnableDecoderLayer, self).__init__()
self.dec_attn = RelPartialLearnableMultiHeadAttn(
n_head, d_model, d_head, dropout, **kwargs
)
self.pos_ff = PositionwiseFF(
d_model, d_inner, dropout, pre_lnorm=kwargs.get("pre_lnorm")
)
def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None):
output = self.dec_attn(
dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems
)
output = self.pos_ff(output)
return output
class AdaptiveEmbedding(nn.Module):
def __init__(
self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False
):
super(AdaptiveEmbedding, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = cutoffs + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.emb_scale = d_proj**0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(
nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0)
)
if d_proj != d_embed:
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.view(-1)
emb_flat = torch.zeros(
[inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device
)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
emb_flat.index_copy_(0, indices_i, emb_i)
embed = emb_flat.view(*inp.size(), self.d_proj)
embed.mul_(self.emb_scale)
return embed
class MemTransformerLM(nn.Module):
def __init__(
self,
n_token,
n_layer,
n_head,
d_model,
d_head,
d_inner,
dropout,
dropatt,
tie_weight=True,
d_embed=None,
div_val=1,
tie_projs=[False],
pre_lnorm=False,
tgt_len=None,
ext_len=None,
mem_len=None,
cutoffs=[],
adapt_inp=False,
same_length=False,
attn_type=0,
clamp_len=-1,
sample_softmax=-1,
):
super(MemTransformerLM, self).__init__()
self.n_token = n_token
d_embed = d_model if d_embed is None else d_embed
self.d_embed = d_embed
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.word_emb = AdaptiveEmbedding(
n_token, d_embed, d_model, cutoffs, div_val=div_val
)
self.drop = nn.Dropout(dropout)
self.n_layer = n_layer
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
self.max_klen = tgt_len + ext_len + mem_len
self.attn_type = attn_type
self.layers = nn.ModuleList()
if attn_type == 0: # the default attention
for i in range(n_layer):
self.layers.append(
RelPartialLearnableDecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
tgt_len=tgt_len,
ext_len=ext_len,
mem_len=mem_len,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
)
)
elif attn_type == 1: # learnable embeddings
for i in range(n_layer):
self.layers.append(
RelLearnableDecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
tgt_len=tgt_len,
ext_len=ext_len,
mem_len=mem_len,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
)
)
elif attn_type in [2, 3]: # absolute embeddings
for i in range(n_layer):
self.layers.append(
DecoderLayer(
n_head,
d_model,
d_head,
d_inner,
dropout,
dropatt=dropatt,
pre_lnorm=pre_lnorm,
)
)
self.sample_softmax = sample_softmax
# use sampled softmax
if sample_softmax > 0:
self.out_layer = nn.Linear(d_model, n_token)
if tie_weight:
self.out_layer.weight = self.word_emb.weight
self.tie_weight = tie_weight
self.sampler = LogUniformSampler(n_token, sample_softmax)
# use adaptive softmax (including standard softmax)
else:
self.crit = ProjectedAdaptiveLogSoftmax(
n_token, d_embed, d_model, cutoffs, div_val=div_val
)
if tie_weight:
for i in range(len(self.crit.out_layers)):
self.crit.out_layers[i].weight = self.word_emb.emb_layers[i].weight
if tie_projs:
for i, tie_proj in enumerate(tie_projs):
if tie_proj and div_val == 1 and d_model != d_embed:
self.crit.out_projs[i] = self.word_emb.emb_projs[0]
elif tie_proj and div_val != 1:
self.crit.out_projs[i] = self.word_emb.emb_projs[i]
self.same_length = same_length
self.clamp_len = clamp_len
self._create_params()
def backward_compatible(self):
self.sample_softmax = -1
def _create_params(self):
if self.attn_type == 0: # default attention
self.pos_emb = PositionalEmbedding(self.d_model)
self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head))
elif self.attn_type == 1: # learnable
self.r_emb = nn.Parameter(
torch.Tensor(self.n_layer, self.max_klen, self.n_head, self.d_head)
)
self.r_w_bias = nn.Parameter(
torch.Tensor(self.n_layer, self.n_head, self.d_head)
)
self.r_bias = nn.Parameter(
torch.Tensor(self.n_layer, self.max_klen, self.n_head)
)
elif self.attn_type == 2: # absolute standard
self.pos_emb = PositionalEmbedding(self.d_model)
elif self.attn_type == 3: # absolute deeper SA
self.r_emb = nn.Parameter(
torch.Tensor(self.n_layer, self.max_klen, self.n_head, self.d_head)
)
def reset_length(self, tgt_len, ext_len, mem_len):
self.tgt_len = tgt_len
self.mem_len = mem_len
self.ext_len = ext_len
def init_mems(self):
if self.mem_len > 0:
mems = []
param = next(self.parameters())
for i in range(self.n_layer + 1):
empty = torch.empty(0, dtype=param.dtype, device=param.device)
mems.append(empty)
return mems
else:
return None
def _update_mems(self, hids, mems, qlen, mlen):
# does not deal with None
if mems is None:
return None
# mems is not None
assert len(hids) == len(mems), "len(hids) != len(mems)"
# There are `mlen + qlen` steps that can be cached into mems
# For the next step, the last `ext_len` of the `qlen` tokens
# will be used as the extended context. Hence, we only cache
# the tokens from `mlen + qlen - self.ext_len - self.mem_len`
# to `mlen + qlen - self.ext_len`.
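        # Worked example of the index arithmetic below (illustrative, assuming
        # ext_len=0 as used elsewhere in this repo): with mlen=4, qlen=3 and
        # mem_len=4, end_idx = 4 + 3 = 7 and beg_idx = 7 - 4 = 3, so the cached
        # states are positions 3..6 of the concatenated sequence.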
with torch.no_grad():
new_mems = []
end_idx = mlen + max(0, qlen - 0 - self.ext_len)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mems[i], hids[i]], dim=0)
new_mems.append(cat[beg_idx:end_idx].detach())
return new_mems
def _forward(self, dec_inp, mems=None):
qlen, bsz = dec_inp.size()
word_emb = self.word_emb(dec_inp)
mlen = mems[0].size(0) if mems is not None else 0
klen = mlen + qlen
if self.same_length:
all_ones = word_emb.new_ones(qlen, klen)
mask_len = klen - self.mem_len
if mask_len > 0:
mask_shift_len = qlen - mask_len
else:
mask_shift_len = qlen
dec_attn_mask = (
torch.triu(all_ones, 1 + mlen) + torch.tril(all_ones, -mask_shift_len)
).byte()[
:, :, None
] # -1
else:
dec_attn_mask = torch.triu(
word_emb.new_ones(qlen, klen), diagonal=1 + mlen
).byte()[:, :, None]
hids = []
if self.attn_type == 0: # default
pos_seq = torch.arange(
klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype
)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb)
pos_emb = self.drop(pos_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
pos_emb,
self.r_w_bias,
self.r_r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
)
hids.append(core_out)
elif self.attn_type == 1: # learnable
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
if self.clamp_len > 0:
r_emb = self.r_emb[i][-self.clamp_len :]
r_bias = self.r_bias[i][-self.clamp_len :]
else:
r_emb, r_bias = self.r_emb[i], self.r_bias[i]
mems_i = None if mems is None else mems[i]
core_out = layer(
core_out,
r_emb,
self.r_w_bias[i],
r_bias,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
)
hids.append(core_out)
elif self.attn_type == 2: # absolute
pos_seq = torch.arange(
klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype
)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
core_out = self.drop(word_emb + pos_emb[-qlen:])
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and i == 0:
mems_i += pos_emb[:mlen]
core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
elif self.attn_type == 3:
core_out = self.drop(word_emb)
hids.append(core_out)
for i, layer in enumerate(self.layers):
mems_i = None if mems is None else mems[i]
if mems_i is not None and mlen > 0:
cur_emb = self.r_emb[i][:-qlen]
cur_size = cur_emb.size(0)
if cur_size < mlen:
cur_emb_pad = cur_emb[0:1].expand(mlen - cur_size, -1, -1)
cur_emb = torch.cat([cur_emb_pad, cur_emb], 0)
else:
cur_emb = cur_emb[-mlen:]
mems_i += cur_emb.view(mlen, 1, -1)
core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1)
core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i)
hids.append(core_out)
core_out = self.drop(core_out)
new_mems = self._update_mems(hids, mems, mlen, qlen)
return core_out, new_mems
def forward(self, data, target, *mems):
# nn.DataParallel does not allow size(0) tensors to be broadcasted.
# So, have to initialize size(0) mems inside the model forward.
# Moreover, have to return new_mems to allow nn.DataParallel to piece
# them together.
if not mems:
mems = self.init_mems()
tgt_len = target.size(0)
hidden, new_mems = self._forward(data, mems=mems)
pred_hid = hidden[-tgt_len:]
if self.sample_softmax > 0 and self.training:
assert self.tie_weight
logit = sample_logits(
self.word_emb, self.out_layer.bias, target, pred_hid, self.sampler
)
loss = -F.log_softmax(logit, -1)[:, :, 0]
else:
loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1))
loss = loss.view(tgt_len, -1)
if new_mems is None:
return [loss]
else:
return [loss] + new_mems
CUDA_MAJOR = int(torch.version.cuda.split(".")[0])
CUDA_MINOR = int(torch.version.cuda.split(".")[1])
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
super(ProjectedAdaptiveLogSoftmax, self).__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(
torch.zeros(self.n_clusters, self.d_embed)
)
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
self.out_layers = nn.ModuleList()
self.out_projs = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs)):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed)))
else:
self.out_projs.append(None)
self.out_layers.append(nn.Linear(d_embed, n_token))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i)))
self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
proj_hid = F.linear(hidden, proj.t().contiguous())
logit = F.linear(proj_hid, weight, bias=bias)
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def forward(self, hidden, target, keep_order=False):
"""
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
"""
if hidden.size(0) != target.size(0):
raise RuntimeError(
"Input and target should have the same size " "in the batch dimension."
)
if self.n_clusters == 0:
logit = self._compute_logit(
hidden,
self.out_layers[0].weight,
self.out_layers[0].bias,
self.out_projs[0],
)
nll = (
-F.log_softmax(logit, dim=-1).gather(1, target.unsqueeze(1)).squeeze(1)
)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers[0].weight[l_idx:r_idx]
bias_i = self.out_layers[0].bias[l_idx:r_idx]
else:
weight_i = self.out_layers[i].weight
bias_i = self.out_layers[i].bias
if i == 0:
weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(
hidden_i, weight_i, bias_i, proj_i
)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] + tail_logprob_i.gather(
1, target_i[:, None]
).squeeze(1)
if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll
class LogUniformSampler(object):
def __init__(self, range_max, n_sample):
"""
Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
`P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
expected count can be approximated by 1 - (1 - p)^n
and we use a numerically stable version -expm1(num_tries * log1p(-p))
Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
"""
with torch.no_grad():
self.range_max = range_max
log_indices = torch.arange(1.0, range_max + 2.0, 1.0).log_()
self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
# print('P', self.dist.numpy().tolist()[-30:])
self.log_q = (
(-(-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
)
self.n_sample = n_sample
def sample(self, labels):
"""
labels: [b1, b2]
Return
true_log_probs: [b1, b2]
samp_log_probs: [n_sample]
neg_samples: [n_sample]
"""
# neg_samples = torch.empty(0).long()
n_sample = self.n_sample
n_tries = 2 * n_sample
with torch.no_grad():
neg_samples = torch.multinomial(
self.dist, n_tries, replacement=True
).unique()
device = labels.device
neg_samples = neg_samples.to(device)
true_log_probs = self.log_q[labels].to(device)
samp_log_probs = self.log_q[neg_samples].to(device)
return true_log_probs, samp_log_probs, neg_samples
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[:-n_sample].view(b1, b2, -1)
sample_w = all_w[-n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[:-n_sample].view(b1, b2)
sample_b = all_b[-n_sample:]
hit = (labels[:, :, None] == neg_samples).detach().bool()
true_logits = (
torch.einsum("ijk,ijk->ij", [true_w, inputs]) + true_b - true_log_probs
)
sample_logits = (
torch.einsum("lk,ijk->ijl", [sample_w, inputs]) + sample_b - samp_log_probs
)
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits
| 38,100 | 33.356177 | 119 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/resnet.py
|
import torchvision.models as models
def getResNet(size, pretrained=False):
if size == 50:
return models.resnet50(pretrained=pretrained)
elif size == 34:
return models.resnet34(pretrained=pretrained)
elif size == 101:
return models.resnet101(pretrained=pretrained)
elif size == 18:
return models.resnet18(pretrained=pretrained)
| 377 | 28.076923 | 54 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/bert_glue.py
|
import os
import random
from explib import config
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
PretrainedConfig,
)
def get_bert_glue(model_args):
num_labels, task_name = model_args["num_labels"], model_args["task_name"]
autoconfig = AutoConfig.from_pretrained(
"bert-base-cased", num_labels=num_labels, finetuning_task=task_name
)
model = AutoModelForSequenceClassification.from_pretrained(
"bert-base-cased",
from_tf=False,
config=autoconfig,
)
if "freeze_embedding" in model_args and model_args["freeze_embedding"]:
for param in model.bert.embeddings.parameters():
param.requires_grad = False
if "num_encoder_layers_to_freeze" in model_args:
num_layers = model_args["num_encoder_layers_to_freeze"]
for layer in model.bert.encoder.layer[:num_layers]:
for param in layer.parameters():
param.requires_grad = False
return model
| 1,138 | 28.205128 | 77 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/transformer_encoder.py
|
"""
Simple transformer architecture used as an introduction by the PyTorch team
https://pytorch.org/tutorials/beginner/transformer_tutorial.html
Version used:
https://github.com/pytorch/tutorials/blob/a981886fd8f1793ac5808b26e75dd50b788eb4e5/beginner_source/transformer_tutorial.py
Code covered by the PyTorch license; see the PyTorch tutorials repository for details.
Copyright (c) 2017-2021, PyTorch contributors
"""
import math
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
class TransformerEncoderModel(nn.Module):
def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):
super(TransformerEncoderModel, self).__init__()
self.model_type = "Transformer"
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
self.ntoken = ntoken
self.init_weights()
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask
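    # Worked example (comment added for clarity, not part of the original):
    # for sz=3 the mask returned above is
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # so position i can only attend to positions j <= i.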
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src, src_mask):
src = self.encoder(src) * math.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, src_mask)
output = self.decoder(output)
return output
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
| 2,568 | 33.253333 | 122 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/letnet5.py
|
import torch
from torch import nn
class LeNet5(nn.Module):
def __init__(self, n_classes, in_channels=3):
super(LeNet5, self).__init__()
self.feature_extractor = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=6, kernel_size=5, stride=1),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2),
nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2),
nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
nn.Tanh(),
)
self.classifier = nn.Sequential(
nn.Linear(in_features=120, out_features=84),
nn.Tanh(),
nn.Linear(in_features=84, out_features=n_classes),
)
def forward(self, x):
x = self.feature_extractor(x)
x = torch.flatten(x, 1)
logits = self.classifier(x)
return logits
| 961 | 30.032258 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/__init__.py
|
from .letnet5 import LeNet5
from .linear_model import LinearModel
from .transformer_encoder import TransformerEncoderModel
from .resnet import getResNet
from .full_connected import FullyConnected
from .transformer_xl import MemTransformerLM
from .bert_base_pretrained import (
get_bert_base_pretrained,
get_distilbert_base_pretrained,
)
from .bert_glue import get_bert_glue
from ..util import weights_init
LENET5 = "lenet5"
LIN_REG = "lin_reg"
LOG_REG = "log_reg"
TRANSFORMER_ENCODER = "transformer_encoder"
TRANSFORMER_ENCODER_DET = "transformer_encoder_deterministic"
RESNET50 = "resnet50"
RESNET34 = "resnet34"
RESNET18 = "resnet18"
RESNET101 = "resnet101"
FULLY_CONNECTED = "fc"
TRANSFORMER_XL = "transformer_xl"
TRANSFORMER_XL_DET = "transformer_xl_deterministic"
BERT_BASE = "bert_base_pretrained"
BERT_GLUE = "bert_base_cased"
DISTILBERT = "distilbert_base_pretrained"
AVAILABLE_MODELS = [
LENET5,
LIN_REG,
LOG_REG,
TRANSFORMER_ENCODER,
TRANSFORMER_ENCODER_DET,
RESNET101,
RESNET50,
RESNET34,
RESNET18,
FULLY_CONNECTED,
TRANSFORMER_XL,
TRANSFORMER_XL_DET,
BERT_BASE,
BERT_GLUE,
DISTILBERT,
]
def init(model_name, model_args=None, features_dim=0, transformer_len=0):
if model_name == LENET5:
if model_args is not None:
return LeNet5(10, in_channels=model_args["in_channels"])
return LeNet5(10)
elif model_name == LIN_REG:
return LinearModel(features_dim, 1)
elif model_name == LOG_REG:
return LinearModel(features_dim, 2)
elif model_name == TRANSFORMER_ENCODER:
model = TransformerEncoderModel(transformer_len, 200, 2, 200, 2, 0.2)
model.apply(weights_init)
return model
elif model_name == TRANSFORMER_ENCODER_DET:
model = TransformerEncoderModel(transformer_len, 200, 2, 200, 2, dropout=0.0)
model.apply(weights_init)
return model
elif model_name == RESNET50:
return getResNet(50)
elif model_name == RESNET34:
return getResNet(34)
elif model_name == RESNET18:
return getResNet(18)
elif model_name == RESNET101:
return getResNet(101)
elif model_name == FULLY_CONNECTED:
return FullyConnected()
elif model_name == TRANSFORMER_XL:
model = MemTransformerLM(
transformer_len,
model_args["n_layer"],
model_args["n_head"],
model_args["d_model"],
model_args["d_head"],
model_args["d_inner"],
model_args["dropout"],
model_args["dropatt"],
tie_weight=False,
d_embed=model_args["d_model"],
tgt_len=model_args["tgt_len"],
ext_len=0,
mem_len=model_args["mem_len"],
same_length=False,
)
model.apply(weights_init)
model.word_emb.apply(weights_init)
return model
elif model_name == TRANSFORMER_XL_DET:
model = MemTransformerLM(
transformer_len,
model_args["n_layer"],
model_args["n_head"],
model_args["d_model"],
model_args["d_head"],
model_args["d_inner"],
dropout=0,
dropatt=0,
tie_weight=False,
d_embed=model_args["d_model"],
tgt_len=model_args["tgt_len"],
ext_len=0,
mem_len=model_args["mem_len"],
same_length=False,
)
model.apply(weights_init)
model.word_emb.apply(weights_init)
return model
elif model_name == BERT_BASE:
return get_bert_base_pretrained()
elif model_name == DISTILBERT:
return get_distilbert_base_pretrained()
elif model_name == BERT_GLUE:
return get_bert_glue(model_args)
else:
raise Exception("Model {} not available".format(model_name))
| 3,881 | 27.544118 | 85 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/bert_base_pretrained.py
|
from datasets import load_metric
import numpy as np
from typing import Optional, Tuple
import json
import collections
import os
import torch
from transformers import (
AutoConfig,
AutoModelForQuestionAnswering,
EvalPrediction,
)
from .. import logging
def get_bert_base_pretrained():
config = AutoConfig.from_pretrained("bert-base-uncased")
model = AutoModelForQuestionAnswering.from_pretrained(
"bert-base-uncased",
from_tf=False,
config=config,
)
return model
def get_distilbert_base_pretrained():
config = AutoConfig.from_pretrained("distilbert-base-uncased")
model = AutoModelForQuestionAnswering.from_pretrained(
"distilbert-base-uncased",
from_tf=False,
config=config,
)
return model
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
log_level: Optional[int] = None,
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing functions for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
``logging`` log level (e.g., ``logging.WARNING``)
"""
assert (
len(predictions) == 2
), "`predictions` should be a tuple with two elements (start_logits, end_logits)."
all_start_logits, all_end_logits = predictions
assert len(predictions[0]) == len(
features
), f"Got {len(predictions[0])} predictions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
# logger.setLevel(log_level)
# logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.")
# Let's loop over all the examples!
for example_index, example in enumerate(examples):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
            # This is what will allow us to map some of the positions in our logits to spans of text in the
            # original context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get(
"token_is_max_context", None
)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if (
min_null_prediction is None
or min_null_prediction["score"] > feature_null_score
):
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[
-1 : -n_best_size - 1 : -1
].tolist()
end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
                    # Don't consider answers that don't have the maximum context available (if such information is
                    # provided).
if (
token_is_max_context is not None
and not token_is_max_context.get(str(start_index), False)
):
continue
prelim_predictions.append(
{
"offsets": (
offset_mapping[start_index][0],
offset_mapping[end_index][1],
),
"score": start_logits[start_index] + end_logits[end_index],
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(
prelim_predictions, key=lambda x: x["score"], reverse=True
)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(
p["offsets"] == (0, 0) for p in predictions
):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
        # In the very rare edge case where we do not have a single non-null prediction, we create a fake
        # prediction to avoid failure.
if len(predictions) == 0 or (
len(predictions) == 1 and predictions[0]["text"] == ""
):
predictions.insert(
0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}
)
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = (
null_score
- best_non_null_pred["start_logit"]
- best_non_null_pred["end_logit"]
)
scores_diff_json[example["id"]] = float(
score_diff
) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
all_predictions[example["id"]] = best_non_null_pred["text"]
        # Make `predictions` JSON-serializable by casting numpy floats back to plain Python floats.
all_nbest_json[example["id"]] = [
{
k: (
float(v)
if isinstance(v, (np.float16, np.float32, np.float64))
else v
)
for k, v in pred.items()
}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir,
"predictions.json" if prefix is None else f"{prefix}_predictions.json",
)
nbest_file = os.path.join(
output_dir,
"nbest_predictions.json"
if prefix is None
else f"{prefix}_nbest_predictions.json",
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir,
"null_odds.json" if prefix is None else f"{prefix}_null_odds.json",
)
# logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
# logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
# logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
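# Minimal usage sketch (illustrative only; not called anywhere in this module).
# It assumes toy `datasets.Dataset` inputs, so that both the column access
# (`examples["id"]`) and the row iteration used above behave as expected.
def _toy_postprocess_example():
    from datasets import Dataset
    toy_examples = Dataset.from_dict(
        {
            "id": ["ex0"],
            "context": ["Paris is the capital of France."],
            "answers": [{"text": ["Paris"], "answer_start": [0]}],
        }
    )
    toy_features = Dataset.from_dict(
        {
            "example_id": ["ex0"],
            # token 0 plays the role of [CLS]; tokens 1-3 cover spans of the context
            "offset_mapping": [[[0, 0], [0, 5], [6, 8], [9, 31]]],
        }
    )
    start_logits = np.array([[0.1, 5.0, 0.2, 0.1]])
    end_logits = np.array([[0.1, 5.0, 0.3, 0.2]])
    preds = postprocess_qa_predictions(
        toy_examples, toy_features, (start_logits, end_logits)
    )
    # preds == OrderedDict([("ex0", "Paris")])
    return preds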
# Post-processing:
def post_processing_function(examples, features, predictions, stage="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
predictions = postprocess_qa_predictions(
examples=examples,
features=features,
predictions=predictions,
version_2_with_negative=False,
n_best_size=20,
max_answer_length=30,
null_score_diff_threshold=0.0,
output_dir=None,
prefix=stage,
)
# Format the result to the format the metric expects.
# if args.version_2_with_negative:
# formatted_predictions = [
# {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
# ]
# else:
formatted_predictions = [
{"id": k, "prediction_text": v} for k, v in predictions.items()
]
references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
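# The squad metric downstream then receives entries shaped like the following
# (illustrative SQuAD-format values):
#   predictions: [{"id": "ex0", "prediction_text": "Paris"}]
#   references:  [{"id": "ex0", "answers": {"text": ["Paris"], "answer_start": [0]}}]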
# Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
"""
Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
Args:
        start_or_end_logits(:obj:`tensor`):
            The output predictions of the model. Pass either the start logits or the end logits, not both.
        dataset: Evaluation dataset
        max_len(:obj:`int`):
            The maximum length of the output tensor (see the model.eval() part of the evaluation loop for more details).
"""
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)
    # Now that we have created the array, populate it with the outputs gathered using accelerator.gather
for i, output_logit in enumerate(start_or_end_logits): # populate columns
        # Copy the whole batch of logits into the corresponding rows of the newly created array,
        # then advance `step` by the batch size after every iteration
batch_size = output_logit.shape[0]
cols = output_logit.shape[1]
if step + batch_size < len(dataset):
logits_concat[step : step + batch_size, :cols] = output_logit
else:
logits_concat[step:, :cols] = output_logit[: len(dataset) - step]
step += batch_size
return logits_concat
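# Minimal illustration (hypothetical shapes; not used by the training loop):
# two gathered batches with different sequence lengths are padded into a single
# (len(dataset), max_len) array, with untouched cells left at -100.
def _toy_fill_example():
    batches = [np.ones((2, 3)), 2 * np.ones((1, 2))]  # batch sizes 2 and 1
    fake_dataset = [None, None, None]  # only len(dataset) is used
    out = create_and_fill_np_array(batches, fake_dataset, max_len=3)
    # out[0:2, :] == 1, out[2, :2] == 2 and out[2, 2] == -100
    return out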
@torch.no_grad()
def evaluate(problem, model, dataloader, accelerator, dataset, examples, metric):
all_start_logits = []
all_end_logits = []
model.eval()
epoch_loss = 0
iteration_counter = 0
accumulation_counter = 0
logger = logging.logging.getLogger(__name__)
logger.debug("Starting evaluate loop")
for step, batch in enumerate(dataloader):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
iteration_counter += 1
epoch_loss += loss.item()
if (
not problem.grad_accumulate
or iteration_counter % problem.accumulate_steps == 0
):
# self.optim.step()
# self.optim.zero_grad()
accumulation_counter += 1
if problem.fake_full_batch_mode and accumulation_counter == 1:
break
start_logits = outputs.start_logits
end_logits = outputs.end_logits
start_logits = accelerator.pad_across_processes(
start_logits, dim=1, pad_index=-100
)
end_logits = accelerator.pad_across_processes(
end_logits, dim=1, pad_index=-100
)
all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
if problem.dummy_run:
logger.debug("Breaking eval loop due to dummy run")
break
logger.debug("End evaluate loop")
max_len = max(
[x.shape[1] for x in all_start_logits]
) # Get the max_length of the tensor
logger.debug("Concatenate results")
# concatenate the numpy array
start_logits_concat = create_and_fill_np_array(all_start_logits, dataset, max_len)
end_logits_concat = create_and_fill_np_array(all_end_logits, dataset, max_len)
# delete the list of numpy arrays
del all_start_logits
del all_end_logits
logger.debug("Loading Squad Metric")
logger.debug("Post processing function")
outputs_numpy = (start_logits_concat, end_logits_concat)
prediction = post_processing_function(examples, dataset, outputs_numpy)
logger.debug("Computing metric")
eval_metric = metric.compute(
predictions=prediction.predictions, references=prediction.label_ids
)
epoch_loss = epoch_loss / max(accumulation_counter, 1)
return eval_metric, epoch_loss
# deprecated
# @torch.no_grad()
# def eval_loss(problem, model, dataloader):
# model.eval()
# epoch_loss = 0
# iteration_counter = 0
# accumulation_counter = 0
# for X in dataloader:
# if iteration_counter % 1000 == 0:
# print(iteration_counter)
# loss = model(**X).loss
# if problem.grad_accumulate:
# loss = loss / problem.accumulate_steps
# # print(loss)
# iteration_counter += 1
# epoch_loss += loss.item()
# if (
# not problem.grad_accumulate
# or iteration_counter % problem.accumulate_steps == 0
# ):
# # self.optim.step()
# # self.optim.zero_grad()
# accumulation_counter += 1
# # if n % 500 == 0:
# # self.logging({"eval_loss_iter": n})
# if problem.fake_full_batch_mode and accumulation_counter == 1:
# break
# if problem.dummy_run:
# break
# epoch_loss = epoch_loss / accumulation_counter
# return epoch_loss
| 18,940 | 39.997835 | 119 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/model/linear_model.py
|
import torch
class LinearModel(torch.nn.Module):
def __init__(self, inputSize, outputSize):
super(LinearModel, self).__init__()
self.linear = torch.nn.Linear(inputSize, outputSize)
def forward(self, X):
out = self.linear(X)
return out
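# Minimal usage sketch (hypothetical sizes): map a batch of 4 inputs with 3
# features each to a single output per input.
if __name__ == "__main__":
    model = LinearModel(inputSize=3, outputSize=1)
    X = torch.randn(4, 3)
    print(model(X).shape)  # torch.Size([4, 1])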
| 278 | 22.25 | 60 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/expmaker/sbatch_writers.py
|
import os
import textwrap
from explib import config, logging
from explib.expmaker import (
slurm_configs,
get_jobs_folder,
get_exp_full_path_json,
load_summary,
get_job_path,
)
from explib.expmaker.experiment_defs import make_uuid, make_wuuid
from explib.expmaker.slurm_configs import SlurmConfigIssue
def make_sbatch_config(slurm_config_name, jobarray_len=None):
slurm_config = slurm_configs.SLURM_CONFIGS[slurm_config_name]
gpu_str = ""
if slurm_config["gpu"] is not None:
gpu_str = "#SBATCH --gres=gpu:{gpu}".format(gpu=slurm_config["gpu"])
notification_str = ""
if config.get_notification_email() is not None:
notification_str = textwrap.dedent(
f"""
#SBATCH --mail-user={config.get_notification_email()}
#SBATCH --mail-type=ALL
"""
)
jobarray_str = ""
if jobarray_len is not None:
jobarray_str = textwrap.dedent(
f"""
#SBATCH --array=0-{jobarray_len-1}
"""
)
account_or_partition_str = f"#SBATCH --account={config.get_slurm_account()}"
if config.get_slurm_partition() is not None:
account_or_partition_str = f"#SBATCH --partition={config.get_slurm_partition()}"
conda_load_env_str = ""
if config.get_conda_env() is not None:
conda_load_env_str = f"conda activate {config.get_conda_env()}"
env_load_str = ""
if config.get_env_file_to_source() is not None:
env_load_str = f". {config.get_env_file_to_source()}"
return textwrap.dedent(
"""
{account_or_partition_str}
#SBATCH --mem={mem}
#SBATCH --time={time}
#SBATCH --cpus-per-task={cpus}
{notification_str}
{gpu_str}
{jobarray_str}
{conda_load_env_str}
{env_load_str}
export TMPDIR=$SLURM_TMPDIR
"""
).format(
account_or_partition_str=account_or_partition_str,
mem=slurm_config["mem"],
time=slurm_config["time"],
cpus=slurm_config["cpus-per-task"],
notification_str=notification_str,
gpu_str=gpu_str,
jobarray_str=jobarray_str,
conda_load_env_str=conda_load_env_str,
env_load_str=env_load_str,
)
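# For reference, with slurm config SMALL_GPU_4H, a hypothetical account
# "def-example", and no email / conda env / env file configured, the rendered
# header contains:
#   #SBATCH --account=def-example
#   #SBATCH --mem=16000M
#   #SBATCH --time=0-04:00
#   #SBATCH --cpus-per-task=6
#   #SBATCH --gres=gpu:p100
#   export TMPDIR=$SLURM_TMPDIR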
def filter_experiments_for_slurm_config(summary, slurm_config_name):
"""Filters an experiment dictionary summary for experiments with slurm_config.
`experiment_dicts` is a dictionary of `hash => exp_dict`
Raises a SlurmConfigIssue if there is an experiment with no slurm config
"""
filtered_dict = {}
for key, exp_dict in summary.items():
if "slurm_config" not in exp_dict:
raise SlurmConfigIssue(
f"Slurm config not found in experiment {key}. Full exp def: {exp_dict}."
)
if exp_dict["slurm_config"] == slurm_config_name:
filtered_dict[key] = exp_dict
return filtered_dict
def make_slurm_job(exp_dict, json_path, out_path):
if "slurm_config" not in exp_dict:
raise SlurmConfigIssue(f"No slurm_config in experiment dictionary ({exp_dict})")
config_name = exp_dict["slurm_config"]
if config_name not in slurm_configs.SLURM_CONFIGS:
raise SlurmConfigIssue(
f"Slurm config '{config_name}' unknown"
+ f"(possible: {slurm_configs.SLURM_CONFIGS.keys()})"
)
with open(out_path, "w+") as file:
file.writelines(
textwrap.dedent(
"""#!/bin/sh
{sbatch_config}
python -m explib {json_path}
exit
"""
).format(
sbatch_config=make_sbatch_config(config_name),
json_path=json_path,
)
)
file.close()
def make_jobarray_file(exp_name, summary, slurm_config_name, filter_should_run_wuuid):
jobs_folder = get_jobs_folder(exp_name)
out_path = os.path.join(
jobs_folder, f"run_all_{exp_name}_array_{slurm_config_name}.sh"
)
summary_for_config = filter_experiments_for_slurm_config(summary, slurm_config_name)
summary_for_config_filtered = {
make_wuuid(exp_dict): exp_dict
for uuid, exp_dict in summary_for_config.items()
if filter_should_run_wuuid(make_wuuid(exp_dict))
}
with open(out_path, "w+") as file:
file.writelines(
textwrap.dedent(
"""#!/bin/sh
{sbatch_config}
"""
).format(
sbatch_config=make_sbatch_config(
slurm_config_name, jobarray_len=len(summary_for_config_filtered)
),
)
)
file.writelines(
[
textwrap.dedent(
f"""
if [ $SLURM_ARRAY_TASK_ID -eq {i} ]
then
python -m explib {get_exp_full_path_json(exp_name, exp_dict)}
fi
"""
)
for i, exp_dict in enumerate(summary_for_config_filtered.values())
]
)
file.writelines("exit")
file.writelines("\n")
file.close()
logging.info(f"Created job array file for config {slurm_config_name} at {out_path}")
def create_slurm_jobarrays(exp_name, filter_should_run_wuuid=None):
"""Creates one jobarray file per SLURM config"""
summary = load_summary(exp_name)
if filter_should_run_wuuid is None:
filter_should_run_wuuid = lambda x: True
unique_slurm_configs = list(
set([exp_dict.get("slurm_config", None) for exp_dict in summary.values()])
)
for slurm_config in unique_slurm_configs:
make_jobarray_file(exp_name, summary, slurm_config, filter_should_run_wuuid)
def create_slurm_single_job_file(exp_name, filter_should_run_wuuid=None):
"""Creates one sbatch file to run all experiments sequentially"""
summary = load_summary(exp_name)
if filter_should_run_wuuid is None:
filter_should_run_wuuid = lambda x: True
logging.info(f"Checking json files...")
unique_slurm_configs = set(
[exp_dict.get("slurm_config", None) for exp_dict in summary.values()]
)
if len(unique_slurm_configs) != 1:
raise SlurmConfigIssue(
f"Expected single Slurm config, multiple requested {unique_slurm_configs}"
)
slurm_config_name = unique_slurm_configs.pop()
jobs_folder = get_jobs_folder(exp_name)
out_path = os.path.join(jobs_folder, f"run_all_{exp_name}.sh")
with open(out_path, "w+") as file:
file.writelines(
textwrap.dedent(
"""#!/bin/sh
{sbatch_config}
"""
).format(
sbatch_config=make_sbatch_config(slurm_config_name),
)
)
file.writelines(
[
textwrap.dedent(
f"""
python -m explib {get_exp_full_path_json(exp_name, exp_dict)}
"""
)
for exp_dict in summary.values()
if filter_should_run_wuuid(make_wuuid(exp_dict))
]
)
file.writelines("exit")
file.writelines("\n")
file.close()
logging.info(f"Created single job file, run with > sbatch {out_path}")
def create_slurm_job_files(exp_name, filter_should_run_wuuid=None):
"""Creates one sbatch file per experiment"""
jobs_folder = get_jobs_folder(exp_name)
if filter_should_run_wuuid is None:
filter_should_run_wuuid = lambda x: True
summary = load_summary(exp_name)
try:
logging.info(f"Creating job files in {jobs_folder}")
out_paths = []
for uuid, exp_dict in summary.items():
if filter_should_run_wuuid(make_wuuid(exp_dict)):
json_path = get_exp_full_path_json(exp_name, exp_dict)
out_path = get_job_path(exp_name, exp_dict)
make_slurm_job(exp_dict, json_path, out_path)
out_paths.append(out_path)
logging.info(f"Created {len(out_paths)} job files")
run_all_filepath = os.path.join(get_jobs_folder(exp_name), f"run_{exp_name}.sh")
with open(run_all_filepath, "w+") as fp:
fp.writelines([f"sbatch {out_path}\n" for out_path in out_paths])
logging.info(f"Submit all jobs with > source {run_all_filepath}")
except SlurmConfigIssue as e:
logging.warn("Slurm config not found - skipping making sbatch files")
logging.warn(e, exc_info=1)
| 8,757 | 32.684615 | 88 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/expmaker/slurm_configs.py
|
from functools import partial
def set_config(time, gpu, mem, cpus):
return {
"gpu": gpu,
"mem": mem,
"time": time,
"cpus-per-task": cpus,
}
small_cpu = partial(set_config, gpu=None, mem="12000M", cpus=2)
medium_cpu = partial(set_config, gpu=None, mem="32000M", cpus=8)
small_gpu = partial(set_config, gpu="p100", mem="16000M", cpus=6)
large_gpu = partial(set_config, gpu="v100l", mem="48000M", cpus=8)
default_gpu = partial(set_config, gpu="1", mem="30000M", cpus=5)
narval_gpu = partial(set_config, gpu="a100", mem="48000M", cpus=8)
SMALL_CPU_2H = "SMALL_CPU_2H"
SMALL_CPU_4H = "SMALL_CPU_4H"
SMALL_CPU_8H = "SMALL_CPU_8H"
SMALL_CPU_16H = "SMALL_CPU_16H"
SMALL_CPU_24H = "SMALL_CPU_24H"
MEDIUM_CPU_2H = "MEDIUM_CPU_2H"
MEDIUM_CPU_4H = "MEDIUM_CPU_4H"
MEDIUM_CPU_8H = "MEDIUM_CPU_8H"
MEDIUM_CPU_16H = "MEDIUM_CPU_16H"
MEDIUM_CPU_24H = "MEDIUM_CPU_24H"
DEFAULT_GPU_2H = "DEFAULT_GPU_2H"
DEFAULT_GPU_4H = "DEFAULT_GPU_4H"
DEFAULT_GPU_8H = "DEFAULT_GPU_8H"
DEFAULT_GPU_12H = "DEFAULT_GPU_12H"
DEFAULT_GPU_16H = "DEFAULT_GPU_16H"
DEFAULT_GPU_24H = "DEFAULT_GPU_24H"
DEFAULT_GPU_36H = "DEFAULT_GPU_36H"
NARVAL_GPU_2H = "NARVAL_GPU_2H"
NARVAL_GPU_4H = "NARVAL_GPU_4H"
NARVAL_GPU_8H = "NARVAL_GPU_8H"
NARVAL_GPU_16H = "NARVAL_GPU_16H"
NARVAL_GPU_24H = "NARVAL_GPU_24H"
SMALL_GPU_1H = "SMALL_GPU_1H"
SMALL_GPU_2H = "SMALL_GPU_2H"
SMALL_GPU_4H = "SMALL_GPU_4H"
SMALL_GPU_8H = "SMALL_GPU_8H"
SMALL_GPU_12H = "SMALL_GPU_12H"
SMALL_GPU_16H = "SMALL_GPU_16H"
LARGE_GPU_1H = "LARGE_GPU_1H"
LARGE_GPU_2H = "LARGE_GPU_2H"
LARGE_GPU_2HALFH = "LARGE_GPU_2HALFH"
LARGE_GPU_4H = "LARGE_GPU_4H"
LARGE_GPU_8H = "LARGE_GPU_8H"
LARGE_GPU_6H = "LARGE_GPU_6H"
LARGE_GPU_12H = "LARGE_GPU_12H"
LARGE_GPU_16H = "LARGE_GPU_16H"
LARGE_GPU_24H = "LARGE_GPU_24H"
LARGE_GPU_36H = "LARGE_GPU_36H"
LARGE_GPU_72H = "LARGE_GPU_72H"
SLURM_CONFIGS = {
SMALL_CPU_2H: small_cpu("0-02:00"),
SMALL_CPU_4H: small_cpu("0-04:00"),
SMALL_CPU_8H: small_cpu("0-08:00"),
SMALL_CPU_16H: small_cpu("0-16:00"),
SMALL_CPU_24H: small_cpu("0-24:00"),
MEDIUM_CPU_2H: medium_cpu("0-02:00"),
MEDIUM_CPU_4H: medium_cpu("0-04:00"),
MEDIUM_CPU_8H: medium_cpu("0-08:00"),
MEDIUM_CPU_16H: medium_cpu("0-16:00"),
MEDIUM_CPU_24H: medium_cpu("0-24:00"),
DEFAULT_GPU_2H: default_gpu("0-02:00"),
DEFAULT_GPU_4H: default_gpu("0-04:00"),
DEFAULT_GPU_8H: default_gpu("0-08:00"),
DEFAULT_GPU_12H: default_gpu("0-12:00"),
DEFAULT_GPU_16H: default_gpu("0-16:00"),
DEFAULT_GPU_24H: default_gpu("0-24:00"),
DEFAULT_GPU_36H: default_gpu("1-12:00"),
NARVAL_GPU_2H: narval_gpu("0-02:00"),
NARVAL_GPU_4H: narval_gpu("0-04:00"),
NARVAL_GPU_8H: narval_gpu("0-08:00"),
NARVAL_GPU_16H: narval_gpu("0-16:00"),
NARVAL_GPU_24H: narval_gpu("0-24:00"),
SMALL_GPU_1H: small_gpu("0-01:00"),
SMALL_GPU_2H: small_gpu("0-02:00"),
SMALL_GPU_4H: small_gpu("0-04:00"),
SMALL_GPU_8H: small_gpu("0-08:00"),
SMALL_GPU_12H: small_gpu("0-12:00"),
SMALL_GPU_16H: small_gpu("0-16:00"),
LARGE_GPU_1H: large_gpu("0-01:10"),
LARGE_GPU_2H: large_gpu("0-02:00"),
LARGE_GPU_2HALFH: large_gpu("0-02:30"),
LARGE_GPU_4H: large_gpu("0-04:00"),
LARGE_GPU_6H: large_gpu("0-06:00"),
LARGE_GPU_8H: large_gpu("0-08:00"),
LARGE_GPU_12H: large_gpu("0-12:00"),
LARGE_GPU_16H: large_gpu("0-16:00"),
LARGE_GPU_24H: large_gpu("0-24:00"),
LARGE_GPU_36H: large_gpu("1-12:00"),
LARGE_GPU_72H: large_gpu("3-00:00"),
}
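# Example (illustrative; not used anywhere in explib): a named config resolves
# to the plain dict consumed by make_sbatch_config in sbatch_writers.
def _example_resolved_config():
    assert SLURM_CONFIGS[SMALL_GPU_4H] == {
        "gpu": "p100",
        "mem": "16000M",
        "time": "0-04:00",
        "cpus-per-task": 6,
    }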
class SlurmConfigIssue(ValueError):
pass
| 3,536 | 32.685714 | 66 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/expmaker/wandb_reporting.py
|
from explib.expmaker.experiment_defs import load_summary
from explib.expmaker.experiment_defs import make_wuuid
from explib.results.cleanup import process_tags
import explib.results.data as data_h
import pandas as pd
from functools import lru_cache
import json
def wuuid_to_successful_run(exp_name):
"""Returns a dictionary of uuid: boolean indicating whether that experiment
UUID has one succesful run on Wandb.
"""
df = load_summary_and_wandb_success(exp_name)
newdict = {}
for row in df.to_dict(orient="records"):
newdict[row["wuuid"]] = row["has_a_finished_run"]
return newdict
@lru_cache(maxsize=None, typed=False)
def load_summary_and_wandb_success(exp_name, download=True):
"""Returns the load_summary for exp_name augmented with a
"has_a_finished_run" column that checks for Wandb completion."""
local_summary = load_summary(exp_name)
local_df = pd.DataFrame(
[
{"uuid": k, "wuuid": make_wuuid(exp_dict), **data_h.flatten_dict(exp_dict)}
for k, exp_dict in local_summary.items()
]
)
if download:
data_h.download_summary()
wandb_df = data_h.get_summary(ignore_per_iter_data=True)
wandb_df["success"] = wandb_df["status"] == "Success"
wandb_df["finished"] = wandb_df["status"] == "Finished with no summary"
wandb_df["diverged"] = wandb_df["status"] == "Diverged"
wandb_df, _ = process_tags(wandb_df, None)
wandb_df["finished"] = (
wandb_df["success"] | wandb_df["diverged"] | wandb_df["finished"]
)
wandb_df["success"] = wandb_df["finished"] & ~wandb_df["bad_tag"]
wandb_success_df = (
wandb_df.groupby("wuuid")["success"]
.any()
.rename("has_a_finished_run")
.to_frame()
)
merged_df = pd.merge(
left=local_df,
left_on="wuuid",
right=wandb_success_df,
right_on="wuuid",
how="left",
)
merged_df["has_a_finished_run"] = merged_df["has_a_finished_run"].fillna(False)
return merged_df
def check_status(exp_name, hyperparam_names=None, download=False):
"""
Prints the % of runs that have finished on Wandb.
If hyperparam_names is a list of hyperparameter names in wandb format
(eg "opt.b1"), breaks it down per hyperparam.
"""
merged_df = load_summary_and_wandb_success(exp_name, download=download)
print(f"Total completion; {merged_df['has_a_finished_run'].mean()*100:.2f}%")
if hyperparam_names is not None:
for colname in hyperparam_names:
print(f" Completion for [{colname}]")
count = merged_df.groupby(colname)["has_a_finished_run"].count()
avg_finished = merged_df.groupby(colname)["has_a_finished_run"].mean()
for key in sorted(list(avg_finished.keys())):
print(
f" {key:>24} : {avg_finished[key]*100: >#04.2f}% (out of {count[key]})"
)
| 2,946 | 34.083333 | 94 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/expmaker/__init__.py
|
"""Helpers to create experiments."""
import argparse
import numpy as np
from explib import cli_helper, config, logging
from explib.expmaker import wandb_reporting
from explib.expmaker.experiment_defs import (
create_experiment_definitions,
exp_dict_from_str,
exp_dict_to_str,
get_exp_def_folder,
get_exp_full_path_json,
get_job_path,
get_jobs_folder,
load_summary,
)
from explib.expmaker.sbatch_writers import (
create_slurm_job_files,
create_slurm_jobarrays,
create_slurm_single_job_file,
)
BASE_PROBLEMS = {
"MNIST_LENET5": {
"loss_func": "logloss",
"dataset": "mnist",
"model": "lenet5",
},
"CIFAR10_RESNET18": {
"loss_func": "logloss",
"dataset": "cifar10",
"model": "resnet18",
},
"CIFAR10_RESNET34": {
"loss_func": "logloss",
"dataset": "cifar10",
"model": "resnet34",
},
"CIFAR10_RESNET50": {
"loss_func": "logloss",
"dataset": "cifar10",
"model": "resnet50",
},
"CIFAR100_RESNET50": {
"loss_func": "logloss",
"dataset": "cifar100",
"model": "resnet50",
},
"CIFAR100_RESNET34": {
"loss_func": "logloss",
"dataset": "cifar100",
"model": "resnet34",
},
"PTB_TRANSFORMERXL": {
"loss_func": "logloss",
"dataset": "ptb",
"model": "transformer_xl",
"model_args": {
"n_layer": 6,
"d_model": 512,
"n_head": 8,
"d_head": 64,
"d_inner": 2048,
"dropout": 0.1,
"dropatt": 0.0,
"tgt_len": 128,
"mem_len": 128,
},
},
"PTB_TRANSFORMERXL_DET": {
"loss_func": "logloss",
"dataset": "ptb",
"model": "transformer_xl_deterministic",
"model_args": {
"n_layer": 6,
"d_model": 512,
"n_head": 8,
"d_head": 64,
"d_inner": 2048,
"tgt_len": 128,
"mem_len": 128,
},
},
"WT2_TRANSFORMERXL": {
"loss_func": "logloss",
"dataset": "wikitext2",
"model": "transformer_xl",
"model_args": {
"n_layer": 6,
"d_model": 512,
"n_head": 8,
"d_head": 64,
"d_inner": 2048,
"dropout": 0.1,
"dropatt": 0.0,
"tgt_len": 128,
"mem_len": 128,
},
},
"WT2_TRANSFORMERXL_DET": {
"loss_func": "logloss",
"dataset": "wikitext2",
"model": "transformer_xl_deterministic",
"model_args": {
"n_layer": 6,
"d_model": 512,
"n_head": 8,
"d_head": 64,
"d_inner": 2048,
"tgt_len": 128,
"mem_len": 128,
},
},
"WT2_TENC": {
"loss_func": "logloss",
"dataset": "wikitext2",
"model": "transformer_encoder",
"model_args": {
"tgt_len": 35,
},
},
"PTB_TENC": {
"loss_func": "logloss",
"dataset": "ptb",
"model": "transformer_encoder",
"model_args": {
"tgt_len": 35,
},
},
"WT2_TENC_DET": {
"loss_func": "logloss",
"dataset": "wikitext2",
"model": "transformer_encoder_deterministic",
"model_args": {
"tgt_len": 35,
},
},
"PTB_TENC_DET": {
"loss_func": "logloss",
"dataset": "ptb",
"model": "transformer_encoder_deterministic",
"model_args": {
"tgt_len": 35,
},
},
"DB_SQD": {
"dataset": "squad",
"model": "distilbert_base_pretrained",
"model_args": {
"tgt_len": 384,
"doc_stride": 128,
},
},
}
PROB_MNIST_LENET5 = BASE_PROBLEMS["MNIST_LENET5"]
PROB_CIFAR10_RESNET18 = BASE_PROBLEMS["CIFAR10_RESNET18"]
PROB_CIFAR10_RESNET34 = BASE_PROBLEMS["CIFAR10_RESNET34"]
PROB_CIFAR10_RESNET50 = BASE_PROBLEMS["CIFAR10_RESNET50"]
PROB_CIFAR100_RESNET50 = BASE_PROBLEMS["CIFAR100_RESNET50"]
PROB_CIFAR100_RESNET34 = BASE_PROBLEMS["CIFAR100_RESNET34"]
PROB_PTB_TRANSFORMERXL = BASE_PROBLEMS["PTB_TRANSFORMERXL"]
PROB_WT2_TXL = BASE_PROBLEMS["WT2_TRANSFORMERXL"]
PROB_WT2_TXL_DET = BASE_PROBLEMS["WT2_TRANSFORMERXL_DET"]
PROB_WT2_TENC = BASE_PROBLEMS["WT2_TENC"]
PROB_PTB_TENC = BASE_PROBLEMS["PTB_TENC"]
PROB_PTB_TENC_DET = BASE_PROBLEMS["PTB_TENC_DET"]
PROB_DB_SQD = BASE_PROBLEMS["DB_SQD"]
def make_exp_dict_list_unique(experiments):
experiments_str = [exp_dict_to_str(exp_dict) for exp_dict in experiments]
experiments_str = list(set(experiments_str))
return [exp_dict_from_str(exp_str) for exp_str in experiments_str]
def nice_logspace(start, end, base, density):
"""Returns a log-spaced grid between ``base**start`` and ``base**end``.
Plays nicely with ``merge_grids``. Increasing the density repeats previous values.
- ``Start``, ``end`` and ``density`` are integers.
- Increasing ``density`` by 1 doubles the number of points.
- ``Density = 1`` will return ``end - start + 1`` points
- ``Density = 2`` will return ``2*(end-start) + 1`` points
- ``nice_logspace(0, 1, base=10, density=1) == [1, 10] == [10**0, 10**1]``
- ``nice_logspace(0, 1, base=10, density=2) == [1, 3.16..., 10] == [10**0, 10**(1/2), 10**1]``
"""
if density < 0 or not np.allclose(int(density), density):
raise ValueError(
f"nice_logspace: density needs to be an integer >= 0, got {density}."
)
if not np.allclose(int(start), start) or not np.allclose(int(end), end):
raise ValueError(
f"nice_logspace: start and end need to be integers, got {start, end}."
)
assert end > start
return np.logspace(start, end, base=base, num=(end - start) * (2**density) + 1)
def merge_grids(*grids):
"""Merge two lists of parameters.
Given lists [a,b,c], [c,d,e], returns [a,b,c,d,e]
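    Example (illustrative):
    .. code:: python
        merge_grids([0.1, 1.0, 10.0], [1.0, 100.0]) == [0.1, 1.0, 10.0, 100.0]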
"""
return sorted(list(set.union(*[set(grid) for grid in grids])))
def dict_update(x, y):
"""Non-mutable version of x.update(y)"""
z = x.copy()
z.update(y)
return z
def merge_dicts(*dicts):
"""Merge dictionary, preserves keys from the right-most dicts. Equivalent
to chaining ``x.update(y)``
Example:
.. code:: python
merge_dicts({"a":1, "b":2}, {"b":3, "c":4}) == {"a":1, "b":3, "c":4}
"""
z = dicts[0].copy()
for other_dict in dicts[1:]:
z.update(other_dict)
return z
def merge_sets(*many_lists):
"""Merge lists without duplicates."""
z = [x for x in many_lists[0]]
for other_list in many_lists[1:]:
z += [x for x in other_list]
return list(set(z))
def experiment_maker_cli(
exp_name,
descr,
experiments,
hyperparam_names=None,
as_one_job=False,
as_job_array=False,
):
"""Creates the experiment json files necessary to send individual jobs.
Will output the necessary files in
.. code:: bash
explib_workspace/
└─ exp_name/
├─ exp_defs/
│ ├─ ...
│ ├─ exp_name_uuid1.json
│ └─ exp_name_uuid2.json
├─ jobs/
│ ├─ main.sh
│ ├─ job_uuid1.sh
│ └─ job_uuid2.sh
└─ exp_name_summary.json
``exp_name`` needs to be filename friendly (to be safe, use ``[a-z]|[A-Z]|-|_``)
    - If ``as_one_job``, outputs only one sbatch file to run all jobs sequentially
    - If ``as_job_array``, outputs one job-array sbatch file per slurm config, submitting its jobs in parallel
- By default, creates one sbatch file per experiment
``hyperparam_names`` is a list of hyperparameter names in wandb format
(eg ``"opt.name"``) for a better ``--report`` option.
Calling this file with
- No arguments: Generates the exp_defs and jobs as above.
- ``--report``: Queries wandb and prints a summary of which jobs have finished
(and have been checked by ``python -m explib.results checkup``)
If no ``hyperparam_names`` are given,
``--report`` outputs only the percentage of finished runs on wandb.
If ``hyperparam_names`` is given, gives a breakdown per hyper-parameter value.
- ``--unfinished``: Queries wandb; only generates jobs for unfinished runs
as logged on wandb (the missing runs from ``--report``)
"""
parser = argparse.ArgumentParser(description=descr)
parser.add_argument(
"--report",
action="store_true",
help="Generates a report on what experiments have been run/are stored on wandb",
default=False,
)
parser.add_argument(
"--report-short",
action="store_true",
help="Short reporting. Does not download summary, need to call python -m explib.results --summary first.",
default=False,
)
parser.add_argument(
"--unfinished",
action="store_true",
help="Only generate sbatch scripts for experiments that are not finished on wandb.",
default=False,
)
cli_helper.add_dotenv_option(parser)
args = parser.parse_args()
cli_helper.load_dotenv_if_required(args)
loglevel = (
"WARNING"
if args.report or args.report_short
else config.get_console_logging_level()
)
logging.init_logging_stdout(level=loglevel)
logging.info(f"Creating files for {exp_name} in {get_exp_def_folder(exp_name)}")
logging.info(f"Experiment description: {descr}")
create_experiment_definitions(exp_name, experiments)
if args.report:
wandb_reporting.check_status(exp_name, hyperparam_names)
elif args.report_short:
hyperparam_names = ["seed", "slurm_config"]
wandb_reporting.check_status(exp_name, hyperparam_names, download=False)
else:
filter_should_run_wuuid = lambda wuuid: True
if args.unfinished:
logging.info("Creating filter to not re-run already run experiments")
uuid_has_run = wandb_reporting.wuuid_to_successful_run(exp_name)
filter_should_run_wuuid = lambda wuuid: not uuid_has_run[wuuid]
if as_one_job:
create_slurm_single_job_file(exp_name, filter_should_run_wuuid)
elif as_job_array:
create_slurm_jobarrays(exp_name, filter_should_run_wuuid)
else:
create_slurm_job_files(exp_name, filter_should_run_wuuid)
| 10,448 | 29.642229 | 114 |
py
|
noise-sgd-adam-sign
|
noise-sgd-adam-sign-main/explib/explib/expmaker/experiment_defs.py
|
import base64
import hashlib
import json
import os
from pathlib import Path
from explib import config, logging
def exp_dict_from_str(exp_dict_str):
return json.loads(exp_dict_str)
def exp_dict_to_str(exp_dict, remove_keys=None):
"""String version of the experiment dictionary"""
if remove_keys is not None:
exp_dict_copy = json.loads(json.dumps(exp_dict))
for key in remove_keys:
if key in exp_dict_copy:
del exp_dict_copy[key]
return json.dumps(exp_dict_copy, sort_keys=True)
return json.dumps(exp_dict, sort_keys=True)
def _uuid_from_str(string):
bytes = string.encode("ascii")
hash = hashlib.sha256(bytes)
# Using base 32 as it is filesystem-safe for Unix and Windows
return base64.b32encode(hash.digest()).decode("ascii")
def make_wuuid(exp_dict):
"""Wandb UUID (ignores slurm_config for multi-cluster)"""
return _uuid_from_str(exp_dict_to_str(exp_dict, remove_keys=["slurm_config"]))
def make_uuid(exp_dict):
"""Wandb UUID derived from exp_dict"""
return _uuid_from_str(exp_dict_to_str(exp_dict))
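# Illustrative property (hypothetical experiment dicts): the wandb UUID ignores
# `slurm_config`, so the same experiment submitted under two slurm configs
# shares a wuuid, while make_uuid keeps the two definitions distinct.
def _example_uuid_vs_wuuid():
    d1 = {"dataset": "mnist", "slurm_config": "SMALL_GPU_4H"}
    d2 = {"dataset": "mnist", "slurm_config": "LARGE_GPU_4H"}
    assert make_wuuid(d1) == make_wuuid(d2)
    assert make_uuid(d1) != make_uuid(d2)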
def gen_uuid_to_exp_dicts(experiments):
"""Get a dictionary of uuids => experiment dictionary"""
return {make_uuid(exp_dict): exp_dict for exp_dict in experiments}
def get_exp_folder(exp_name):
folder = os.path.join(config.get_workspace(), exp_name)
Path(folder).mkdir(parents=True, exist_ok=True)
return folder
def get_exp_def_folder(exp_name):
folder = os.path.join(get_exp_folder(exp_name), "exp_defs")
Path(folder).mkdir(parents=True, exist_ok=True)
return folder
def get_jobs_folder(exp_name):
jobs_folder = os.path.join(get_exp_folder(exp_name), "jobs")
Path(jobs_folder).mkdir(parents=True, exist_ok=True)
return jobs_folder
def get_exp_full_path_json(exp_name, exp_dict):
uuid = make_uuid(exp_dict)
filename = f"{exp_name}_{uuid}.json"
return os.path.join(get_exp_def_folder(exp_name), filename)
def get_job_path(exp_name, exp_dict):
uuid = make_uuid(exp_dict)
filename = f"{exp_name}_{uuid}.sh"
return os.path.join(get_jobs_folder(exp_name), filename)
def create_experiment_definitions(exp_name, experiments):
experiment_folder = get_exp_def_folder(exp_name)
logging.info(f"Storing experiment files in {experiment_folder}")
uuid_to_expdicts = gen_uuid_to_exp_dicts(experiments)
for uuid, exp_dict in uuid_to_expdicts.items():
exp_filepath = get_exp_full_path_json(exp_name, exp_dict)
with open(exp_filepath, "w") as fp:
json.dump(exp_dict, fp)
logging.debug(f"Created {exp_filepath}")
summary_filepath = os.path.join(
get_exp_folder(exp_name), f"{exp_name}_summary.json"
)
with open(summary_filepath, "w") as fp:
json.dump(uuid_to_expdicts, fp, indent=4)
logging.info(f"Created {len(uuid_to_expdicts)} experiment files")
logging.info(f"Summary in {summary_filepath}")
def load_summary(exp_name):
summary_filepath = os.path.join(
get_exp_folder(exp_name), f"{exp_name}_summary.json"
)
with open(summary_filepath, "r") as fp:
summary = json.load(fp)
return summary
| 3,190 | 29.980583 | 82 |
py
|
pairwiseMKL
|
pairwiseMKL-master/main.py
|
import numpy as np
import copy
from math import sqrt
from sklearn import preprocessing, metrics
from pairwisemkl.learner.compute_M import *
from pairwisemkl.learner.compute_a_regression import *
from pairwisemkl.learner.optimize_kernel_weights import *
from pairwisemkl.learner.cg_kron_rls import CGKronRLS
data_path = './drug_response_data'
# Drug kernels
# Read file names of drug kernels
fn_kd = open(data_path + '/Drug_kernels/Drug_kernel_file_names.txt', 'r')
kd_file_names = fn_kd.readlines()
fn_kd.close()
kd_file_names = [x.split('\n')[0] for x in kd_file_names]
# Prepare a list of drug kernels
kd_list = []
for kd in kd_file_names:
f_kd = open(data_path + '/Drug_kernels/' + kd, 'r')
kd_list.append(np.loadtxt(f_kd))
f_kd.close()
# Cell line kernels
# Read file names of cell line kernels
fn_kc = open(data_path + '/Cell_line_kernels/Cell_kernel_file_names.txt', 'r')
kc_file_names = fn_kc.readlines()
fn_kc.close()
kc_file_names = [x.split('\n')[0] for x in kc_file_names]
kc_list = []
# Prepare a list of cell line kernels
for kc in kc_file_names:
f_kc = open(data_path + '/Cell_line_kernels/' + kc, 'r')
kc_list.append(np.loadtxt(f_kc))
f_kc.close()
# Number of pairwise kernels
P = len(kd_list)*len(kc_list)
# Generate pairwise kernel ids
kd_ids, kc_ids = np.unravel_index(np.arange(P), (len(kd_list),len(kc_list)), order = 'C')
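# For example, with 2 drug kernels and 3 cell line kernels, P = 6 and the
# pairwise kernels are enumerated row-major:
#   kd_ids = [0, 0, 0, 1, 1, 1],  kc_ids = [0, 1, 2, 0, 1, 2]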
# Compute matrix M needed to optimize pairwise kernel weights
M = compute_M(kd_list, kc_list)
# Labels
# Read matrix with drug responses in cancer cell lines
f = open(data_path + '/Labels.txt', 'r') # rows: drugs, columns: cell lines
Y = np.loadtxt(f)
f.close()
# Number of drugs and cell lines
n_d, n_c = Y.shape
# Labels in the vector form
y_vec = Y.ravel(order = 'C')
# Create indices
ids = np.arange(n_d*n_c)
drug_ids, cell_ids = np.unravel_index(ids, (n_d,n_c), order = 'C')
# Remove missing values (if any) from the label vector as well as the corresponding ids
ids_known = ~np.isnan(y_vec)
y_vec_known = y_vec[ids_known]
drug_ids_known = drug_ids[ids_known]
cell_ids_known = cell_ids[ids_known]
# Values for the regularization parameter \lambda
# (to be optimized in the nested CV)
regparam = [10.**x for x in range(-5, 1)]
# CV (10 outer folds, 3 inner folds)
# Pairwise kernel weights from each outer fold
k_weights_outer = np.empty([10,P]); k_weights_outer[:] = np.NAN
# Selected value for the regularization parameter \lambda
model = np.empty([10,1]); model[:] = np.NAN
# Predicted drug responses
y_pred_outer_vec = np.empty([len(ids[ids_known])]); y_pred_outer_vec[:] = np.NAN
# Root mean squared error (RMSE)
rmse_outer = np.empty([10, 1]); rmse_outer[:] = np.NAN
# Pearson correlation
pearson_outer = np.empty([10, 1]); pearson_outer[:] = np.NAN
# F1 score
f1_outer = np.empty([10, 1]); f1_outer[:] = np.NAN
# Read pre-defined outer folds used in the experiments presented in the pairwiseMKL paper
outer_folds = np.loadtxt(data_path + '/Folds/outer_folds.txt').astype(int)
# Outer CV loop
for i_out in range(10):
print('Outer loop ' + str(i_out+1) + '\n')
test_ids = np.array(np.where(outer_folds==i_out)).squeeze()
train_ids = np.array(np.where(outer_folds!=i_out)).squeeze()
# Test data
y_test = y_vec_known[test_ids]
drug_ids_test = drug_ids_known[test_ids]
cell_ids_test = cell_ids_known[test_ids]
# Training data
# Training labels in the vector form
y_train = y_vec_known[train_ids]
# Training labels in the matrix form
Y_train = copy.deepcopy(Y)
Y_train[drug_ids_test,cell_ids_test] = np.nan
drug_ids_train = drug_ids_known[train_ids]
cell_ids_train = cell_ids_known[train_ids]
# Stage 1 of determining pairwise kernel weights
# Compute vector a needed to optimize kernel weights
a = compute_a_regression(kd_list, kc_list, Y_train)
# Optimize pairwise kernel weights
k_weights_outer[i_out,:] = optimize_kernel_weights(a, M)
# Stage 2 of pairwise model training
# Find pairwise kernels with weights different from 0 (>10**-3).
ix = np.where(k_weights_outer[i_out,:] > 10**-3)[0]
# Corresponding kernel weights
w = k_weights_outer[i_out,ix]/sum(k_weights_outer[i_out,ix])
kd_list_selected = []
kc_list_selected = []
for i_p in range(len(w)):
kd_list_selected.append(kd_list[kd_ids[ix[i_p]]])
kc_list_selected.append(kc_list[kc_ids[ix[i_p]]])
# Inner CV loop
rmse_inner = np.empty([3, len(regparam)]); rmse_inner[:] = np.NAN
    # Read pre-defined inner folds used in the experiments presented in the pairwiseMKL paper
inner_folds = np.loadtxt(data_path + '/Folds/inner_folds_outer%d.txt'%i_out).astype(int)
for i_in in range(3):
print(' Inner loop ' + str(i_in+1))
inner_test_ids = np.array(np.where(inner_folds==i_in)).squeeze()
inner_train_ids = np.array(np.where((inner_folds!=i_in) & (inner_folds!=-1))).squeeze()
y_test_inner = y_vec_known[inner_test_ids]
drug_ids_test_inner = drug_ids_known[inner_test_ids]
cell_ids_test_inner = cell_ids_known[inner_test_ids]
y_train_inner = y_vec_known[inner_train_ids]
drug_ids_train_inner = drug_ids_known[inner_train_ids]
cell_ids_train_inner = cell_ids_known[inner_train_ids]
# Find optimal \lambda
for i_param in range(len(regparam)):
# Training
learner = CGKronRLS(K1 = kd_list_selected,
K2 = kc_list_selected,
weights = w.tolist(),
Y = y_train_inner,
label_row_inds = [drug_ids_train_inner for i in range(len(w))],
label_col_inds = [cell_ids_train_inner for i in range(len(w))],
regparam = regparam[i_param],
maxiter = 400)
# Prediction
pred_inner = learner.predict(kd_list_selected, kc_list_selected, [drug_ids_test_inner for i in range(len(w))], [cell_ids_test_inner for i in range(len(w))])
# RMSE
rmse_inner[i_in,i_param] = sqrt(((y_test_inner - pred_inner) ** 2).mean(axis=0))
# \lambda with the lowest RMSE
model[i_out,0] = regparam[np.argmin(np.mean(rmse_inner, axis=0))]
# Model training with selected \lambda
learner = CGKronRLS(K1 = kd_list_selected,
K2 = kc_list_selected,
weights = w.tolist(),
Y = y_train,
label_row_inds = [drug_ids_train for i in range(len(w))],
label_col_inds = [cell_ids_train for i in range(len(w))],
regparam = model[i_out,0],
maxiter = 400)
# Prediction
y_pred_outer_vec[test_ids] = learner.predict(kd_list_selected, kc_list_selected, [drug_ids_test for i in range(len(w))], [cell_ids_test for i in range(len(w))])
# RMSE
rmse_outer[i_out] = sqrt(((y_test - y_pred_outer_vec[test_ids]) ** 2).mean(axis=0))
# Pearson correlation
pearson_outer[i_out] = np.corrcoef(y_test, y_pred_outer_vec[test_ids])[0,1]
# F1 score
y_test_binary = copy.deepcopy(y_test)
y_test_binary = preprocessing.binarize(y_test_binary.reshape(1,-1), threshold=5, copy=False)[0]
y_pred_binary = copy.deepcopy(y_pred_outer_vec[test_ids])
y_pred_binary = preprocessing.binarize(y_pred_binary.reshape(1,-1), threshold=5, copy=False)[0]
f1_outer[i_out] = metrics.f1_score(y_test_binary, y_pred_binary)
# Predicted drug responses
np.savetxt(data_path + '/y_pred_vec.txt', y_pred_outer_vec, delimiter='\t')
# In the below files, each row corresponds to the result from a single outer CV fold
# RMSE
np.savetxt(data_path + '/RMSE.txt', rmse_outer, delimiter='\t')
# Pearson correlation
np.savetxt(data_path + '/Pearson_correlation.txt', pearson_outer, delimiter='\t')
# F1 score
np.savetxt(data_path + '/F1_score.txt', f1_outer, delimiter='\t')
# Optimal value of the regularization parameter \lambda
np.savetxt(data_path + '/selected_lambda.txt', model, delimiter='\t')
# Pairwise kernel weights
np.savetxt(data_path + '/pairwise_kernel_weights.txt', k_weights_outer, delimiter='\t')
# File names of the corresponding pairwise kernels, in the same order as in the file
# with pairwise kernel weights
thefile = open(data_path + '/pairwise_kernel_names.txt', 'w')
for i in range(P):
thefile.write("%s_KRONECKER_%s\t" %(kd_file_names[kd_ids[i]], kc_file_names[kc_ids[i]]))
thefile.close()
print('\nSuccess!')
| 8,753 | 37.563877 | 168 |
py
|
pairwiseMKL
|
pairwiseMKL-master/main_precalculate_M_arrayjob.py
|
from sys import argv, exit
import os
import numpy as np
from pairwisemkl.learner.compute_M__arrayjob import *
try:
id_in = int(argv[1])
except (IndexError, ValueError):
    exit("Usage: python main_precalculate_M_arrayjob.py <row_index>")
data_path = './drug_response_data'
# Drug kernels
# Read file names of drug kernels
fn_kd = open(data_path + '/Drug_kernels/Drug_kernel_file_names.txt', 'r')
kd_file_names = fn_kd.readlines()
fn_kd.close()
kd_file_names = [x.split('\n')[0] for x in kd_file_names]
# Prepare a list of drug kernels
kd_list = []
for kd in kd_file_names:
f_kd = open(data_path + '/Drug_kernels/' + kd, 'r')
kd_list.append(np.loadtxt(f_kd))
f_kd.close()
# Cell line kernels
# Read file names of cell line kernels
fn_kc = open(data_path + '/Cell_line_kernels/Cell_kernel_file_names.txt', 'r')
kc_file_names = fn_kc.readlines()
fn_kc.close()
kc_file_names = [x.split('\n')[0] for x in kc_file_names]
kc_list = []
# Prepare a list of cell line kernels
for kc in kc_file_names:
f_kc = open(data_path + '/Cell_line_kernels/' + kc, 'r')
kc_list.append(np.loadtxt(f_kc))
f_kc.close()
# Compute a single row of the matrix M (indexed by an integer id_in)
# Matrix M is needed to optimize pairwise kernel weights
m = compute_M_row(kd_list, kc_list, id_in)
new_path = data_path + "/M"
if not os.path.exists(new_path):
os.makedirs(new_path)
np.savetxt(new_path + '/M__row_'+str(id_in)+'.txt', m, delimiter='\t')
print('\nSuccess!')
| 1,409 | 25.111111 | 78 |
py
|
pairwiseMKL
|
pairwiseMKL-master/setup.py
|
from setuptools import setup, find_packages
from setuptools.extension import Extension
import numpy as np
USE_CYTHON = False
ext = '.pyx' if USE_CYTHON else '.c'
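# To regenerate the extension from the Cython source instead of the shipped C
# file, set USE_CYTHON = True above (requires Cython to be installed).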
#sys.argv[1:] = ['build_ext', '--inplace']
ext_modules = [
Extension("pairwisemkl.utilities._sampled_kronecker_products",["pairwisemkl/utilities/_sampled_kronecker_products"+ext])
]
if USE_CYTHON:
from Cython.Build import cythonize
ext_modules = cythonize(ext_modules)
setup(
name = 'pairwisemkl',
description = 'pairwiseMKL package',
url = "https://github.com/aalto-ics-kepaco/pairwiseMKL",
version = "0.1",
license = "MIT",
include_dirs = [np.get_include()],
ext_modules = ext_modules,
packages = find_packages(),
)
| 735 | 23.533333 | 124 |
py
|
pairwiseMKL
|
pairwiseMKL-master/main_arrayjob_using_precalculated_M.py
|
from sys import argv, exit
import os
import numpy as np
import copy
from math import sqrt
from sklearn import preprocessing, metrics
from pairwisemkl.learner.compute_a_regression import *
from pairwisemkl.learner.optimize_kernel_weights import *
from pairwisemkl.learner.cg_kron_rls import CGKronRLS
try:
i_out = int(argv[1])
except (IndexError, ValueError):
    exit("Usage: python main_arrayjob_using_precalculated_M.py <outer_fold_index>")
data_path = './drug_response_data'
new_path = data_path + "/ArrayJob_results"
# Any of the array jobs may run first, so make sure the output folder exists
os.makedirs(new_path, exist_ok=True)
# Drug kernels
# Read file names of drug kernels
fn_kd = open(data_path + '/Drug_kernels/Drug_kernel_file_names.txt', 'r')
kd_file_names = fn_kd.readlines()
fn_kd.close()
kd_file_names = [x.split('\n')[0] for x in kd_file_names]
# Prepare a list of drug kernels
kd_list = []
for kd in kd_file_names:
f_kd = open(data_path + '/Drug_kernels/' + kd, 'r')
kd_list.append(np.loadtxt(f_kd))
f_kd.close()
# Cell line kernels
# Read file names of cell line kernels
fn_kc = open(data_path + '/Cell_line_kernels/Cell_kernel_file_names.txt', 'r')
kc_file_names = fn_kc.readlines()
fn_kc.close()
kc_file_names = [x.split('\n')[0] for x in kc_file_names]
kc_list = []
# Prepare a list of cell line kernels
for kc in kc_file_names:
f_kc = open(data_path + '/Cell_line_kernels/' + kc, 'r')
kc_list.append(np.loadtxt(f_kc))
f_kc.close()
# Number of pairwise kernels
P = len(kd_list)*len(kc_list)
# Generate pairwise kernel ids
kd_ids, kc_ids = np.unravel_index(np.arange(P), (len(kd_list),len(kc_list)), order = 'C')
# Assemble matrix M precomputed using array jobs
M = np.empty([P,P]); M[:] = np.NAN
for row in range(P):
f_m = open(data_path + '/M/M__row_' + str(row) + '.txt')
m = np.loadtxt(f_m)
f_m.close()
M[row, row:P] = m[row:P]
M[row:P, row] = m[row:P]
# Labels
# Read matrix with drug responses in cancer cell lines
f = open(data_path + '/Labels.txt', 'r') # rows: drugs, columns: cell lines
Y = np.loadtxt(f)
f.close()
# Number of drugs and cell lines
n_d, n_c = Y.shape
# Labels in the vector form
y_vec = Y.ravel(order = 'C')
# Create indices
ids = np.arange(n_d*n_c)
drug_ids, cell_ids = np.unravel_index(ids, (n_d,n_c), order = 'C')
# Remove missing values (if any) from the label vector as well as the corresponding ids
ids_known = ~np.isnan(y_vec)
y_vec_known = y_vec[ids_known]
drug_ids_known = drug_ids[ids_known]
cell_ids_known = cell_ids[ids_known]
# Values for the regularization parameter \lambda
# (to be optimized in the nested CV)
regparam = [10.**x for x in range(-5, 1)]
# CV (10 outer folds, 3 inner folds)
# Each outer CV loop is run as a separate array job
# Vector where predicted drug responses will be stored
y_pred_outer_vec = np.empty([len(ids[ids_known])]); y_pred_outer_vec[:] = np.NAN
# Read pre-defined outer folds used in the experiments presented in the pairwiseMKL paper
outer_folds = np.loadtxt(data_path + '/Folds/outer_folds.txt').astype(int)
# Outer CV loop
print('Outer loop ' + str(i_out+1) + '\n')
test_ids = np.array(np.where(outer_folds==i_out)).squeeze()
train_ids = np.array(np.where(outer_folds!=i_out)).squeeze()
# Test data
y_test = y_vec_known[test_ids]
drug_ids_test = drug_ids_known[test_ids]
cell_ids_test = cell_ids_known[test_ids]
# Training data
# Training labels in the vector form
y_train = y_vec_known[train_ids]
# Training labels in the matrix form
Y_train = copy.deepcopy(Y)
Y_train[drug_ids_test,cell_ids_test] = np.nan
drug_ids_train = drug_ids_known[train_ids]
cell_ids_train = cell_ids_known[train_ids]
# Stage 1 of determining pairwise kernel weights
# Compute vector a needed to optimize kernel weights
a = compute_a_regression(kd_list, kc_list, Y_train)
# Optimize pairwise kernel weights
k_weights_outer = optimize_kernel_weights(a, M)
# Stage 2 of pairwise model training
# Find pairwise kernels with weights different from 0 (>10**-3).
ix = np.where(k_weights_outer[0,:] > 10**-3)[0]
# Corresponding kernel weights
w = k_weights_outer[0,ix]/sum(k_weights_outer[0,ix])
kd_list_selected = []
kc_list_selected = []
for i_p in range(len(w)):
kd_list_selected.append(kd_list[kd_ids[ix[i_p]]])
kc_list_selected.append(kc_list[kc_ids[ix[i_p]]])
# Inner CV loop
rmse_inner = np.empty([3, len(regparam)]); rmse_inner[:] = np.NAN
# Read pre-defined inner folds used in the experiments presented in the pairwiseMKL paper
inner_folds = np.loadtxt(data_path + '/Folds/inner_folds_outer%d.txt'%i_out).astype(int)
for i_in in range(3):
print(' Inner loop ' + str(i_in+1))
inner_test_ids = np.array(np.where(inner_folds==i_in)).squeeze()
inner_train_ids = np.array(np.where((inner_folds!=i_in) & (inner_folds!=-1))).squeeze()
y_test_inner = y_vec_known[inner_test_ids]
drug_ids_test_inner = drug_ids_known[inner_test_ids]
cell_ids_test_inner = cell_ids_known[inner_test_ids]
y_train_inner = y_vec_known[inner_train_ids]
drug_ids_train_inner = drug_ids_known[inner_train_ids]
cell_ids_train_inner = cell_ids_known[inner_train_ids]
# Find optimal \lambda
for i_param in range(len(regparam)):
# Training
learner = CGKronRLS(K1 = kd_list_selected,
K2 = kc_list_selected,
weights = w.tolist(),
Y = y_train_inner,
label_row_inds = [drug_ids_train_inner for i in range(len(w))],
label_col_inds = [cell_ids_train_inner for i in range(len(w))],
regparam = regparam[i_param],
maxiter = 400)
# Prediction
pred_inner = learner.predict(kd_list_selected, kc_list_selected, [drug_ids_test_inner for i in range(len(w))], [cell_ids_test_inner for i in range(len(w))])
# RMSE
rmse_inner[i_in,i_param] = sqrt(((y_test_inner - pred_inner) ** 2).mean(axis=0))
# \lambda with the lowest RMSE
model = regparam[np.argmin(np.mean(rmse_inner, axis=0))]
# Model training with selected \lambda
learner = CGKronRLS(K1 = kd_list_selected,
K2 = kc_list_selected,
weights = w.tolist(),
Y = y_train,
label_row_inds = [drug_ids_train for i in range(len(w))],
label_col_inds = [cell_ids_train for i in range(len(w))],
regparam = model,
maxiter = 400)
# Prediction
y_pred_outer_vec[test_ids] = learner.predict(kd_list_selected, kc_list_selected, [drug_ids_test for i in range(len(w))], [cell_ids_test for i in range(len(w))])
# RMSE
rmse_outer = sqrt(((y_test - y_pred_outer_vec[test_ids]) ** 2).mean(axis=0))
# Pearson correlation
pearson_outer = np.corrcoef(y_test, y_pred_outer_vec[test_ids])[0,1]
# F1 score
y_test_binary = copy.deepcopy(y_test)
y_test_binary = preprocessing.binarize(y_test_binary.reshape(1,-1), threshold=5, copy=False)[0]
y_pred_binary = copy.deepcopy(y_pred_outer_vec[test_ids])
y_pred_binary = preprocessing.binarize(y_pred_binary.reshape(1,-1), threshold=5, copy=False)[0]
f1_outer = metrics.f1_score(y_test_binary, y_pred_binary)
# Predicted drug responses
np.savetxt(new_path + '/y_pred_vec__fold_' + str(i_out+1) + '.txt', y_pred_outer_vec, delimiter='\t')
# Test ids
np.savetxt(new_path + '/test_ids__fold_' + str(i_out+1) + '.txt', test_ids, delimiter='\t')
# In the below files, each row corresponds to the result from a single outer CV fold
# RMSE
np.savetxt(new_path + '/RMSE__fold_' + str(i_out+1) + '.txt', np.asarray([rmse_outer]))
# Pearson correlation
np.savetxt(new_path + '/Pearson_correlation__fold_' + str(i_out+1) + '.txt', np.asarray([pearson_outer]))
# F1 score
np.savetxt(new_path + '/F1_score__fold_' + str(i_out+1) + '.txt', np.asarray([f1_outer]))
# Optimal value of the regularization parameter \lambda
np.savetxt(new_path + '/selected_lambda__fold_' + str(i_out+1) + '.txt', np.asarray([model]))
# Pairwise kernel weights
np.savetxt(new_path + '/pairwise_kernel_weights__fold_' + str(i_out+1) + '.txt', k_weights_outer, delimiter='\t')
# Vector a
np.savetxt(new_path +'/a__fold_' + str(i_out+1) + '.txt', a, delimiter='\t')
# File names of the corresponding pairwise kernels, in the same order as in the file
# with pairwise kernel weights
if i_out==0:
thefile = open(new_path + '/pairwise_kernel_names.txt', 'w')
for i in range(P):
thefile.write("%s_KRONECKER_%s\t" %(kd_file_names[kd_ids[i]], kc_file_names[kc_ids[i]]))
thefile.close()
print('\nSuccess!')
| 8,644 | 34.871369 | 164 |
py
|
pairwiseMKL
|
pairwiseMKL-master/cython_setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_modules = [
Extension("pairwisemkl.utilities._sampled_kronecker_products",["pairwisemkl/utilities/_sampled_kronecker_products.pyx"], include_dirs=[np.get_include()])
]
setup(
name = 'cmodules',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules,
)
| 424 | 25.5625 | 157 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/predictor/pairwise_predictor.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL and RLScore
#
# Copyright (c) 2018 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from pairwisemkl.utilities import sampled_kronecker_products
class PairwisePredictorInterface(object):
"""Computes predictions for test examples.
Parameters
----------
X1 : {array-like}, shape = [n_samples1, n_features1]
first test data matrix
X2 : {array-like}, shape = [n_samples2, n_features2]
second test data matrix
row_inds_X1pred : array of indices, optional
rows of X1, for which predictions are needed
row_inds_X2pred : array of indices, optional
rows of X2, for which predictions are needed
Note
----
If using kernels, give kernel matrices K1 and K2 as arguments instead of X1 and X2
"""
def predict(self, X1, X2, row_inds_X1pred = None, row_inds_X2pred = None):
return self.predictor.predict(X1, X2, row_inds_X1pred, row_inds_X2pred)
class KernelPairwisePredictor(object):
"""Pairwise kernel predictor
Parameters
----------
A : {array-like}, shape = [n_train_pairs]
dual coefficients
row_inds_K1training : list of indices, shape = [n_train_pairs], optional
maps dual coefficients to rows of K1, not needed if learning from complete data (i.e. n_train_pairs = n_samples1*n_samples2)
row_inds_K2training : list of indices, shape = [n_train_pairs], optional
maps dual coefficients to rows of K2, not needed if learning from complete data (i.e. n_train_pairs = n_samples1*n_samples2)
weights : {list, tuple, array-like}, shape = [n_kernels], optional
weights used by multiple pairwise kernel predictors
Attributes
----------
A : {array-like}, shape = [n_train_pairs]
dual coefficients
row_inds_K1training : list of indices, shape = [n_train_pairs] or None
maps dual coefficients to rows of K1, not needed if learning from complete data (i.e. n_train_pairs = n_samples1*n_samples2)
row_inds_K2training : list of indices, shape = [n_train_pairs] or None
maps dual coefficients to rows of K2, not needed if learning from complete data (i.e. n_train_pairs = n_samples1*n_samples2)
weights : {list, tuple, array-like}, shape = [n_kernels], optional
weights used by multiple pairwise kernel predictors
"""
def __init__(self, A, row_inds_K1training = None, row_inds_K2training = None, weights = None):
self.A = A
self.row_inds_K1training, self.row_inds_K2training = row_inds_K1training, row_inds_K2training
if weights is not None: self.weights = weights
def predict(self, K1pred, K2pred, row_inds_K1pred = None, row_inds_K2pred = None):
"""Computes predictions for test examples.
Parameters
----------
K1pred : {array-like, list of equally shaped array-likes}, shape = [n_samples1, n_train_pairs]
the first part of the test data matrix
K2pred : {array-like, list of equally shaped array-likes}, shape = [n_samples2, n_train_pairs]
the second part of the test data matrix
row_inds_K1pred : list of indices, shape = [n_test_pairs], optional
maps rows of K1pred to vector of predictions P. If not supplied, predictions are computed for all possible test pair combinations.
row_inds_K2pred : list of indices, shape = [n_test_pairs], optional
maps rows of K2pred to vector of predictions P. If not supplied, predictions are computed for all possible test pair combinations.
Returns
----------
P : array, shape = [n_test_pairs] or [n_samples1*n_samples2]
predictions, either ordered according to the supplied row indices, or if no such are supplied by default
prediction for (K1[i], K2[j]) maps to P[i + j*n_samples1].
"""
def inner_predict(K1pred, K2pred, row_inds_K1training, row_inds_K2training, row_inds_K1pred = None, row_inds_K2pred = None):
if len(K1pred.shape) == 1:
K1pred = K1pred.reshape(1, K1pred.shape[0])
if len(K2pred.shape) == 1:
K2pred = K2pred.reshape(1, K2pred.shape[0])
if row_inds_K1pred is not None:
row_inds_K1pred = np.array(row_inds_K1pred, dtype = np.int32)
row_inds_K2pred = np.array(row_inds_K2pred, dtype = np.int32)
P = sampled_kronecker_products.sampled_vec_trick(
self.A,
K2pred,
K1pred,
row_inds_K2pred,
row_inds_K1pred,
row_inds_K2training,
row_inds_K1training)
else:
P = sampled_kronecker_products.sampled_vec_trick(
self.A,
K2pred,
K1pred,
None,
None,
row_inds_K2training,
row_inds_K1training)
#P = P.reshape((K1pred.shape[0], K2pred.shape[0]), order = 'F')
P = np.array(P)
return P
if isinstance(K1pred, (list, tuple)):
P = None
for i in range(len(K1pred)):
K1i = K1pred[i]
K2i = K2pred[i]
inds1training = self.row_inds_K1training[i]
inds2training = self.row_inds_K2training[i]
if row_inds_K1pred is not None:
inds1pred = row_inds_K1pred[i]
inds2pred = row_inds_K2pred[i]
Pi = inner_predict(K1i, K2i, inds1training, inds2training, inds1pred, inds2pred)
else:
Pi = inner_predict(K1i, K2i, inds1training, inds2training, None, None)
if P is None: P = self.weights[i] * Pi
else: P = P + self.weights[i] * Pi
return P
else:
return inner_predict(K1pred, K2pred, self.row_inds_K1training, self.row_inds_K2training, row_inds_K1pred, row_inds_K2pred)
class LinearPairwisePredictor(object):
"""Linear pairwise predictor.
Parameters
----------
W : {array-like}, shape = [n_features1, n_features2]
primal coefficients for the Kronecker product features
Attributes
----------
W : {array-like}, shape = [n_features1, n_features2]
primal coefficients for the Kronecker product features
"""
def __init__(self, W):
self.W = W
def predict(self, X1pred, X2pred, row_inds_X1pred = None, row_inds_X2pred = None):
"""Computes predictions for test examples.
Parameters
----------
X1pred : array-like, shape = [n_samples1, n_features1]
the first part of the test data matrix
X2pred : array-like, shape = [n_samples2, n_features2]
the second part of the test data matrix
row_inds_X1pred : list of indices, shape = [n_test_pairs], optional
maps rows of X1pred to vector of predictions P. If not supplied, predictions are computed for all possible test pair combinations.
row_inds_X2pred : list of indices, shape = [n_test_pairs], optional
maps rows of X2pred to vector of predictions P. If not supplied, predictions are computed for all possible test pair combinations.
Returns
----------
P : array, shape = [n_test_pairs] or [n_samples1*n_samples2]
predictions, either ordered according to the supplied row indices, or if no such are supplied by default
prediction for (X1[i], X2[j]) maps to P[i + j*n_samples1].
"""
if len(X1pred.shape) == 1:
if self.W.shape[0] > 1:
X1pred = X1pred[np.newaxis, ...]
else:
X1pred = X1pred[..., np.newaxis]
if len(X2pred.shape) == 1:
if self.W.shape[1] > 1:
X2pred = X2pred[np.newaxis, ...]
else:
X2pred = X2pred[..., np.newaxis]
if row_inds_X1pred is None:
P = np.dot(np.dot(X1pred, self.W), X2pred.T)
else:
P = sampled_kronecker_products.sampled_vec_trick(
self.W.reshape((self.W.shape[0] * self.W.shape[1]), order = 'F'),
X2pred,
X1pred,
np.array(row_inds_X2pred, dtype = np.int32),
np.array(row_inds_X1pred, dtype = np.int32),
None,
None)
return P.ravel(order = 'F')
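if __name__ == '__main__':
    # Minimal sketch (not part of the original module): a numeric check of the
    # ordering documented above, i.e. that the prediction for the pair
    # (X1[i], X2[j]) lands in P[i + j*n_samples1].  Running this file directly
    # still needs the pairwisemkl package on the path and the compiled Cython
    # extension, because of the module-level import of sampled_kronecker_products.
    rng = np.random.RandomState(0)
    W = rng.rand(4, 3)                      # toy primal coefficients
    X1 = rng.rand(5, 4)                     # n_samples1 x n_features1
    X2 = rng.rand(6, 3)                     # n_samples2 x n_features2
    P = LinearPairwisePredictor(W).predict(X1, X2)
    i, j = 2, 4
    direct = float(np.dot(np.dot(X1[i], W), X2[j]))
    print(np.isclose(P[i + j * X1.shape[0]], direct))   # expected: True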
| 9,761 | 43.575342 | 142 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/utilities/array_tools.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL and RLScore
#
# Copyright (c) 2018 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from scipy import sparse as sp
def as_2d_array(A, allow_sparse = False):
#Interprets the input as 2d-array
if allow_sparse and sp.issparse(A):
s = np.sum(A.data)
if s == np.inf or s == -np.inf:
raise ValueError("Sparse matrix contains infinity")
elif np.isnan(s):
raise ValueError("Sparse matrix contains NaN")
return A
if not allow_sparse and sp.issparse(A):
A = A.todense()
A = np.array(A, copy = False)
shape = A.shape
if not np.issubdtype(A.dtype, int) and not np.issubdtype(A.dtype, float):
raise ValueError("Argument array contains non-numerical data")
if not len(shape) < 3:
raise ValueError("Argument array of incorrect shape: expected 1D or 2D array, got %d dimensions" %len(shape))
s = np.sum(A)
if s == np.inf or s == -np.inf:
raise ValueError("Array contains infinity")
elif np.isnan(s):
raise ValueError("Array contains NaN")
if len(A.shape) == 1:
A = A.reshape((A.shape[0], 1))
elif len(A.shape) == 0:
A = A.reshape((1,1))
return A
def as_index_list(I, maxind):
    I = np.array(I, dtype=np.int64, copy=False)  # np.long was removed from NumPy; a 64-bit integer dtype keeps the original intent
if len(I.shape) != 1:
raise ValueError("Index list should be one dimensional")
if len(I) == 0:
raise ValueError("Index list cannot be empty")
minval = np.min(I)
maxval = np.max(I)
if minval < 0 or maxval >= maxind:
raise IndexError("Index outside allowed range %d ... %d" %(0, maxind-1))
return I
def as_dense_matrix(A):
"""Returns the input as matrix
Parameters
----------
A: {array-like, sparse matrix}, shape = 2D
Returns
-------
A : np.matrix
"""
if sp.issparse(A):
return A.todense()
else:
return np.mat(A)
def as_matrix(A):
"""Returns the input as matrix or sparse matrix
Parameters
----------
A: {array-like, sparse matrix}, shape = 2D
Returns
-------
A : {matrix, sparse matrix}
"""
if sp.issparse(A):
return A
else:
return np.mat(A)
def as_array(A):
"""Returns the input as dense array
Parameters
----------
A: {array-like, sparse matrix}, shape = 2D
Returns
-------
A : {array}
"""
if sp.issparse(A):
A = A.todense()
return np.asarray(A)
def spmat_resize(A, fdim):
"""Resizes the number of columns in sparse matrix to fdim, either removing or adding columns.
Parameters
----------
A: sparse matrix, size = [n_rows, n_cols]
Returns
-------
A : csr_matrix, size = [n_rows, fdim]
"""
if fdim < A.shape[1]:
        #Column slicing is efficient only for csc_matrix
A = sp.csc_matrix(A)[:,:fdim]
elif fdim > A.shape[1]:
diff = fdim - A.shape[1]
A = sp.hstack([A,sp.lil_matrix((A.shape[0],diff), dtype=np.float64)])
A = sp.csr_matrix(A)
return A
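if __name__ == '__main__':
    # Minimal sketch (not part of the original module) of the helpers above:
    # as_2d_array promotes a 1D vector to a column vector, and spmat_resize
    # pads or truncates the column dimension of a sparse matrix.
    v = np.array([1.0, 2.0, 3.0])
    print(as_2d_array(v).shape)             # (3, 1)
    A = sp.csr_matrix(np.eye(3))
    print(spmat_resize(A, 5).shape)         # (3, 5)
    print(spmat_resize(A, 2).shape)         # (3, 2)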
| 4,189 | 29.143885 | 117 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/utilities/sampled_kronecker_products.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL and RLScore
#
# Copyright (c) 2018 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from . import _sampled_kronecker_products
def _compute_subset_of_matprod_entries(*args):
_sampled_kronecker_products.compute_subset_of_matprod_entries(*args)
def _sparse_mat_from_left(*args):
_sampled_kronecker_products.sparse_mat_from_left(*args)
def _sparse_mat_from_right(*args):
_sampled_kronecker_products.sparse_mat_from_right(*args)
# u <- R * (M x N) * C * v
def sampled_vec_trick(v, M, N, row_inds_M = None, row_inds_N = None, col_inds_M = None, col_inds_N = None):
assert len(v.shape) == 1
if len(M.shape) == 1:
M = M[..., np.newaxis]
rc_m, cc_m = M.shape
if len(N.shape) == 1:
N = N[..., np.newaxis]
rc_n, cc_n = N.shape
if row_inds_N is None:
u_len = rc_m * rc_n
#row_inds_N, row_inds_M = np.unravel_index(np.arange(rc_m * rc_n), (rc_n, rc_m), order = 'F')
#Not sure why the next row is necessary
#row_inds_N, row_inds_M = np.array(row_inds_N, dtype = np.int32), np.array(row_inds_M, dtype = np.int32)
else:
u_len = len(row_inds_N)
assert len(row_inds_N) == len(row_inds_M)
assert np.min(row_inds_N) >= 0
assert np.min(row_inds_M) >= 0
assert np.max(row_inds_N) < rc_n
assert np.max(row_inds_M) < rc_m
if col_inds_N is None:
v_len = cc_m * cc_n
#col_inds_N, col_inds_M = np.unravel_index(np.arange(cc_m * cc_n), (cc_n, cc_m), order = 'F')
#Not sure why the next row is necessary
#col_inds_N, col_inds_M = np.array(col_inds_N, dtype = np.int32), np.array(col_inds_M, dtype = np.int32)
else:
v_len = len(col_inds_N)
assert len(col_inds_N) == len(col_inds_M)
assert len(col_inds_N) == v.shape[0]
assert np.min(col_inds_N) >= 0
assert np.min(col_inds_M) >= 0
assert np.max(col_inds_N) < cc_n
assert np.max(col_inds_M) < cc_m
if rc_m * v_len + cc_n * u_len < rc_n * v_len + cc_m * u_len:
if col_inds_N is None:
temp = np.dot(v.reshape((cc_n, cc_m), order = 'F'), M.T)
else:
temp = np.zeros((cc_n, rc_m), order='C')
            M = np.array(M, order = 'F')  # 'F' (Fortran order); some NumPy versions do not accept the spelled-out 'FORTRAN' flag
_sampled_kronecker_products.sparse_mat_from_left(temp, v, M.T, col_inds_N, col_inds_M, v_len, rc_m)
if row_inds_N is None:
x_after = np.dot(N, temp)
x_after = x_after.reshape((u_len,), order = 'F')
else:
            temp = np.array(temp, order = 'F')
N = np.array(N, order = 'C')
x_after = np.zeros((u_len))
_sampled_kronecker_products.compute_subset_of_matprod_entries(x_after, N, temp, row_inds_N, row_inds_M, u_len, cc_n)
else:
if col_inds_N is None:
temp = np.dot(N, v.reshape((cc_n, cc_m), order = 'F'))
else:
            temp = np.zeros((rc_n, cc_m), order = 'F')
            N = np.array(N, order = 'F')
_sampled_kronecker_products.sparse_mat_from_right(temp, N, v, col_inds_N, col_inds_M, v_len, rc_n)
if row_inds_N is None:
x_after = np.dot(temp, M.T)
x_after = x_after.reshape((u_len,), order = 'F')
else:
temp = np.array(temp, order = 'C')
M = np.array(M, order = 'C')
x_after = np.zeros((u_len))
_sampled_kronecker_products.compute_subset_of_matprod_entries(x_after, temp, M.T, row_inds_N, row_inds_M, u_len, cc_m)
return x_after
def _x_gets_A_kron_B_times_sparse_v(v, M, N, row_inds, col_inds): #MVN=(N.T x M)v
if len(M.shape) == 1:
M = M[..., np.newaxis]
rc_m, cc_m = M.shape
if len(N.shape) == 1:
N = N[..., np.newaxis]
rc_n, cc_n = N.shape
nzc_v = len(row_inds)
len_c = rc_m * cc_n
if rc_m * cc_m * cc_n + cc_n * nzc_v < rc_n * cc_m * cc_n + cc_m * nzc_v:
temp = np.zeros((cc_m, cc_n))
_sampled_kronecker_products.sparse_mat_from_left(temp, v, N, row_inds, col_inds, nzc_v, cc_n)
temp = np.dot(M, temp)
return temp.reshape((len_c,), order = 'F')
else:
temp = np.zeros((rc_m, rc_n))
_sampled_kronecker_products.sparse_mat_from_right(temp, M, v, row_inds, col_inds, nzc_v, rc_m)
temp = np.dot(temp, N)
return temp.reshape((len_c,), order = 'F')
def _x_gets_subset_of_A_kron_B_times_v(v, M, N, row_inds, col_inds):
if len(M.shape) == 1:
M = M[..., np.newaxis]
rc_m, cc_m = M.shape
if len(N.shape) == 1:
N = N[..., np.newaxis]
rc_n, cc_n = N.shape
nzc_x = len(row_inds)
x_after = np.zeros(nzc_x)
temp = v.reshape((cc_m, rc_n), order = 'F')
if rc_m * cc_m * cc_n + cc_n * nzc_x < rc_n * cc_m * cc_n + cc_m * nzc_x:
temp = np.dot(M, temp)
_sampled_kronecker_products.compute_subset_of_matprod_entries(x_after, temp, N, row_inds, col_inds, nzc_x, rc_n)
return x_after
else:
temp = np.dot(temp, N)
_sampled_kronecker_products.compute_subset_of_matprod_entries(x_after, M, temp, row_inds, col_inds, nzc_x, cc_m)
return x_after
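if __name__ == '__main__':
    # Minimal sanity check (not part of the original module): with small dense
    # matrices and no index subsetting, sampled_vec_trick(v, M, N) implements
    # the standard vec trick u = (M kron N) v, so it should agree with the
    # Kronecker product formed explicitly.  Because of the relative import
    # above, run it as a module with the Cython extension built, e.g.
    #     python -m pairwisemkl.utilities.sampled_kronecker_products
    rng = np.random.RandomState(0)
    M = rng.rand(3, 4)
    N = rng.rand(2, 5)
    v = rng.rand(4 * 5)
    u_trick = sampled_vec_trick(v, M, N)
    u_full = np.dot(np.kron(M, N), v)
    print(np.max(np.abs(u_trick - u_full)))  # expected: ~1e-16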
| 6,300 | 39.651613 | 130 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/learner/compute_M.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL
#
# Copyright (c) 2018 Anna Cichonska
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import copy
from pairwisemkl.learner.kron_decomp import kron_decomp_centralization_operator
def compute_M(Ka_list, Kb_list):
"""
Task: to compute matrix 'M' needed for optimizing pairwise kernel weights
(equation 12 of the paper describing pairwiseMKL method)
Input: Ka_list List of drug (view A in general) kernel matrices
Kb_list List of cell line (view B in general) kernel matrices
Output: M Matrix storing Frobenius inner products between all
pairs of centered input pairwise kernels
References:
[1] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
# To compute the factors of the pairwise kernel centering operator
Q = kron_decomp_centralization_operator(Ka_list[0].shape[0], Kb_list[0].shape[0])
# Total number of pairwise kernels
p = len(Ka_list)*len(Kb_list)
    M = np.empty([p, p]); M[:] = np.nan
ids_kernels = np.arange(p)
Ka_ids, Kb_ids = np.unravel_index(ids_kernels, (len(Ka_list),len(Kb_list)), order = 'C')
# Calculate elements of the matrix M
for i_pairwise_k in range(p):
i = Ka_ids[i_pairwise_k]
j = Kb_ids[i_pairwise_k]
h_col_start = i_pairwise_k+1
h_col_temp = copy.deepcopy(h_col_start)
h = 0
for ii in Ka_ids[h_col_start:p]:
jj = Kb_ids[h_col_start:p][h]
h = h+1
# Compute < K_k, K_l>_F
M[i_pairwise_k, h_col_temp] = calculate_element(Q, Ka_list[i], Ka_list[ii], Kb_list[j], Kb_list[jj])
M[h_col_temp, i_pairwise_k] = M[i_pairwise_k, h_col_temp]
h_col_temp = h_col_temp + 1
# diagonal(M) = ( ||K_k||_F )^2
M[i_pairwise_k, i_pairwise_k] = calculate_element(Q, Ka_list[i], Ka_list[i], Kb_list[j], Kb_list[j])
return M
def calculate_element(Q, Ka_i, Ka_j, Kb_i, Kb_j):
"""
Task: to compute a single element of the matrix M
Input: Q List of lists, 2\times 2, of the factor matrices of
the kernel centering operator
Ka_i i'th drug kernel matrix
Ka_j j'th drug kernel matrix
Kb_i i'th cell line kernel matrix
Kb_j j'th cell line kernel matrix
Output: m Frobenius inner product between centered pairwise
kernels (Ka_i \otimes Kb_i) and (Ka_j \otimes Kb_j)
"""
nsvalue = 2
m = 0
for q in range(nsvalue):
for r in range(nsvalue):
m += np.trace( np.dot(np.dot(np.dot(Q[q][0],Ka_i),Q[r][0]),Ka_j) ) \
* np.trace( np.dot(np.dot(np.dot(Q[q][1],Kb_i),Q[r][1]),Kb_j) )
return m
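if __name__ == '__main__':
    # Minimal sketch (not part of the original module): M can be filled without
    # ever forming pairwise kernels explicitly because, for symmetric kernel
    # matrices,  < Ka_i x Kb_i , Ka_j x Kb_j >_F = tr(Ka_i Ka_j) * tr(Kb_i Kb_j);
    # calculate_element applies the same idea with the centering factors Q
    # folded in.  Below is a numeric check of the uncentered identity on toy
    # kernels (only NumPy is needed, but the module-level import of kron_decomp
    # means the pairwisemkl package must be importable to run this file).
    np.random.seed(0)
    A1 = np.random.rand(4, 4); Ka_i = np.dot(A1, A1.T)
    A2 = np.random.rand(4, 4); Ka_j = np.dot(A2, A2.T)
    B1 = np.random.rand(3, 3); Kb_i = np.dot(B1, B1.T)
    B2 = np.random.rand(3, 3); Kb_j = np.dot(B2, B2.T)
    lhs = np.sum(np.kron(Ka_i, Kb_i) * np.kron(Ka_j, Kb_j))
    rhs = np.trace(np.dot(Ka_i, Ka_j)) * np.trace(np.dot(Kb_i, Kb_j))
    print(np.isclose(lhs, rhs))              # expected: True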
| 4,219 | 37.018018 | 112 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/learner/compute_a_regression.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL
#
# Copyright (c) 2018 Anna Cichonska
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import math
from scipy import stats
from pairwisemkl.learner.kron_decomp import kron_decomp_centralization_operator
def response_kernel_features(Y):
"""
Task: to compute feature vector for each label value
Input: Y Matrix with the original labels
Output: Psi_y Matrix storing features as row vectors
References:
[1] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
# Labels in the vector form
y = Y.ravel(order = 'C')
# Generate probability density function of the labels
min_y = min(y)
max_y = max(y)
n_interv = 50
step = float(max_y-min_y)/n_interv
x_interv = np.arange(math.floor((min_y)*10)/10-(n_interv+1)*step, math.ceil((max_y)*10)/10+(n_interv+1)*step, step)
# Intervals: [x_interv[0],x_interv[1]), [x_interv[1],x_interv[2]), ...
x = [(a+b)/2 for a,b in zip(x_interv[::1], x_interv[1::1])]
kde = stats.gaussian_kde(y)
x_kde = kde(x)
# plt.plot(x,x_kde)
# plt.xlim([min(x),max(x)])
# plt.show()
# Matrix storing features as row vectors (one feature vector per label)
    Psi_y = np.empty([len(y), n_interv*2]); Psi_y[:] = np.nan
for i in range(len(y)):
id_i = np.where(x >= y[i])[0][0]
Psi_y[i,] = x_kde[id_i-n_interv:id_i+n_interv]
Psi_y[i,] = Psi_y[i,]/np.linalg.norm(Psi_y[i,])
# Ky = Sum_q(Psi_y[:,q] Psi_y[:,q]^T)
return Psi_y
def compute_a_regression(Ka_list, Kb_list, Y):
"""
Task: to compute vector 'a' needed for optimizing pairwise kernel weights
(equation 16 of the paper describing pairwiseMKL method)
Input: Ka_list List of drug (view A in general) kernel matrices
Kb_list List of cell line (view B in general) kernel matrices
Y Matrix with the original labels
Output: a Vector storing Frobenius inner products between each
centered input pairwise kernel and the response
kernel
"""
# To compute the factors of the pairwise kernel centering operator
Q = kron_decomp_centralization_operator(Ka_list[0].shape[0], Kb_list[0].shape[0])
# Total number of pairwise kernels
p = len(Ka_list)*len(Kb_list)
ids_kernels = np.arange(p)
Ka_ids, Kb_ids = np.unravel_index(ids_kernels, (len(Ka_list),len(Kb_list)), order = 'C')
# Replace missing values in the label matrix with row means
    if np.isnan(Y).any():
nan_ids = np.where(np.isnan(Y))
row_mean = np.nanmean(Y, axis=1)
Y[nan_ids] = np.take(row_mean,nan_ids[0])
# If all the values in a row are missing, use global mean
        if np.isnan(Y).any():
nan_ids_remaining = np.where(np.isnan(Y))
global_mean = np.nanmean(Y.ravel(order = 'C'))
Y[nan_ids_remaining] = global_mean
# Compute feature vectors for each label value
Psi_y = response_kernel_features(Y)
a = np.zeros([1,p])
n_y = Psi_y.shape[0]
# Response kernel Ky
# K = np.zeros([n_y,n_y])
# q = 0
# while q < Psi_y.shape[1]:
# v_q = Psi_y[:,q].reshape(n_y,1)
# K = K + np.dot(v_q , v_q.T)
# q = q + 1
# Calculate elements of the vector 'a'
for i_pairwise_k in range(p):
i = Ka_ids[i_pairwise_k]
j = Kb_ids[i_pairwise_k]
Ka_1 = np.dot( np.dot(Q[0][0],Ka_list[i]), Q[0][0] )
Ka_2 = np.dot( np.dot(Q[1][0],Ka_list[i]), Q[1][0] )
Ka_3 = np.dot( np.dot(Q[0][0],Ka_list[i]), Q[1][0] )
Ka_4 = np.dot( np.dot(Q[1][0],Ka_list[i]), Q[0][0] )
Kb_1 = np.dot( np.dot(Q[0][1],Kb_list[j]), Q[0][1] )
Kb_2 = np.dot( np.dot(Q[1][1],Kb_list[j]), Q[1][1] )
Kb_3 = np.dot( np.dot(Q[1][1],Kb_list[j]), Q[0][1] )
Kb_4 = np.dot( np.dot(Q[0][1],Kb_list[j]), Q[1][1] )
# Compute < K_k^(c), K_y^(c)>_F
q = 0
while q < Psi_y.shape[1]:
psi_q = Psi_y[:,q].reshape(n_y,1) # vector
Psi_q = np.reshape(psi_q, Y.shape, order = 'C') # matrix form
v1 = np.dot( np.dot(Kb_1,Psi_q.T), Ka_1 ).ravel(order = 'F')
v2 = np.dot( np.dot(Kb_2,Psi_q.T), Ka_2 ).ravel(order = 'F')
v3 = np.dot( np.dot(Kb_3,Psi_q.T), Ka_3 ).ravel(order = 'F')
v4 = np.dot( np.dot(Kb_4,Psi_q.T), Ka_4 ).ravel(order = 'F')
a[0, i_pairwise_k] = a[0, i_pairwise_k] + np.dot(psi_q.T, v1+v2+v3+v4)
q = q + 1
return a
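if __name__ == '__main__':
    # Minimal sketch (not part of the original module): response_kernel_features
    # returns one feature vector per label, and, as noted above, the response
    # kernel is recovered as Ky = Psi_y Psi_y^T.  Toy labels are used here; the
    # pairwisemkl package must be importable because of the module-level import
    # of kron_decomp.
    np.random.seed(0)
    Y_toy = np.random.rand(6, 5)             # 6 "drugs" x 5 "cell lines"
    Psi_y = response_kernel_features(Y_toy)
    Ky = np.dot(Psi_y, Psi_y.T)
    print(Psi_y.shape)                       # (30, 100)
    print(Ky.shape, np.allclose(Ky, Ky.T))   # (30, 30) True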
| 5,993 | 37.423077 | 119 |
py
|
pairwiseMKL
|
pairwiseMKL-master/pairwisemkl/learner/cg_kron_rls.py
|
#
# The MIT License (MIT)
#
# This file is part of pairwiseMKL and RLScore
#
# Copyright (c) 2018 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from scipy.sparse.linalg import LinearOperator
from scipy.sparse.linalg import minres
from pairwisemkl.predictor.pairwise_predictor import LinearPairwisePredictor
from pairwisemkl.predictor.pairwise_predictor import KernelPairwisePredictor
from pairwisemkl.utilities import array_tools
from pairwisemkl.utilities import sampled_kronecker_products
from pairwisemkl.predictor.pairwise_predictor import PairwisePredictorInterface
CALLBACK_FUNCTION = 'callback'
class CGKronRLS(PairwisePredictorInterface):
"""Regularized least-squares regression with
paired-input (dyadic) data and Kronecker kernels.
Iterative solver for incomplete data set.
Parameters
----------
X1 : {array-like}, shape = [n_samples1, n_features1]
Data matrix 1 (for linear KronRLS)
X2 : {array-like}, shape = [n_samples2, n_features2]
Data matrix 2 (for linear KronRLS)
K1 : {array-like, list of equally shaped array-likes}, shape = [n_samples1, n_samples1]
Kernel matrix 1 (for kernel KronRLS)
K2 : {array-like, list of equally shaped array-likes}, shape = [n_samples1, n_samples1]
Kernel matrix 2 (for kernel KronRLS)
weights : {list, tuple, array-like}, shape = [n_kernels], optional
weights used by multiple pairwise kernel predictors
Y : {array-like}, shape = [n_train_pairs]
Training set labels.
label_row_inds : list of indices, shape = [n_train_pairs]
row indices from X1, corresponding to labels in Y
label_col_inds : list of indices, shape = [n_train_pairs]
row indices from X2, corresponding to labels in Y
regparam : float, optional
        regularization parameter, regparam >= 0 (this implementation defaults to 0.0 when not given)
maxiter : int, optional
maximum number of iterations (default: no upper limit)
Attributes
-----------
predictor : {LinearPairwisePredictor, KernelPairwisePredictor}
trained predictor
References
----------
[1] Tapio Pahikkala.
Fast gradient computation for learning with tensor product kernels and sparse training labels.
Structural, Syntactic, and Statistical Pattern Recognition (S+SSPR).
volume 8621 of Lecture Notes in Computer Science, pages 123--132. 2014.
[2] Anna Cichonska, Tapio Pahikkala, Sandor Szedmak, Heli Julkunen, Antti Airola,
Markus Heinonen, Tero Aittokallio, Juho Rousu.
Learning with multiple pairwise kernels for drug bioactivity prediction.
Bioinformatics, 34, pages i509–i518. 2018.
"""
def __init__(self, **kwargs):
Y = kwargs["Y"]
self.input1_inds = np.array(kwargs["label_row_inds"], dtype = np.int32)
self.input2_inds = np.array(kwargs["label_col_inds"], dtype = np.int32)
Y = array_tools.as_2d_array(Y)
self.Y = np.mat(Y)
self.trained = False
if "regparam" in kwargs:
self.regparam = kwargs["regparam"]
else:
self.regparam = 0.
if CALLBACK_FUNCTION in kwargs:
self.callbackfun = kwargs[CALLBACK_FUNCTION]
else:
self.callbackfun = None
if "compute_risk" in kwargs:
self.compute_risk = kwargs["compute_risk"]
else:
self.compute_risk = False
regparam = self.regparam
if 'K1' in kwargs:
K1 = kwargs['K1']
K2 = kwargs['K2']
if 'maxiter' in kwargs: maxiter = int(kwargs['maxiter'])
else: maxiter = None
Y = np.array(self.Y).ravel(order = 'F')
self.bestloss = float("inf")
def mv(v):
return sampled_kronecker_products.sampled_vec_trick(v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds) + regparam * v
def mv_mk(v):
vsum = regparam * v
for i in range(len(K1)):
K1i = K1[i]
K2i = K2[i]
inds2 = self.input2_inds[i]
inds1 = self.input1_inds[i]
vsum += weights[i] * sampled_kronecker_products.sampled_vec_trick(v, K2i, K1i, inds2, inds1, inds2, inds1)
return vsum
def mvr(v):
raise Exception('You should not be here!')
def cgcb(v):
if self.compute_risk:
P = sampled_kronecker_products.sampled_vec_trick(v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds)
z = (Y - P)
Ka = sampled_kronecker_products.sampled_vec_trick(v, K2, K1, self.input2_inds, self.input1_inds, self.input2_inds, self.input1_inds)
loss = (np.dot(z,z)+regparam*np.dot(v,Ka))
print("loss", 0.5*loss)
if loss < self.bestloss:
self.A = v.copy()
self.bestloss = loss
else:
self.A = v
if not self.callbackfun is None:
self.predictor = KernelPairwisePredictor(self.A, self.input1_inds, self.input2_inds)
self.callbackfun.callback(self)
if isinstance(K1, (list, tuple)):
if 'weights' in kwargs: weights = kwargs['weights']
else: weights = np.ones((len(K1)))
G = LinearOperator((len(self.input1_inds[0]), len(self.input1_inds[0])), matvec = mv_mk, rmatvec = mvr, dtype = np.float64)
else:
weights = None
G = LinearOperator((len(self.input1_inds), len(self.input1_inds)), matvec = mv, rmatvec = mvr, dtype = np.float64)
self.A = minres(G, self.Y, maxiter = maxiter, callback = cgcb, tol=1e-20)[0]
self.predictor = KernelPairwisePredictor(self.A, self.input1_inds, self.input2_inds, weights)
else:
X1 = kwargs['X1']
X2 = kwargs['X2']
self.X1, self.X2 = X1, X2
if 'maxiter' in kwargs: maxiter = int(kwargs['maxiter'])
else: maxiter = None
if isinstance(X1, (list, tuple)):
                raise NotImplementedError("Got list or tuple as X1 but multiple kernel learning has not been implemented for the primal case yet.")
x1tsize, x1fsize = X1[0].shape #m, d
x2tsize, x2fsize = X2[0].shape #q, r
else:
x1tsize, x1fsize = X1.shape #m, d
x2tsize, x2fsize = X2.shape #q, r
kronfcount = x1fsize * x2fsize
Y = np.array(self.Y).ravel(order = 'F')
self.bestloss = float("inf")
def mv(v):
v_after = sampled_kronecker_products.sampled_vec_trick(v, X2, X1, self.input2_inds, self.input1_inds)
v_after = sampled_kronecker_products.sampled_vec_trick(v_after, X2.T, X1.T, None, None, self.input2_inds, self.input1_inds) + regparam * v
return v_after
def mv_mk(v):
vsum = regparam * v
for i in range(len(X1)):
X1i = X1[i]
X2i = X2[i]
v_after = sampled_kronecker_products.sampled_vec_trick(v, X2i, X1i, self.input2_inds, self.input1_inds)
v_after = sampled_kronecker_products.sampled_vec_trick(v_after, X2i.T, X1i.T, None, None, self.input2_inds, self.input1_inds)
vsum = vsum + v_after
return vsum
def mvr(v):
raise Exception('You should not be here!')
return None
def cgcb(v):
if self.compute_risk:
P = sampled_kronecker_products.sampled_vec_trick(v, X2, X1, self.input2_inds, self.input1_inds)
z = (Y - P)
loss = (np.dot(z,z)+regparam*np.dot(v,v))
if loss < self.bestloss:
self.W = v.copy().reshape((x1fsize, x2fsize), order = 'F')
self.bestloss = loss
else:
self.W = v.reshape((x1fsize, x2fsize), order = 'F')
if not self.callbackfun is None:
self.predictor = LinearPairwisePredictor(self.W)
self.callbackfun.callback(self)
if isinstance(X1, (list, tuple)):
G = LinearOperator((kronfcount, kronfcount), matvec = mv_mk, rmatvec = mvr, dtype = np.float64)
vsum = np.zeros(kronfcount)
v_init = np.array(self.Y).reshape(self.Y.shape[0])
for i in range(len(X1)):
X1i = X1[i]
X2i = X2[i]
vsum += sampled_kronecker_products.sampled_vec_trick(v_init, X2i.T, X1i.T, None, None, self.input2_inds, self.input1_inds)
v_init = vsum
else:
G = LinearOperator((kronfcount, kronfcount), matvec = mv, rmatvec = mvr, dtype = np.float64)
v_init = np.array(self.Y).reshape(self.Y.shape[0])
v_init = sampled_kronecker_products.sampled_vec_trick(v_init, X2.T, X1.T, None, None, self.input2_inds, self.input1_inds)
v_init = np.array(v_init).reshape(kronfcount)
if 'warm_start' in kwargs:
x0 = np.array(kwargs['warm_start']).reshape(kronfcount, order = 'F')
else:
x0 = None
            self.W = minres(G, v_init, x0 = x0, maxiter = maxiter, callback = cgcb, tol=1e-20)[0].reshape((x1fsize, x2fsize), order='F')
self.predictor = LinearPairwisePredictor(self.W)
if not self.callbackfun is None:
self.callbackfun.finished(self)
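if __name__ == '__main__':
    # Illustrative sketch only (not part of the original module): fitting
    # CGKronRLS with a single pair of kernel matrices on an incomplete set of
    # labelled pairs, then predicting every pair combination.  It assumes the
    # compiled Cython extension is available and the pairwisemkl package is
    # importable, e.g. run as  python -m pairwisemkl.learner.cg_kron_rls
    rng = np.random.RandomState(0)
    X1 = rng.rand(10, 4)                     # toy drug features
    X2 = rng.rand(8, 3)                      # toy cell line features
    K1 = np.dot(X1, X1.T)                    # linear kernels, just for the sketch
    K2 = np.dot(X2, X2.T)
    rows = rng.randint(0, 10, 40)            # drug index of each labelled pair
    cols = rng.randint(0, 8, 40)             # cell line index of each labelled pair
    Y = rng.rand(40)                         # toy labels
    learner = CGKronRLS(K1=K1, K2=K2, Y=Y,
                        label_row_inds=rows, label_col_inds=cols,
                        regparam=1.0, maxiter=100)
    P = learner.predict(K1, K2)              # predictions for all 10*8 pairs
    print(P.shape)                           # (80,)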
| 11,138 | 43.378486 | 165 |
py
|