body | body_hash | docstring | path | name | repository_name | lang | body_without_docstring
---|---|---|---|---|---|---|---|
def validate_key(self, activation_key):
'\n Verify that the activation key is valid and within the\n permitted activation time window, returning the username if\n valid or ``None`` if not.\n\n '
try:
username = signing.loads(activation_key, salt=self.key_salt, max_age=(conf.get('ACCOUNT_ACTIVATION_DAYS') * 86400))
return username
except signing.BadSignature:
return None | -5,516,472,830,826,670,000 | Verify that the activation key is valid and within the
permitted activation time window, returning the username if
valid or ``None`` if not. | polyaxon/api/users/views.py | validate_key | AntoineToubhans/polyaxon | python | def validate_key(self, activation_key):
'\n Verify that the activation key is valid and within the\n permitted activation time window, returning the username if\n valid or ``None`` if not.\n\n '
try:
username = signing.loads(activation_key, salt=self.key_salt, max_age=(conf.get('ACCOUNT_ACTIVATION_DAYS') * 86400))
return username
except signing.BadSignature:
return None |
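For context, a minimal sketch of how such an activation key would be produced and verified with Django's `django.core.signing` module. The salt value and the three-day window below are illustrative assumptions, not values taken from this repository:

```python
from django.core import signing

KEY_SALT = 'registration'  # assumed; the view above reads it from self.key_salt
MAX_AGE = 3 * 86400        # assumed ACCOUNT_ACTIVATION_DAYS of 3, in seconds

# A registration flow would sign the username into an opaque key:
activation_key = signing.dumps('alice', salt=KEY_SALT)

# Verification mirrors validate_key() above; SignatureExpired subclasses
# BadSignature, so expired keys fall into the same branch:
try:
    username = signing.loads(activation_key, salt=KEY_SALT, max_age=MAX_AGE)
except signing.BadSignature:
    username = None
```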
def get_user(self, username):
"\n Given the verified username, look up and return the\n corresponding user account if it exists, or ``None`` if it\n doesn't.\n\n "
User = get_user_model()
try:
user = User.objects.get(**{User.USERNAME_FIELD: username, 'is_active': False})
return user
except User.DoesNotExist:
return None | -3,394,210,588,801,529,300 | Given the verified username, look up and return the
corresponding user account if it exists, or ``None`` if it
doesn't. | polyaxon/api/users/views.py | get_user | AntoineToubhans/polyaxon | python | def get_user(self, username):
"\n Given the verified username, look up and return the\n corresponding user account if it exists, or ``None`` if it\n doesn't.\n\n "
User = get_user_model()
try:
user = User.objects.get(**{User.USERNAME_FIELD: username, 'is_active': False})
return user
except User.DoesNotExist:
return None |
def get(self, request, *args, **kwargs):
'The base activation logic; subclasses should leave this method\n alone and implement activate(), which is called from this method.\n '
activated_user = self.activate(*args, **kwargs)
if activated_user:
users_signals.user_activated.send(sender=self.__class__, user=activated_user, request=request)
return redirect(self.success_url)
return super().get(request, *args, **kwargs) | -4,971,766,240,887,481,000 | The base activation logic; subclasses should leave this method
alone and implement activate(), which is called from this method. | polyaxon/api/users/views.py | get | AntoineToubhans/polyaxon | python | def get(self, request, *args, **kwargs):
'The base activation logic; subclasses should leave this method\n alone and implement activate(), which is called from this method.\n '
activated_user = self.activate(*args, **kwargs)
if activated_user:
users_signals.user_activated.send(sender=self.__class__, user=activated_user, request=request)
return redirect(self.success_url)
return super().get(request, *args, **kwargs) |
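The docstring above says subclasses implement `activate()`; a minimal sketch of that hook, wired to the `validate_key()` and `get_user()` methods shown earlier (the subclass name and base-class wiring are hypothetical):

```python
class AccountActivationView(ActivationView):  # hypothetical subclass
    def activate(self, *args, **kwargs):
        # Recover the username from the signed key, then enable the account.
        username = self.validate_key(kwargs.get('activation_key'))
        if username is None:
            return None
        user = self.get_user(username)
        if user is not None:
            user.is_active = True
            user.save()
        return user
```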
def __init__(self, Lower=(- 1.0), Upper=1.0):
'Initialize the Cosine mixture benchmark.\n\n\t\tArgs:\n\t\t\tLower (Optional[float]): Lower bound of problem.\n\t\t\tUpper (Optional[float]): Upper bound of problem.\n\n\t\tSee Also:\n\t\t\t:func:`NiaPy.benchmarks.Benchmark.__init__`\n\t\t'
Benchmark.__init__(self, Lower, Upper) | -8,085,448,226,588,698,000 | Initialize the Cosine mixture benchmark.
Args:
Lower (Optional[float]): Lower bound of problem.
Upper (Optional[float]): Upper bound of problem.
See Also:
:func:`NiaPy.benchmarks.Benchmark.__init__` | NiaPy/benchmarks/cosinemixture.py | __init__ | lucijabrezocnik/NiaPy | python | def __init__(self, Lower=(- 1.0), Upper=1.0):
'Initialize the Cosine mixture benchmark.\n\n\t\tArgs:\n\t\t\tLower (Optional[float]): Lower bound of problem.\n\t\t\tUpper (Optional[float]): Upper bound of problem.\n\n\t\tSee Also:\n\t\t\t:func:`NiaPy.benchmarks.Benchmark.__init__`\n\t\t'
Benchmark.__init__(self, Lower, Upper) |
@staticmethod
def latex_code():
'Return the latex code of the problem.\n\n\t\tReturns:\n\t\t\tstr: Latex code\n\t\t'
return '$f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2$' | 1,873,629,386,612,206,300 | Return the latex code of the problem.
Returns:
str: Latex code | NiaPy/benchmarks/cosinemixture.py | latex_code | lucijabrezocnik/NiaPy | python | @staticmethod
def latex_code():
'Return the latex code of the problem.\n\n\t\tReturns:\n\t\t\tstr: Latex code\n\t\t'
return '$f(\\textbf{x}) = - 0.1 \\sum_{i = 1}^D \\cos (5 \\pi x_i) - \\sum_{i = 1}^D x_i^2$' |
def function(self):
'Return benchmark evaluation function.\n\n\t\tReturns:\n\t\t\tCallable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n\t\t'
def f(D, X):
'Fitness function.\n\n\t\t\tArgs:\n\t\t\t\tD (int): Dimensionality of the problem\n\t\t\t\tX (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n\t\t\tReturns:\n\t\t\t\tfloat: Fitness value for the solution.\n\t\t\t'
(v1, v2) = (0.0, 0.0)
for i in range(D):
(v1, v2) = ((v1 + cos(((5 * pi) * X[i]))), (v2 + (X[i] ** 2)))
return (((- 0.1) * v1) - v2)
return f | 6,318,619,589,099,463,000 | Return benchmark evaluation function.
Returns:
Callable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function | NiaPy/benchmarks/cosinemixture.py | function | lucijabrezocnik/NiaPy | python | def function(self):
'Return benchmark evaluation function.\n\n\t\tReturns:\n\t\t\tCallable[[int, Union[int, float, List[int, float], numpy.ndarray]], float]: Fitness function\n\t\t'
def f(D, X):
'Fitness function.\n\n\t\t\tArgs:\n\t\t\t\tD (int): Dimensionality of the problem\n\t\t\t\tX (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n\t\t\tReturns:\n\t\t\t\tfloat: Fitness value for the solution.\n\t\t\t'
(v1, v2) = (0.0, 0.0)
for i in range(D):
(v1, v2) = ((v1 + cos(((5 * pi) * X[i]))), (v2 + (X[i] ** 2)))
return (((- 0.1) * v1) - v2)
return f |
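The element-wise loop in `f` can be collapsed into one vectorized expression; a sketch with NumPy that computes the same value for a 1-D solution vector:

```python
import numpy as np

def cosine_mixture(x):
    # f(x) = -0.1 * sum(cos(5 * pi * x_i)) - sum(x_i ** 2), as in the loop above
    x = np.asarray(x, dtype=float)
    return -0.1 * np.cos(5 * np.pi * x).sum() - np.square(x).sum()

print(cosine_mixture(np.zeros(2)))  # -0.2, i.e. the global minimum -0.1 * D at x = 0
```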
def f(D, X):
'Fitness function.\n\n\t\t\tArgs:\n\t\t\t\tD (int): Dimensionality of the problem\n\t\t\t\tX (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n\t\t\tReturns:\n\t\t\t\tfloat: Fitness value for the solution.\n\t\t\t'
(v1, v2) = (0.0, 0.0)
for i in range(D):
(v1, v2) = ((v1 + cos(((5 * pi) * X[i]))), (v2 + (X[i] ** 2)))
return (((- 0.1) * v1) - v2) | 5,649,132,174,974,258,000 | Fitness function.
Args:
D (int): Dimensionality of the problem
X (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.
Returns:
float: Fitness value for the solution. | NiaPy/benchmarks/cosinemixture.py | f | lucijabrezocnik/NiaPy | python | def f(D, X):
'Fitness function.\n\n\t\t\tArgs:\n\t\t\t\tD (int): Dimensionality of the problem\n\t\t\t\tX (Union[int, float, List[int, float], numpy.ndarray]): Solution to check.\n\n\t\t\tReturns:\n\t\t\t\tfloat: Fitness value for the solution.\n\t\t\t'
(v1, v2) = (0.0, 0.0)
for i in range(D):
(v1, v2) = ((v1 + cos(((5 * pi) * X[i]))), (v2 + (X[i] ** 2)))
return (((- 0.1) * v1) - v2) |
def load_pv_systems(metadata_filename: str=METADATA_FILENAME, stats_filename: str=PV_STATS_FILENAME, timeseries_filename: str=TIMESERIES_FILENAME) -> xr.Dataset:
'Load metadata about PV systems'
pv_metadata = pd.read_csv(metadata_filename, index_col='system_id')
pv_stats = pd.read_csv(stats_filename, index_col='system_id', parse_dates=['actual_date_from', 'actual_date_to', 'record_efficiency_date'])
pv_systems = pv_metadata.join(pv_stats[['actual_date_from', 'actual_date_to', 'outputs']], how='left')
pv_systems_filtered = pv_systems.query('status_interval_minutes <= 60 and outputs > 100')
pv_systems_filtered = pv_systems_filtered.dropna(subset=['latitude', 'longitude'])
system_ids = _get_system_ids_dataframe_from_timeseries(timeseries_filename)
pv_systems_filtered = pv_systems_filtered.join(system_ids, how='inner')
pv_systems_filtered = pv_systems_filtered[['system_name', 'latitude', 'longitude']]
ds = xr.Dataset.from_dataframe(pv_systems_filtered)
ds = _transform_pv_systems(ds)
return ds | 3,270,417,478,702,474,000 | Load metadata about PV systems | predict_pv_yield_nwp/pv.py | load_pv_systems | openclimatefix/predict_pv_yield_nwp | python | def load_pv_systems(metadata_filename: str=METADATA_FILENAME, stats_filename: str=PV_STATS_FILENAME, timeseries_filename: str=TIMESERIES_FILENAME) -> xr.Dataset:
pv_metadata = pd.read_csv(metadata_filename, index_col='system_id')
pv_stats = pd.read_csv(stats_filename, index_col='system_id', parse_dates=['actual_date_from', 'actual_date_to', 'record_efficiency_date'])
pv_systems = pv_metadata.join(pv_stats[['actual_date_from', 'actual_date_to', 'outputs']], how='left')
pv_systems_filtered = pv_systems.query('status_interval_minutes <= 60 and outputs > 100')
pv_systems_filtered = pv_systems_filtered.dropna(subset=['latitude', 'longitude'])
system_ids = _get_system_ids_dataframe_from_timeseries(timeseries_filename)
pv_systems_filtered = pv_systems_filtered.join(system_ids, how='inner')
pv_systems_filtered = pv_systems_filtered[['system_name', 'latitude', 'longitude']]
ds = xr.Dataset.from_dataframe(pv_systems_filtered)
ds = _transform_pv_systems(ds)
return ds |
def _get_system_ids_dataframe_from_timeseries(timeseries_filename: str=TIMESERIES_FILENAME) -> pd.DataFrame:
'Get all the PV system IDs from the timeseries file'
ds = xr.open_dataset(timeseries_filename)
system_ids = [int(x) for x in list(ds.data_vars.keys())]
df = pd.DataFrame({'system_id': system_ids})
df = df.set_index('system_id')
return df | -8,597,639,138,764,902,000 | Get all the PV system IDs from the timeseries file | predict_pv_yield_nwp/pv.py | _get_system_ids_dataframe_from_timeseries | openclimatefix/predict_pv_yield_nwp | python | def _get_system_ids_dataframe_from_timeseries(timeseries_filename: str=TIMESERIES_FILENAME) -> pd.DataFrame:
ds = xr.open_dataset(timeseries_filename)
system_ids = [int(x) for x in list(ds.data_vars.keys())]
df = pd.DataFrame({'system_id': system_ids})
df = df.set_index('system_id')
return df |
def _transform_pv_systems(pv_systems: xr.Dataset) -> xr.Dataset:
'Transform the system locations into the same coordinate system used by UKV'
(system_latitudes, system_longitudes) = (pv_systems['latitude'].values, pv_systems['longitude'].values)
wgs84 = ccrs.Geodetic()
ukv_crs = ccrs.OSGB(approx=False)
locs = ukv_crs.transform_points(src_crs=wgs84, x=np.asanyarray(system_longitudes), y=np.asanyarray(system_latitudes))[:, :(- 1)]
new_coords = {'easting': (['system_id'], locs[:, 0].astype('int32')), 'northing': (['system_id'], locs[:, 1].astype('int32'))}
return pv_systems.assign_coords(new_coords) | 3,621,632,862,040,749,000 | Transform the system locations into the same coordinate system used by UKV | predict_pv_yield_nwp/pv.py | _transform_pv_systems | openclimatefix/predict_pv_yield_nwp | python | def _transform_pv_systems(pv_systems: xr.Dataset) -> xr.Dataset:
(system_latitudes, system_longitudes) = (pv_systems['latitude'].values, pv_systems['longitude'].values)
wgs84 = ccrs.Geodetic()
ukv_crs = ccrs.OSGB(approx=False)
locs = ukv_crs.transform_points(src_crs=wgs84, x=np.asanyarray(system_longitudes), y=np.asanyarray(system_latitudes))[:, :(- 1)]
new_coords = {'easting': (['system_id'], locs[:, 0].astype('int32')), 'northing': (['system_id'], locs[:, 1].astype('int32'))}
return pv_systems.assign_coords(new_coords) |
def _transform_pv_systems_pyproj(pv_systems: xr.Dataset) -> xr.Dataset:
'Transform the system locations into the same coordinate system used by UKV, using pyproj'
import pyproj
(system_latitudes, system_longitudes) = (pv_systems['latitude'].values, pv_systems['longitude'].values)
transformer = pyproj.Transformer.from_crs('epsg:4326', 'epsg:27700', always_xy=True)
locs = transformer.transform(np.asanyarray(system_longitudes), np.asanyarray(system_latitudes))
print(locs)
new_coords = {'easting': (['system_id'], locs[0]), 'northing': (['system_id'], locs[1])}
return pv_systems.assign_coords(new_coords) | 1,727,714,883,699,049,200 | Transform the system locations into the same coordinate system used by UKV, using pyproj | predict_pv_yield_nwp/pv.py | _transform_pv_systems_pyproj | openclimatefix/predict_pv_yield_nwp | python | def _transform_pv_systems_pyproj(pv_systems: xr.Dataset) -> xr.Dataset:
import pyproj
(system_latitudes, system_longitudes) = (pv_systems['latitude'].values, pv_systems['longitude'].values)
transformer = pyproj.Transformer.from_crs('epsg:4326', 'epsg:27700', always_xy=True)
locs = transformer.transform(np.asanyarray(system_longitudes), np.asanyarray(system_latitudes))
print(locs)
new_coords = {'easting': (['system_id'], locs[0]), 'northing': (['system_id'], locs[1])}
return pv_systems.assign_coords(new_coords) |
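Both helpers above map geodetic WGS 84 coordinates into the OSGB projection (EPSG:27700), so they should agree up to rounding; a quick consistency check, assuming `pv_systems` is the dataset returned by `load_pv_systems`:

```python
import numpy as np

ds_cartopy = _transform_pv_systems(pv_systems)
ds_pyproj = _transform_pv_systems_pyproj(pv_systems)

# The cartopy variant truncates to int32 while the pyproj variant keeps
# floats, so allow a one-metre tolerance.
assert np.allclose(ds_cartopy['easting'], ds_pyproj['easting'], atol=1.0)
assert np.allclose(ds_cartopy['northing'], ds_pyproj['northing'], atol=1.0)
```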
def load_pv_timeseries(start_date: str, end_date: str, metadata_filename: str=METADATA_FILENAME, stats_filename: str=PV_STATS_FILENAME, timeseries_filename: str=TIMESERIES_FILENAME) -> xr.Dataset:
'Load the PV timeseries as an xarray dataset, restricted to a given time range, and including location metadata.'
ds = xr.open_dataset(timeseries_filename)
subset = ds.sel(datetime=slice(start_date, end_date))
df = subset.to_dataframe()
df = df.dropna(axis=1, how='all')
pv_df = load_pv_systems(metadata_filename, stats_filename, timeseries_filename).to_dataframe()
pv_metadata_system_ids = pv_df.index.tolist()
timeseries_system_ids = [int(system_id) for system_id in df.columns.tolist()]
system_ids = list(set(pv_metadata_system_ids).intersection(set(timeseries_system_ids)))
system_id_columns = [str(system_id) for system_id in system_ids]
df = df[system_id_columns]
df['datetime'] = df.index
df = pd.melt(df, id_vars=['datetime'], var_name='system_id', value_name='pv_yield')
df = df.astype({'system_id': 'int64'})
df = df.set_index(['system_id', 'datetime'])
ds = xr.Dataset.from_dataframe(df)
new_coords = {'latitude': (['system_id'], pv_df.lookup(system_ids, (['latitude'] * len(system_ids)))), 'longitude': (['system_id'], pv_df.lookup(system_ids, (['longitude'] * len(system_ids)))), 'easting': (['system_id'], pv_df.lookup(system_ids, (['easting'] * len(system_ids)))), 'northing': (['system_id'], pv_df.lookup(system_ids, (['northing'] * len(system_ids))))}
ds = ds.assign_coords(new_coords)
return ds | 3,110,037,801,621,847,600 | Load the PV timeseries as an xarray dataset, restricted to a given time range, and including location metadata. | predict_pv_yield_nwp/pv.py | load_pv_timeseries | openclimatefix/predict_pv_yield_nwp | python | def load_pv_timeseries(start_date: str, end_date: str, metadata_filename: str=METADATA_FILENAME, stats_filename: str=PV_STATS_FILENAME, timeseries_filename: str=TIMESERIES_FILENAME) -> xr.Dataset:
ds = xr.open_dataset(timeseries_filename)
subset = ds.sel(datetime=slice(start_date, end_date))
df = subset.to_dataframe()
df = df.dropna(axis=1, how='all')
pv_df = load_pv_systems(metadata_filename, stats_filename, timeseries_filename).to_dataframe()
pv_metadata_system_ids = pv_df.index.tolist()
timeseries_system_ids = [int(system_id) for system_id in df.columns.tolist()]
system_ids = list(set(pv_metadata_system_ids).intersection(set(timeseries_system_ids)))
system_id_columns = [str(system_id) for system_id in system_ids]
df = df[system_id_columns]
df['datetime'] = df.index
df = pd.melt(df, id_vars=['datetime'], var_name='system_id', value_name='pv_yield')
df = df.astype({'system_id': 'int64'})
df = df.set_index(['system_id', 'datetime'])
ds = xr.Dataset.from_dataframe(df)
new_coords = {'latitude': (['system_id'], pv_df.lookup(system_ids, (['latitude'] * len(system_ids)))), 'longitude': (['system_id'], pv_df.lookup(system_ids, (['longitude'] * len(system_ids)))), 'easting': (['system_id'], pv_df.lookup(system_ids, (['easting'] * len(system_ids)))), 'northing': (['system_id'], pv_df.lookup(system_ids, (['northing'] * len(system_ids))))}
ds = ds.assign_coords(new_coords)
return ds |
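One portability note on the code above: `DataFrame.lookup` was deprecated in pandas 1.2 and removed in 2.0. Because each call here reads a single column for a list of system IDs, a plain `.loc` selection is a drop-in replacement; a sketch using the names from `load_pv_timeseries`:

```python
# Equivalent to pv_df.lookup(system_ids, ['latitude'] * len(system_ids))
# and friends, on any pandas version:
new_coords = {
    coord: (['system_id'], pv_df.loc[system_ids, coord].to_numpy())
    for coord in ('latitude', 'longitude', 'easting', 'northing')
}
ds = ds.assign_coords(new_coords)
```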
def _install_system_packages(session):
"\n Because some python packages are provided by the distribution and cannot\n be pip installed, and because we don't want the whole system python packages\n on our virtualenvs, we copy the required system python packages into\n the virtualenv\n "
version_info = _get_session_python_version_info(session)
py_version_keys = ['{}'.format(*version_info), '{}.{}'.format(*version_info)]
session_site_packages_dir = _get_session_python_site_packages_dir(session)
session_site_packages_dir = os.path.relpath(session_site_packages_dir, REPO_ROOT)
for py_version in py_version_keys:
dist_packages_path = '/usr/lib/python{}/dist-packages'.format(py_version)
if (not os.path.isdir(dist_packages_path)):
continue
for aptpkg in glob.glob(os.path.join(dist_packages_path, '*apt*')):
src = os.path.realpath(aptpkg)
dst = os.path.join(session_site_packages_dir, os.path.basename(src))
if os.path.exists(dst):
session.log('Not overwriting already existing %s with %s', dst, src)
continue
session.log('Copying %s into %s', src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst) | 6,968,002,749,129,078,000 | Because some python packages are provided by the distribution and cannot
be pip installed, and because we don't want the whole system python packages
on our virtualenvs, we copy the required system python packages into
the virtualenv | noxfile.py | _install_system_packages | 99-lives/salt | python | def _install_system_packages(session):
"\n Because some python packages are provided by the distribution and cannot\n be pip installed, and because we don't want the whole system python packages\n on our virtualenvs, we copy the required system python packages into\n the virtualenv\n "
version_info = _get_session_python_version_info(session)
py_version_keys = ['{}'.format(*version_info), '{}.{}'.format(*version_info)]
session_site_packages_dir = _get_session_python_site_packages_dir(session)
session_site_packages_dir = os.path.relpath(session_site_packages_dir, REPO_ROOT)
for py_version in py_version_keys:
dist_packages_path = '/usr/lib/python{}/dist-packages'.format(py_version)
if (not os.path.isdir(dist_packages_path)):
continue
for aptpkg in glob.glob(os.path.join(dist_packages_path, '*apt*')):
src = os.path.realpath(aptpkg)
dst = os.path.join(session_site_packages_dir, os.path.basename(src))
if os.path.exists(dst):
session.log('Not overwriting already existing %s with %s', dst, src)
continue
session.log('Copying %s into %s', src, dst)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copyfile(src, dst) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodome'])
def runtests_parametrized(session, coverage, transport, crypto):
'\n DO NOT CALL THIS NOX SESSION DIRECTLY\n '
_runtests(session) | -9,043,520,752,504,276,000 | DO NOT CALL THIS NOX SESSION DIRECTLY | noxfile.py | runtests_parametrized | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodome'])
def runtests_parametrized(session, coverage, transport, crypto):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def runtests(session, coverage):
'\n runtests.py session with zeromq transport and default crypto\n '
_runtests(session) | 845,727,871,123,702,300 | runtests.py session with zeromq transport and default crypto | noxfile.py | runtests | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def runtests(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp')
@nox.parametrize('coverage', [False, True])
def runtests_tcp(session, coverage):
'\n runtests.py session with TCP transport and default crypto\n '
_runtests(session) | -6,682,511,237,389,340,000 | runtests.py session with TCP transport and default crypto | noxfile.py | runtests_tcp | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp')
@nox.parametrize('coverage', [False, True])
def runtests_tcp(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq(session, coverage):
'\n runtests.py session with zeromq transport and default crypto\n '
_runtests(session) | -138,091,728,447,024,620 | runtests.py session with zeromq transport and default crypto | noxfile.py | runtests_zeromq | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_m2crypto(session, coverage):
'\n runtests.py session with zeromq transport and m2crypto\n '
_runtests(session) | -654,897,974,616,398,700 | runtests.py session with zeromq transport and m2crypto | noxfile.py | runtests_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_m2crypto(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_m2crypto(session, coverage):
'\n runtests.py session with TCP transport and m2crypto\n '
_runtests(session) | 6,956,894,239,134,347,000 | runtests.py session with TCP transport and m2crypto | noxfile.py | runtests_tcp_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_m2crypto(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_m2crypto(session, coverage):
'\n runtests.py session with zeromq transport and m2crypto\n '
_runtests(session) | 6,990,492,541,497,945,000 | runtests.py session with zeromq transport and m2crypto | noxfile.py | runtests_zeromq_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_m2crypto(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_pycryptodome(session, coverage):
'\n runtests.py session with zeromq transport and pycryptodome\n '
_runtests(session) | -6,725,514,069,020,401,000 | runtests.py session with zeromq transport and pycryptodome | noxfile.py | runtests_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_pycryptodome(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_pycryptodome(session, coverage):
'\n runtests.py session with TCP transport and pycryptodome\n '
_runtests(session) | 3,697,328,192,543,421,000 | runtests.py session with TCP transport and pycryptodome | noxfile.py | runtests_tcp_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-tcp-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_tcp_pycryptodome(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_pycryptodome(session, coverage):
'\n runtests.py session with zeromq transport and pycryptodome\n '
_runtests(session) | -7,133,153,785,818,747,000 | runtests.py session with zeromq transport and pycryptodome | noxfile.py | runtests_zeromq_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-zeromq-pycryptodome')
@nox.parametrize('coverage', [False, True])
def runtests_zeromq_pycryptodome(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-cloud')
@nox.parametrize('coverage', [False, True])
def runtests_cloud(session, coverage):
'\n runtests.py cloud tests session\n '
_runtests(session) | 2,808,524,427,608,219,600 | runtests.py cloud tests session | noxfile.py | runtests_cloud | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-cloud')
@nox.parametrize('coverage', [False, True])
def runtests_cloud(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='runtests-tornado')
@nox.parametrize('coverage', [False, True])
def runtests_tornado(session, coverage):
'\n runtests.py tornado tests session\n '
_runtests(session) | -5,665,973,324,566,924,000 | runtests.py tornado tests session | noxfile.py | runtests_tornado | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='runtests-tornado')
@nox.parametrize('coverage', [False, True])
def runtests_tornado(session, coverage):
'\n \n '
_runtests(session) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodome'])
def pytest_parametrized(session, coverage, transport, crypto):
'\n DO NOT CALL THIS NOX SESSION DIRECTLY\n '
_install_requirements(session, transport)
if crypto:
session.run('pip', 'uninstall', '-y', 'm2crypto', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
install_command = ['--progress-bar=off', '--constraint', _get_pip_requirements_file(session, transport, crypto=True)]
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s', '--transport={}'.format(transport)] + session.posargs)
_pytest(session, coverage, cmd_args) | -4,137,572,141,235,951,600 | DO NOT CALL THIS NOX SESSION DIRECTLY | noxfile.py | pytest_parametrized | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-parametrized')
@nox.parametrize('coverage', [False, True])
@nox.parametrize('transport', ['zeromq', 'tcp'])
@nox.parametrize('crypto', [None, 'm2crypto', 'pycryptodome'])
def pytest_parametrized(session, coverage, transport, crypto):
'\n \n '
_install_requirements(session, transport)
if crypto:
session.run('pip', 'uninstall', '-y', 'm2crypto', 'pycrypto', 'pycryptodome', 'pycryptodomex', silent=True)
install_command = ['--progress-bar=off', '--constraint', _get_pip_requirements_file(session, transport, crypto=True)]
install_command.append(crypto)
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s', '--transport={}'.format(transport)] + session.posargs)
_pytest(session, coverage, cmd_args) |
@nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def pytest(session, coverage):
'\n pytest session with zeromq transport and default crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='zeromq')) | 2,154,575,470,419,668,500 | pytest session with zeromq transport and default crypto | noxfile.py | pytest | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS)
@nox.parametrize('coverage', [False, True])
def pytest(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp')
@nox.parametrize('coverage', [False, True])
def pytest_tcp(session, coverage):
'\n pytest session with TCP transport and default crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='tcp')) | 3,040,151,632,284,312,600 | pytest session with TCP transport and default crypto | noxfile.py | pytest_tcp | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp')
@nox.parametrize('coverage', [False, True])
def pytest_tcp(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='tcp')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq(session, coverage):
'\n pytest session with zeromq transport and default crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='zeromq')) | -8,740,272,814,266,476,000 | pytest session with zeromq transport and default crypto | noxfile.py | pytest_zeromq | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto=None, transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_m2crypto(session, coverage):
'\n pytest session with zeromq transport and m2crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='zeromq')) | 8,899,858,550,189,629,000 | pytest session with zeromq transport and m2crypto | noxfile.py | pytest_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_m2crypto(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_m2crypto(session, coverage):
'\n pytest session with TCP transport and m2crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='tcp')) | -5,684,078,692,834,469,000 | pytest session with TCP transport and m2crypto | noxfile.py | pytest_tcp_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_m2crypto(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='tcp')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_m2crypto(session, coverage):
'\n pytest session with zeromq transport and m2crypto\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='zeromq')) | 5,171,910,656,219,338,000 | pytest session with zeromq transport and m2crypto | noxfile.py | pytest_zeromq_m2crypto | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-m2crypto')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_m2crypto(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='m2crypto', transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_pycryptodome(session, coverage):
'\n pytest session with zeromq transport and pycryptodome\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='zeromq')) | -2,269,956,677,477,665,000 | pytest session with zeromq transport and pycryptodome | noxfile.py | pytest_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_pycryptodome(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_pycryptodome(session, coverage):
'\n pytest session with TCP transport and pycryptodome\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='tcp')) | -1,304,393,818,394,454,800 | pytest session with TCP transport and pycryptodome | noxfile.py | pytest_tcp_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-tcp-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_tcp_pycryptodome(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='tcp')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_pycryptodome(session, coverage):
'\n pytest session with zeromq transport and pycryptodome\n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='zeromq')) | 1,148,822,481,792,937,200 | pytest session with zeromq transport and pycryptodome | noxfile.py | pytest_zeromq_pycryptodome | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-zeromq-pycryptodome')
@nox.parametrize('coverage', [False, True])
def pytest_zeromq_pycryptodome(session, coverage):
'\n \n '
session.notify(find_session_runner(session, 'pytest-parametrized-{}'.format(session.python), coverage=coverage, crypto='pycryptodome', transport='zeromq')) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-cloud')
@nox.parametrize('coverage', [False, True])
def pytest_cloud(session, coverage):
'\n pytest cloud tests session\n '
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, 'zeromq')
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'cloud.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s', '--run-expensive', '-k', 'cloud'] + session.posargs)
_pytest(session, coverage, cmd_args) | -3,397,264,623,128,888,300 | pytest cloud tests session | noxfile.py | pytest_cloud | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-cloud')
@nox.parametrize('coverage', [False, True])
def pytest_cloud(session, coverage):
'\n \n '
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, 'zeromq')
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'cloud.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s', '--run-expensive', '-k', 'cloud'] + session.posargs)
_pytest(session, coverage, cmd_args) |
@nox.session(python=_PYTHON_VERSIONS, name='pytest-tornado')
@nox.parametrize('coverage', [False, True])
def pytest_tornado(session, coverage):
'\n pytest tornado tests session\n '
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s'] + session.posargs)
_pytest(session, coverage, cmd_args) | 2,697,728,017,375,556,600 | pytest tornado tests session | noxfile.py | pytest_tornado | 99-lives/salt | python | @nox.session(python=_PYTHON_VERSIONS, name='pytest-tornado')
@nox.parametrize('coverage', [False, True])
def pytest_tornado(session, coverage):
'\n \n '
if _upgrade_pip_setuptools_and_wheel(session):
_install_requirements(session, 'zeromq')
session.install('--progress-bar=off', 'tornado==5.0.2', silent=PIP_INSTALL_SILENT)
session.install('--progress-bar=off', 'pyzmq==17.0.0', silent=PIP_INSTALL_SILENT)
cmd_args = (['--rootdir', REPO_ROOT, '--log-file={}'.format(RUNTESTS_LOGFILE), '--log-file-level=debug', '--show-capture=no', '-ra', '-s'] + session.posargs)
_pytest(session, coverage, cmd_args) |
@nox.session(python='3')
def lint(session):
"\n Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.\n "
session.notify('lint-salt-{}'.format(session.python))
session.notify('lint-tests-{}'.format(session.python)) | -1,453,134,618,559,754 | Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output. | noxfile.py | lint | 99-lives/salt | python | @nox.session(python='3')
def lint(session):
"\n \n "
session.notify('lint-salt-{}'.format(session.python))
session.notify('lint-tests-{}'.format(session.python)) |
@nox.session(python='3', name='lint-salt')
def lint_salt(session):
'\n Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.\n '
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'noxfile.py', 'salt/', 'tasks/']
_lint(session, '.pylintrc', flags, paths) | -544,538,526,701,332,700 | Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output. | noxfile.py | lint_salt | 99-lives/salt | python | @nox.session(python='3', name='lint-salt')
def lint_salt(session):
'\n \n '
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'noxfile.py', 'salt/', 'tasks/']
_lint(session, '.pylintrc', flags, paths) |
@nox.session(python='3', name='lint-tests')
def lint_tests(session):
"\n Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.\n "
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint(session, '.pylintrc', flags, paths) | -5,180,132,152,038,868,000 | Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output. | noxfile.py | lint_tests | 99-lives/salt | python | @nox.session(python='3', name='lint-tests')
def lint_tests(session):
"\n \n "
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint(session, '.pylintrc', flags, paths) |
@nox.session(python=False, name='lint-salt-pre-commit')
def lint_salt_pre_commit(session):
'\n Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output.\n '
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'noxfile.py', 'salt/']
_lint_pre_commit(session, '.pylintrc', flags, paths) | 3,978,327,733,629,347,000 | Run PyLint against Salt. Set PYLINT_REPORT to a path to capture output. | noxfile.py | lint_salt_pre_commit | 99-lives/salt | python | @nox.session(python=False, name='lint-salt-pre-commit')
def lint_salt_pre_commit(session):
'\n \n '
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['setup.py', 'noxfile.py', 'salt/']
_lint_pre_commit(session, '.pylintrc', flags, paths) |
@nox.session(python=False, name='lint-tests-pre-commit')
def lint_tests_pre_commit(session):
"\n Run PyLint against Salt and it's test suite. Set PYLINT_REPORT to a path to capture output.\n "
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint_pre_commit(session, '.pylintrc', flags, paths) | -3,709,611,297,284,424,700 | Run PyLint against Salt and its test suite. Set PYLINT_REPORT to a path to capture output. | noxfile.py | lint_tests_pre_commit | 99-lives/salt | python | @nox.session(python=False, name='lint-tests-pre-commit')
def lint_tests_pre_commit(session):
"\n \n "
flags = ['--disable=I']
if session.posargs:
paths = session.posargs
else:
paths = ['tests/']
_lint_pre_commit(session, '.pylintrc', flags, paths) |
@nox.session(python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('update', [False, True])
@nox.parametrize('compress', [False, True])
def docs(session, compress, update, clean):
"\n Build Salt's Documentation\n "
session.notify('docs-html-{}(compress={})'.format(session.python, compress))
session.notify(find_session_runner(session, 'docs-man-{}'.format(session.python), compress=compress, update=update, clean=clean)) | 5,115,661,677,157,032,000 | Build Salt's Documentation | noxfile.py | docs | 99-lives/salt | python | @nox.session(python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('update', [False, True])
@nox.parametrize('compress', [False, True])
def docs(session, compress, update, clean):
"\n \n "
session.notify('docs-html-{}(compress={})'.format(session.python, compress))
session.notify(find_session_runner(session, 'docs-man-{}'.format(session.python), compress=compress, update=update, clean=clean)) |
@nox.session(name='docs-html', python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('compress', [False, True])
def docs_html(session, compress, clean):
"\n Build Salt's HTML Documentation\n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'docs.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
if clean:
session.run('make', 'clean', external=True)
session.run('make', 'html', 'SPHINXOPTS=-W', external=True)
if compress:
session.run('tar', '-cJvf', 'html-archive.tar.xz', '_build/html', external=True)
os.chdir('..') | 274,001,716,584,311,520 | Build Salt's HTML Documentation | noxfile.py | docs_html | 99-lives/salt | python | @nox.session(name='docs-html', python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('compress', [False, True])
def docs_html(session, compress, clean):
"\n \n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'docs.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
if clean:
session.run('make', 'clean', external=True)
session.run('make', 'html', 'SPHINXOPTS=-W', external=True)
if compress:
session.run('tar', '-cJvf', 'html-archive.tar.xz', '_build/html', external=True)
os.chdir('..') |
@nox.session(name='docs-man', python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('update', [False, True])
@nox.parametrize('compress', [False, True])
def docs_man(session, compress, update, clean):
"\n Build Salt's Manpages Documentation\n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'docs.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
if clean:
session.run('make', 'clean', external=True)
session.run('make', 'man', 'SPHINXOPTS=-W', external=True)
if update:
session.run('rm', '-rf', 'man/', external=True)
session.run('cp', '-Rp', '_build/man', 'man/', external=True)
if compress:
session.run('tar', '-cJvf', 'man-archive.tar.xz', '_build/man', external=True)
os.chdir('..') | 5,536,846,113,340,623,000 | Build Salt's Manpages Documentation | noxfile.py | docs_man | 99-lives/salt | python | @nox.session(name='docs-man', python='3')
@nox.parametrize('clean', [False, True])
@nox.parametrize('update', [False, True])
@nox.parametrize('compress', [False, True])
def docs_man(session, compress, update, clean):
"\n \n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'docs.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
os.chdir('doc/')
if clean:
session.run('make', 'clean', external=True)
session.run('make', 'man', 'SPHINXOPTS=-W', external=True)
if update:
session.run('rm', '-rf', 'man/', external=True)
session.run('cp', '-Rp', '_build/man', 'man/', external=True)
if compress:
session.run('tar', '-cJvf', 'man-archive.tar.xz', '_build/man', external=True)
os.chdir('..') |
@nox.session(name='invoke', python='3')
def invoke(session):
'\n Run invoke tasks\n '
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'invoke.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd = ['inv']
files = []
for (idx, posarg) in enumerate(session.posargs):
if (idx == 0):
cmd.append(posarg)
continue
if posarg.startswith('--'):
cmd.append(posarg)
continue
files.append(posarg)
if files:
cmd.append('--files={}'.format(' '.join(files)))
session.run(*cmd) | 9,109,923,732,392,836,000 | Run invoke tasks | noxfile.py | invoke | 99-lives/salt | python | @nox.session(name='invoke', python='3')
def invoke(session):
'\n \n '
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'invoke.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
cmd = ['inv']
files = []
for (idx, posarg) in enumerate(session.posargs):
if (idx == 0):
cmd.append(posarg)
continue
if posarg.startswith('--'):
cmd.append(posarg)
continue
files.append(posarg)
if files:
cmd.append('--files={}'.format(' '.join(files)))
session.run(*cmd) |
@nox.session(name='changelog', python='3')
@nox.parametrize('draft', [False, True])
def changelog(session, draft):
"\n Generate salt's changelog\n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'changelog.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
town_cmd = ['towncrier', '--version={}'.format(session.posargs[0])]
if draft:
town_cmd.append('--draft')
session.run(*town_cmd) | 1,634,428,497,451,441,400 | Generate salt's changelog | noxfile.py | changelog | 99-lives/salt | python | @nox.session(name='changelog', python='3')
@nox.parametrize('draft', [False, True])
def changelog(session, draft):
"\n \n "
if _upgrade_pip_setuptools_and_wheel(session):
requirements_file = os.path.join('requirements', 'static', 'ci', _get_pydir(session), 'changelog.txt')
install_command = ['--progress-bar=off', '-r', requirements_file]
session.install(*install_command, silent=PIP_INSTALL_SILENT)
town_cmd = ['towncrier', '--version={}'.format(session.posargs[0])]
if draft:
town_cmd.append('--draft')
session.run(*town_cmd) |
def __init__(self):
'Constructs a ClassificationTask'
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = (- 1)
self.train_phase_idx = (- 1)
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = 'spawn'
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False | -502,440,207,293,914,560 | Constructs a ClassificationTask | classy_vision/tasks/classification_task.py | __init__ | hahaxun/ClassyVision | python | def __init__(self):
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = (- 1)
self.train_phase_idx = (- 1)
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = 'spawn'
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False |
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
'Sets maximum gradient norm.\n\n None means gradient clipping is disabled. Defaults to None.'
self.clip_grad_norm = clip_grad_norm
if (clip_grad_norm is None):
logging.info('Disabled gradient norm clipping.')
else:
logging.info(f'Enabled gradient norm clipping with threshold: {clip_grad_norm}')
return self | 8,983,691,473,306,001,000 | Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None. | classy_vision/tasks/classification_task.py | set_clip_grad_norm | hahaxun/ClassyVision | python | def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
'Sets maximum gradient norm.\n\n None means gradient clipping is disabled. Defaults to None.'
self.clip_grad_norm = clip_grad_norm
if (clip_grad_norm is None):
logging.info('Disabled gradient norm clipping.')
else:
logging.info(f'Enabled gradient norm clipping with threshold: {clip_grad_norm}')
return self |
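For reference, a minimal, self-contained sketch of how a `clip_grad_norm` threshold like the one stored above is typically applied in a training step (toy model and data; this is not the task's actual `train_step`):

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
clip_grad_norm = 1.0  # the threshold set_clip_grad_norm would store

loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
if clip_grad_norm is not None:
    # Rescales gradients in place so their total L2 norm is at most the threshold.
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad_norm)
optimizer.step()
```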
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
'Sets a simulated batch size by gradient accumulation.\n\n Gradient accumulation adds up gradients from multiple minibatches and\n steps the optimizer every N train_steps, where N is optimizer_period.\n When enabled, the very last train_steps might end up not updating the\n model, depending on the number of total steps. None means gradient\n accumulation is disabled. Defaults to None.'
self.simulated_global_batchsize = simulated_global_batchsize
return self | 561,482,616,653,683,140 | Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None. | classy_vision/tasks/classification_task.py | set_simulated_global_batchsize | hahaxun/ClassyVision | python | def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
'Sets a simulated batch size by gradient accumulation.\n\n Gradient accumulation adds up gradients from multiple minibatches and\n steps the optimizer every N train_steps, where N is optimizer_period.\n When enabled, the very last train_steps might end up not updating the\n model, depending on the number of total steps. None means gradient\n accumulation is disabled. Defaults to None.'
self.simulated_global_batchsize = simulated_global_batchsize
return self |
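The docstring above describes stepping the optimizer every N train steps; a minimal, self-contained sketch of that accumulation pattern with a toy model (the period value is illustrative, and the loss scaling is a common convention rather than necessarily what this task does):

```python
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
optimizer_period = 4  # N: e.g. a simulated global batch 4x the real one

optimizer.zero_grad()
for step in range(1, 13):
    batch = torch.randn(8, 4)
    # Divide by N so the accumulated gradient matches a single large-batch average.
    loss = model(batch).pow(2).mean() / optimizer_period
    loss.backward()  # gradients accumulate across the N minibatches
    if step % optimizer_period == 0:
        optimizer.step()
        optimizer.zero_grad()
```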
def set_checkpoint(self, checkpoint_path: str):
'Sets checkpoint on task.\n\n Args:\n checkpoint_path: The path to load the checkpoint from. Can be a file or a\n directory. See :func:`load_checkpoint` for more information.\n '
self.checkpoint_path = checkpoint_path
return self | -709,116,024,819,137,700 | Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information. | classy_vision/tasks/classification_task.py | set_checkpoint | hahaxun/ClassyVision | python | def set_checkpoint(self, checkpoint_path: str):
'Sets checkpoint on task.\n\n Args:\n checkpoint_path: The path to load the checkpoint from. Can be a file or a\n directory. See :func:`load_checkpoint` for more information.\n '
self.checkpoint_path = checkpoint_path
return self |
def _set_checkpoint_dict(self, checkpoint_dict: Dict[(str, Any)]):
'Sets the checkpoint dict in the task. Only used for testing.\n\n Args:\n checkpoint_dict: A serializable dict representing current task state\n '
self.checkpoint_dict = checkpoint_dict
return self | 8,534,080,152,626,152,000 | Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state | classy_vision/tasks/classification_task.py | _set_checkpoint_dict | hahaxun/ClassyVision | python | def _set_checkpoint_dict(self, checkpoint_dict: Dict[(str, Any)]):
'Sets the checkpoint dict in the task. Only used for testing.\n\n Args:\n checkpoint_dict: A serializable dict representing current task state\n '
self.checkpoint_dict = checkpoint_dict
return self |
def set_num_epochs(self, num_epochs: Union[(int, float)]):
'Set number of epochs to be run.\n\n Args:\n num_epochs: Number of epochs to run task\n '
self.num_epochs = num_epochs
return self | -8,979,083,074,103,443,000 | Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task | classy_vision/tasks/classification_task.py | set_num_epochs | hahaxun/ClassyVision | python | def set_num_epochs(self, num_epochs: Union[(int, float)]):
'Set number of epochs to be run.\n\n Args:\n num_epochs: Number of epochs to run task\n '
self.num_epochs = num_epochs
return self |
def set_test_phase_period(self, test_phase_period: int):
'Set the period of test phase.\n\n Args:\n test_phase_period: The period of test phase\n '
self.test_phase_period = test_phase_period
return self | 6,875,986,525,145,431,000 | Set the period of test phase.
Args:
test_phase_period: The period of test phase | classy_vision/tasks/classification_task.py | set_test_phase_period | hahaxun/ClassyVision | python | def set_test_phase_period(self, test_phase_period: int):
'Set the period of test phase.\n\n Args:\n test_phase_period: The period of test phase\n '
self.test_phase_period = test_phase_period
return self |
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
'Set dataset for phase type on task\n\n Args:\n dataset: ClassyDataset for returning samples.\n phase_type: str must be one of "train" or "test"\n '
assert (phase_type in ['train', 'test']), "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if (phase_type == 'train'):
self.train_phases_per_epoch = getattr(dataset, 'phases_per_epoch', 1)
else:
self._train_only = False
return self | 2,596,033,858,578,315,000 | Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test" | classy_vision/tasks/classification_task.py | set_dataset | hahaxun/ClassyVision | python | def set_dataset(self, dataset: ClassyDataset, phase_type: str):
'Set dataset for phase type on task\n\n Args:\n dataset: ClassyDataset for returning samples.\n phase_type: str must be one of "train" or "test"\n '
assert (phase_type in ['train', 'test']), "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if (phase_type == 'train'):
self.train_phases_per_epoch = getattr(dataset, 'phases_per_epoch', 1)
else:
self._train_only = False
return self |
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"Set the multiprocessing context used by the dataloader.\n\n The context can be either 'spawn', 'fork', 'forkserver' or None (uses the\n default context). See\n https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context\n for more details."
self.dataloader_mp_context = dataloader_mp_context
return self | -6,788,116,500,497,534,000 | Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details. | classy_vision/tasks/classification_task.py | set_dataloader_mp_context | hahaxun/ClassyVision | python | def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"Set the multiprocessing context used by the dataloader.\n\n The context can be either 'spawn', 'fork', 'forkserver' or None (uses the\n default context). See\n https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context\n for more details."
self.dataloader_mp_context = dataloader_mp_context
return self |
def set_optimizer(self, optimizer: ClassyOptimizer):
'Set optimizer for task\n\n Args:\n optimizer: optimizer for task\n '
self.optimizer = optimizer
return self | -1,356,130,641,651,106,000 | Set optimizer for task
Args:
optimizer: optimizer for task | classy_vision/tasks/classification_task.py | set_optimizer | hahaxun/ClassyVision | python | def set_optimizer(self, optimizer: ClassyOptimizer):
'Set optimizer for task\n\n Args:\n optimizer: optimizer for task\n '
self.optimizer = optimizer
return self |
def set_loss(self, loss: ClassyLoss):
'Set loss function for task\n\n Args:\n loss: loss for task\n '
self.base_loss = loss
return self | -8,139,487,164,818,706,000 | Set loss function for task
Args:
loss: loss for task | classy_vision/tasks/classification_task.py | set_loss | hahaxun/ClassyVision | python | def set_loss(self, loss: ClassyLoss):
'Set loss function for task\n\n Args:\n loss: loss for task\n '
self.base_loss = loss
return self |
def set_meters(self, meters: List['ClassyMeter']):
'Set meters for task\n\n Args:\n meters: list of meters to compute during training\n '
self.meters = meters
return self | -7,888,962,777,615,976,000 | Set meters for task
Args:
meters: list of meters to compute during training | classy_vision/tasks/classification_task.py | set_meters | hahaxun/ClassyVision | python | def set_meters(self, meters: List['ClassyMeter']):
'Set meters for task\n\n Args:\n meters: list of meters to compute during training\n '
self.meters = meters
return self |
def set_distributed_options(self, broadcast_buffers_mode: BroadcastBuffersMode=BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode=BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int=0, find_unused_parameters: bool=False, bucket_cap_mb: int=25, fp16_grad_compress: bool=False):
'Set distributed options.\n\n Args:\n broadcast_buffers_mode: Broadcast buffers mode. See\n :class:`BroadcastBuffersMode` for options.\n batch_norm_sync_mode: Batch normalization synchronization mode. See\n :class:`BatchNormSyncMode` for options.\n batch_norm_sync_group_size: Group size to use for synchronized batch norm.\n 0 means that the stats are synchronized across all replicas. For\n efficient synchronization, set it to the number of GPUs in a node (\n usually 8).\n find_unused_parameters: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n bucket_cap_mb: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n Raises:\n RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex\n is not installed.\n '
self.broadcast_buffers_mode = broadcast_buffers_mode
if (batch_norm_sync_group_size > 0):
if (not (batch_norm_sync_mode == BatchNormSyncMode.APEX)):
raise ValueError('batch_norm_sync_group_size can be > 0 only when Apex Synchronized Batch Normalization is being used.')
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if (batch_norm_sync_mode == BatchNormSyncMode.DISABLED):
logging.info('Synchronized Batch Normalization is disabled')
else:
if ((batch_norm_sync_mode == BatchNormSyncMode.APEX) and (not apex_available)):
raise RuntimeError('apex is not installed')
msg = f'Using Synchronized Batch Normalization using {batch_norm_sync_mode}'
if (self.batch_norm_sync_group_size > 0):
msg += f' and group size {batch_norm_sync_group_size}'
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info('Enabling find_unused_parameters in DDP')
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if (get_torch_version() < [1, 8, 0]):
raise RuntimeError('FP16 grad compression is only supported since PyTorch 1.8')
logging.info('Enabling FP16 grad compression')
self.fp16_grad_compress = fp16_grad_compress
return self | 8,815,734,520,811,869,000 | Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed. | classy_vision/tasks/classification_task.py | set_distributed_options | hahaxun/ClassyVision | python | def set_distributed_options(self, broadcast_buffers_mode: BroadcastBuffersMode=BroadcastBuffersMode.BEFORE_EVAL, batch_norm_sync_mode: BatchNormSyncMode=BatchNormSyncMode.DISABLED, batch_norm_sync_group_size: int=0, find_unused_parameters: bool=False, bucket_cap_mb: int=25, fp16_grad_compress: bool=False):
'Set distributed options.\n\n Args:\n broadcast_buffers_mode: Broadcast buffers mode. See\n :class:`BroadcastBuffersMode` for options.\n batch_norm_sync_mode: Batch normalization synchronization mode. See\n :class:`BatchNormSyncMode` for options.\n batch_norm_sync_group_size: Group size to use for synchronized batch norm.\n 0 means that the stats are synchronized across all replicas. For\n efficient synchronization, set it to the number of GPUs in a node (\n usually 8).\n find_unused_parameters: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n bucket_cap_mb: See\n :class:`torch.nn.parallel.DistributedDataParallel` for information.\n Raises:\n RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex\n is not installed.\n '
self.broadcast_buffers_mode = broadcast_buffers_mode
if (batch_norm_sync_group_size > 0):
if (not (batch_norm_sync_mode == BatchNormSyncMode.APEX)):
raise ValueError('batch_norm_sync_group_size can be > 0 only when Apex Synchronized Batch Normalization is being used.')
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if (batch_norm_sync_mode == BatchNormSyncMode.DISABLED):
logging.info('Synchronized Batch Normalization is disabled')
else:
if ((batch_norm_sync_mode == BatchNormSyncMode.APEX) and (not apex_available)):
raise RuntimeError('apex is not installed')
msg = f'Using Synchronized Batch Normalization using {batch_norm_sync_mode}'
if (self.batch_norm_sync_group_size > 0):
msg += f' and group size {batch_norm_sync_group_size}'
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info('Enabling find_unused_parameters in DDP')
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if (get_torch_version() < [1, 8, 0]):
raise RuntimeError('FP16 grad compression is only supported since PyTorch 1.8')
logging.info('Enabling FP16 grad compression')
self.fp16_grad_compress = fp16_grad_compress
return self |
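Editor's note: a hedged configuration sketch for the method above, continuing with the `task` from the earlier sketch. The enum values are the ones defined in this module, and the group size of 8 follows the docstring's GPUs-per-node suggestion; per the checks above, a non-zero group size is only valid with the Apex sync mode, and Apex must be installed:

```python
# Sketch, assuming apex is installed (required both for BatchNormSyncMode.APEX
# and for batch_norm_sync_group_size > 0, as enforced in the method body).
task.set_distributed_options(
    broadcast_buffers_mode=BroadcastBuffersMode.BEFORE_EVAL,
    batch_norm_sync_mode=BatchNormSyncMode.APEX,
    batch_norm_sync_group_size=8,   # sync BN stats within an 8-GPU node
    find_unused_parameters=False,
    bucket_cap_mb=25,
)
```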
def set_hooks(self, hooks: List['ClassyHook']):
'Set hooks for task\n\n Args:\n hooks: List of hooks to apply during training\n '
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all((isinstance(hook, ClassyHook) for hook in hooks))
assert (len({hook.name() for hook in hooks}) == len(hooks)), 'Cannot have repeated hooks of the same class'
non_checkpoint_hooks = [hook for hook in hooks if (not isinstance(hook, CheckpointHook))]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = (non_checkpoint_hooks + checkpoint_hooks)
self.hooks = hooks
return self | -961,870,791,464,746,500 | Set hooks for task
Args:
hooks: List of hooks to apply during training | classy_vision/tasks/classification_task.py | set_hooks | hahaxun/ClassyVision | python | def set_hooks(self, hooks: List['ClassyHook']):
'Set hooks for task\n\n Args:\n hooks: List of hooks to apply during training\n '
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all((isinstance(hook, ClassyHook) for hook in hooks))
assert (len({hook.name() for hook in hooks}) == len(hooks)), 'Cannot have repeated hooks of the same class'
non_checkpoint_hooks = [hook for hook in hooks if (not isinstance(hook, CheckpointHook))]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = (non_checkpoint_hooks + checkpoint_hooks)
self.hooks = hooks
return self |
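Editor's note: the reordering in the method above is a deliberate design choice: checkpoint hooks are moved to the end of the list so that the state they persist already reflects every other hook's side effects. A toy stand-in demonstration of that rule (the classes below are illustrative, not real ClassyVision hooks):

```python
# Toy demonstration of the "checkpoint hooks last" reordering above.
class LogHook: pass
class CkptHook: pass   # stand-in for CheckpointHook

hooks = [CkptHook(), LogHook()]
hooks = ([h for h in hooks if not isinstance(h, CkptHook)]
         + [h for h in hooks if isinstance(h, CkptHook)])
# hooks is now [LogHook instance, CkptHook instance]: checkpointing runs last
```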
def set_model(self, model: ClassyModel):
'Set model for task\n\n Args:\n model: Model to be trained\n '
self.base_model = model
return self | 6,614,835,632,053,417,000 | Set model for task
Args:
model: Model to be trained | classy_vision/tasks/classification_task.py | set_model | hahaxun/ClassyVision | python | def set_model(self, model: ClassyModel):
'Set model for task\n\n Args:\n model: Model to be trained\n '
self.base_model = model
return self |
def set_test_only(self, test_only: bool):
'Set test only flag\n\n Args:\n test_only: If true, only test phases will be run\n '
self.test_only = test_only
return self | -7,330,165,197,773,054,000 | Set test only flag
Args:
test_only: If true, only test phases will be run | classy_vision/tasks/classification_task.py | set_test_only | hahaxun/ClassyVision | python | def set_test_only(self, test_only: bool):
'Set test only flag\n\n Args:\n test_only: If true, only test phases will be run\n '
self.test_only = test_only
return self |
def set_amp_args(self, amp_args: Optional[Dict[(str, Any)]]):
'Disable / enable apex.amp and set the automatic mixed precision parameters.\n\n apex.amp can be utilized for mixed / half precision training.\n\n Args:\n amp_args: Dictionary containing arguments to be passed to\n amp.initialize. Set to None to disable amp. To enable mixed\n precision training, pass amp_args={"opt_level": "O1"} here.\n See https://nvidia.github.io/apex/amp.html for more info.\n\n Raises:\n RuntimeError: If opt_level is not None and apex is not installed.\n\n Warning: apex needs to be installed to utilize this feature.\n '
self.amp_args = amp_args
if (amp_args is None):
logging.info('AMP disabled')
else:
try:
self.amp_type = AmpType[self.amp_args['amp_type'].upper()]
except KeyError:
logging.info('AMP type not specified, defaulting to Apex')
self.amp_type = AmpType.APEX
if (not torch.cuda.is_available()):
raise RuntimeError('AMP is required but CUDA is not supported, cannot enable AMP')
if ((self.amp_type == AmpType.APEX) and (not apex_available)):
raise RuntimeError('Apex AMP is required but Apex is not installed, cannot enable AMP')
if self.use_sharded_ddp:
if (self.amp_type == AmpType.APEX):
raise RuntimeError('ShardedDDP has been requested, which is incompatible with Apex AMP')
if (not fairscale_available):
raise RuntimeError('ShardedDDP has been requested, but fairscale is not installed in the current environment')
elif (self.amp_type == AmpType.PYTORCH):
if self.use_sharded_ddp:
logging.info('Using ShardedGradScaler to manage Pytorch AMP')
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f'AMP enabled with args {amp_args}')
return self | 5,959,304,053,381,014,000 | Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature. | classy_vision/tasks/classification_task.py | set_amp_args | hahaxun/ClassyVision | python | def set_amp_args(self, amp_args: Optional[Dict[(str, Any)]]):
'Disable / enable apex.amp and set the automatic mixed precision parameters.\n\n apex.amp can be utilized for mixed / half precision training.\n\n Args:\n amp_args: Dictionary containing arguments to be passed to\n amp.initialize. Set to None to disable amp. To enable mixed\n precision training, pass amp_args={"opt_level": "O1"} here.\n See https://nvidia.github.io/apex/amp.html for more info.\n\n Raises:\n RuntimeError: If opt_level is not None and apex is not installed.\n\n Warning: apex needs to be installed to utilize this feature.\n '
self.amp_args = amp_args
if (amp_args is None):
logging.info('AMP disabled')
else:
try:
self.amp_type = AmpType[self.amp_args['amp_type'].upper()]
except KeyError:
logging.info('AMP type not specified, defaulting to Apex')
self.amp_type = AmpType.APEX
if (not torch.cuda.is_available()):
raise RuntimeError('AMP is required but CUDA is not supported, cannot enable AMP')
if ((self.amp_type == AmpType.APEX) and (not apex_available)):
raise RuntimeError('Apex AMP is required but Apex is not installed, cannot enable AMP')
if self.use_sharded_ddp:
if (self.amp_type == AmpType.APEX):
raise RuntimeError('ShardedDDP has been requested, which is incompatible with Apex AMP')
if (not fairscale_available):
raise RuntimeError('ShardedDDP has been requested, but fairscale is not installed in the current environment')
elif (self.amp_type == AmpType.PYTORCH):
if self.use_sharded_ddp:
logging.info('Using ShardedGradScaler to manage Pytorch AMP')
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f'AMP enabled with args {amp_args}')
return self |
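Editor's note: two hedged invocations of the method above, mirroring the docstring's own example. Both assume a CUDA-capable machine, since the method raises otherwise; the first additionally assumes apex is installed:

```python
task.set_amp_args({"opt_level": "O1"})       # Apex AMP, the docstring's example
task.set_amp_args({"amp_type": "pytorch"})   # native torch.cuda.amp instead
task.set_amp_args(None)                      # disable AMP entirely
```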
def set_mixup_transform(self, mixup_transform: Optional['MixupTransform']):
'Disable / enable mixup transform for data augmentation\n\n Args:\n mixup_transform: a callable object which performs mixup data augmentation\n '
self.mixup_transform = mixup_transform
if (mixup_transform is None):
logging.info('mixup disabled')
else:
logging.info('mixup enabled')
return self | -1,817,254,112,740,007,400 | Disable / enable mixup transform for data augmentation
Args:
mixup_transform: a callable object which performs mixup data augmentation | classy_vision/tasks/classification_task.py | set_mixup_transform | hahaxun/ClassyVision | python | def set_mixup_transform(self, mixup_transform: Optional['MixupTransform']):
'Disable / enable mixup transform for data augmentation\n\n Args:\n mixup_transform: a callable object which performs mixup data augmentation\n '
self.mixup_transform = mixup_transform
if (mixup_transform is None):
logging.info('mixup disabled')
else:
logging.info('mixup enabled')
return self |
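Editor's note: a short usage sketch for the method above. The positional `MixupTransform(alpha, num_classes)` call shape is taken from the `from_config` row below, where it is constructed the same way:

```python
task.set_mixup_transform(MixupTransform(0.2, 1000))  # alpha=0.2, 1000 classes
task.set_mixup_transform(None)                       # turn mixup back off
```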
@classmethod
def from_config(cls, config: Dict[(str, Any)]) -> 'ClassificationTask':
'Instantiates a ClassificationTask from a configuration.\n\n Args:\n config: A configuration for a ClassificationTask.\n See :func:`__init__` for parameters expected in the config.\n\n Returns:\n A ClassificationTask instance.\n '
test_only = config.get('test_only', False)
if (not test_only):
train_phases_per_epoch = config['dataset']['train'].get('phases_per_epoch', 1)
optimizer_config = config['optimizer']
optimizer_config['num_epochs'] = (config['num_epochs'] * train_phases_per_epoch)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ['train', 'test']
for phase_type in phase_types:
if (phase_type in config['dataset']):
datasets[phase_type] = build_dataset(config['dataset'][phase_type])
loss = build_loss(config['loss'])
amp_args = config.get('amp_args')
meters = build_meters(config.get('meters', {}))
model = build_model(config['model'])
mixup_transform = None
if (config.get('mixup') is not None):
assert ('alpha' in config['mixup']), 'key alpha is missing in mixup dict'
mixup_transform = MixupTransform(config['mixup']['alpha'], config['mixup'].get('num_classes'))
hooks_config = config.get('hooks')
hooks = []
if (hooks_config is not None):
hooks = build_hooks(hooks_config)
distributed_config = config.get('distributed', {})
distributed_options = {'broadcast_buffers_mode': BroadcastBuffersMode[distributed_config.get('broadcast_buffers', 'before_eval').upper()], 'batch_norm_sync_mode': BatchNormSyncMode[distributed_config.get('batch_norm_sync_mode', 'disabled').upper()], 'batch_norm_sync_group_size': distributed_config.get('batch_norm_sync_group_size', 0), 'find_unused_parameters': distributed_config.get('find_unused_parameters', False), 'bucket_cap_mb': distributed_config.get('bucket_cap_mb', 25), 'fp16_grad_compress': distributed_config.get('fp16_grad_compress', False)}
task = cls().set_num_epochs(config['num_epochs']).set_test_phase_period(config.get('test_phase_period', 1)).set_loss(loss).set_test_only(test_only).set_model(model).set_meters(meters).set_amp_args(amp_args).set_mixup_transform(mixup_transform).set_distributed_options(**distributed_options).set_hooks(hooks).set_bn_weight_decay(config.get('bn_weight_decay', False)).set_clip_grad_norm(config.get('clip_grad_norm')).set_simulated_global_batchsize(config.get('simulated_global_batchsize')).set_use_sharded_ddp(config.get('use_sharded_ddp', False))
if (not test_only):
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get('use_gpu')
if (use_gpu is not None):
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
task._config = config
return task | -1,422,442,786,474,634,500 | Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance. | classy_vision/tasks/classification_task.py | from_config | hahaxun/ClassyVision | python | @classmethod
def from_config(cls, config: Dict[(str, Any)]) -> 'ClassificationTask':
'Instantiates a ClassificationTask from a configuration.\n\n Args:\n config: A configuration for a ClassificationTask.\n See :func:`__init__` for parameters expected in the config.\n\n Returns:\n A ClassificationTask instance.\n '
test_only = config.get('test_only', False)
if (not test_only):
train_phases_per_epoch = config['dataset']['train'].get('phases_per_epoch', 1)
optimizer_config = config['optimizer']
optimizer_config['num_epochs'] = (config['num_epochs'] * train_phases_per_epoch)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ['train', 'test']
for phase_type in phase_types:
if (phase_type in config['dataset']):
datasets[phase_type] = build_dataset(config['dataset'][phase_type])
loss = build_loss(config['loss'])
amp_args = config.get('amp_args')
meters = build_meters(config.get('meters', {}))
model = build_model(config['model'])
mixup_transform = None
if (config.get('mixup') is not None):
assert ('alpha' in config['mixup']), 'key alpha is missing in mixup dict'
mixup_transform = MixupTransform(config['mixup']['alpha'], config['mixup'].get('num_classes'))
hooks_config = config.get('hooks')
hooks = []
if (hooks_config is not None):
hooks = build_hooks(hooks_config)
distributed_config = config.get('distributed', {})
distributed_options = {'broadcast_buffers_mode': BroadcastBuffersMode[distributed_config.get('broadcast_buffers', 'before_eval').upper()], 'batch_norm_sync_mode': BatchNormSyncMode[distributed_config.get('batch_norm_sync_mode', 'disabled').upper()], 'batch_norm_sync_group_size': distributed_config.get('batch_norm_sync_group_size', 0), 'find_unused_parameters': distributed_config.get('find_unused_parameters', False), 'bucket_cap_mb': distributed_config.get('bucket_cap_mb', 25), 'fp16_grad_compress': distributed_config.get('fp16_grad_compress', False)}
task = cls().set_num_epochs(config['num_epochs']).set_test_phase_period(config.get('test_phase_period', 1)).set_loss(loss).set_test_only(test_only).set_model(model).set_meters(meters).set_amp_args(amp_args).set_mixup_transform(mixup_transform).set_distributed_options(**distributed_options).set_hooks(hooks).set_bn_weight_decay(config.get('bn_weight_decay', False)).set_clip_grad_norm(config.get('clip_grad_norm')).set_simulated_global_batchsize(config.get('simulated_global_batchsize')).set_use_sharded_ddp(config.get('use_sharded_ddp', False))
if (not test_only):
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get('use_gpu')
if (use_gpu is not None):
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
task._config = config
return task |
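Editor's note: an illustrative, hypothetical config skeleton covering the keys `from_config` reads above. All `name` values are placeholders and would need to match components registered with ClassyVision's builders:

```python
# Hypothetical skeleton; registry names below are placeholders, not real components.
config = {
    "num_epochs": 2,
    "loss": {"name": "my_loss"},
    "model": {"name": "my_model"},
    "optimizer": {"name": "sgd", "lr": 0.1},
    "dataset": {
        "train": {"name": "my_dataset", "phases_per_epoch": 1},
        "test": {"name": "my_dataset"},
    },
    "meters": {},
    "test_phase_period": 1,
}
task = ClassificationTask.from_config(config)
```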
@property
def num_batches_per_phase(self):
'Returns number of batches in current phase iterator'
return len(self.data_iterator) | -6,139,086,927,270,886,000 | Returns number of batches in current phase iterator | classy_vision/tasks/classification_task.py | num_batches_per_phase | hahaxun/ClassyVision | python | @property
def num_batches_per_phase(self):
return len(self.data_iterator) |
@property
def model(self):
'Returns model used in training (can be wrapped with DDP)'
return (self.distributed_model if is_distributed_training_run() else self.base_model) | 5,909,357,874,804,241,000 | Returns model used in training (can be wrapped with DDP) | classy_vision/tasks/classification_task.py | model | hahaxun/ClassyVision | python | @property
def model(self):
return (self.distributed_model if is_distributed_training_run() else self.base_model) |
@property
def loss(self):
'Returns loss used in training (can be wrapped with DDP)'
return (self.distributed_loss if self.distributed_loss else self.base_loss) | 542,399,534,204,788,100 | Returns loss used in training (can be wrapped with DDP) | classy_vision/tasks/classification_task.py | loss | hahaxun/ClassyVision | python | @property
def loss(self):
return (self.distributed_loss if self.distributed_loss else self.base_loss) |
@property
def phase_type(self):
'Returns current phase type. String with value "train" or "test" '
return ('train' if self.train else 'test') | -674,432,275,946,742,700 | Returns current phase type. String with value "train" or "test" | classy_vision/tasks/classification_task.py | phase_type | hahaxun/ClassyVision | python | @property
def phase_type(self):
' '
return ('train' if self.train else 'test') |
@property
def eval_phase_idx(self):
'Returns current evaluation phase'
return ((self.phase_idx - self.train_phase_idx) - 1) | -3,803,939,708,086,919,700 | Returns current evaluation phase | classy_vision/tasks/classification_task.py | eval_phase_idx | hahaxun/ClassyVision | python | @property
def eval_phase_idx(self):
return ((self.phase_idx - self.train_phase_idx) - 1) |
def get_total_training_phases(self):
'\n Returns the total number of "train" phases in the task\n '
num_training_phases = 0
for phase in self.phases:
if (phase['train'] is True):
num_training_phases += 1
return num_training_phases | 2,032,511,598,330,732,500 | Returns the total number of "train" phases in the task | classy_vision/tasks/classification_task.py | get_total_training_phases | hahaxun/ClassyVision | python | def get_total_training_phases(self):
'\n \n '
num_training_phases = 0
for phase in self.phases:
if (phase['train'] is True):
num_training_phases += 1
return num_training_phases |
def get_total_test_phases(self):
'\n Returns the total number of "test" phases in the task\n '
num_test_phases = 0
for phase in self.phases:
if (phase['train'] is False):
num_test_phases += 1
return num_test_phases | -7,734,286,215,081,280,000 | Returns the total number of "test" phases in the task | classy_vision/tasks/classification_task.py | get_total_test_phases | hahaxun/ClassyVision | python | def get_total_test_phases(self):
'\n \n '
num_test_phases = 0
for phase in self.phases:
if (phase['train'] is False):
num_test_phases += 1
return num_test_phases |
def _build_phases(self):
'Returns list of phases from config.\n\n These phases will look like:\n {\n train: is this a train or test phase?\n optimizer: optimizer settings\n }\n\n - If this is a test only run, then only test phases will be\n generated\n - If this is a training run with both train and test datasets, then x phases =\n x train phases + x test phases, interleaved. If test_phase_period > 1, test\n phases are only added after test_phase_period train phases. The last phase is\n always a test phase.\n - If this is a training run with only a train dataset, then x phases = x train\n phases.\n '
if (not self.test_only):
phases = [{'train': True} for _ in range(math.ceil((self.train_phases_per_epoch * self.num_epochs)))]
if self._train_only:
return phases
final_phases = []
for (i, phase) in enumerate(phases):
final_phases.append(phase)
if (((i + 1) % self.test_phase_period) == 0):
final_phases.append({'train': False})
if final_phases[(- 1)]['train']:
final_phases.append({'train': False})
return final_phases
return [{'train': False} for _ in range(self.num_epochs)] | -5,931,279,902,887,682,000 | Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases. | classy_vision/tasks/classification_task.py | _build_phases | hahaxun/ClassyVision | python | def _build_phases(self):
'Returns list of phases from config.\n\n These phases will look like:\n {\n train: is this a train or test phase?\n optimizer: optimizer settings\n }\n\n - If this is a test only run, then only test phases will be\n generated\n - If this is a training run with both train and test datasets, then x phases =\n x train phases + x test phases, interleaved. If test_phase_period > 1, test\n phases are only added after test_phase_period train phases. The last phase is\n always a test phase.\n - If this is a training run with only a train dataset, then x phases = x train\n phases.\n '
if (not self.test_only):
phases = [{'train': True} for _ in range(math.ceil((self.train_phases_per_epoch * self.num_epochs)))]
if self._train_only:
return phases
final_phases = []
for (i, phase) in enumerate(phases):
final_phases.append(phase)
if (((i + 1) % self.test_phase_period) == 0):
final_phases.append({'train': False})
if final_phases[(- 1)]['train']:
final_phases.append({'train': False})
return final_phases
return [{'train': False} for _ in range(self.num_epochs)] |
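Editor's note: a standalone restatement of the interleaving logic above, useful for checking the schedule it produces. It assumes the common case described in the docstring: both train and test datasets, and not a test-only run:

```python
import math

def build_phases(num_epochs, test_phase_period=1, train_phases_per_epoch=1):
    # Mirrors the method above for a run with both train and test datasets.
    phases = [{"train": True}
              for _ in range(math.ceil(train_phases_per_epoch * num_epochs))]
    final = []
    for i, phase in enumerate(phases):
        final.append(phase)
        if (i + 1) % test_phase_period == 0:
            final.append({"train": False})
    if final[-1]["train"]:
        final.append({"train": False})
    return final

# build_phases(4, test_phase_period=2) -> [T, T, F, T, T, F]
# where T = {'train': True} and F = {'train': False}: a test phase after every
# second train phase, always ending on a test phase.
```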
def build_dataloader_from_dataset(self, dataset, **kwargs):
'Builds a dataloader from the provided dataset\n\n Args:\n dataset: A ClassyDataset\n kwargs: Additional kwargs to pass during dataloader construction for\n derived classes\n '
return dataset.iterator(phase_type=self.phase_type, current_phase_id=(self.train_phase_idx if self.train else 0), pin_memory=(self.use_gpu and (torch.cuda.device_count() > 1)), multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs) | -217,371,196,178,296,260 | Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes | classy_vision/tasks/classification_task.py | build_dataloader_from_dataset | hahaxun/ClassyVision | python | def build_dataloader_from_dataset(self, dataset, **kwargs):
'Builds a dataloader from the provided dataset\n\n Args:\n dataset: A ClassyDataset\n kwargs: Additional kwargs to pass during dataloader construction for\n derived classes\n '
return dataset.iterator(phase_type=self.phase_type, current_phase_id=(self.train_phase_idx if self.train else 0), pin_memory=(self.use_gpu and (torch.cuda.device_count() > 1)), multiprocessing_context=mp.get_context(self.dataloader_mp_context), **kwargs) |
def build_dataloaders_for_current_phase(self):
'Builds dataloader(s) for the current phase.\n\n Deriving classes can override this method to support custom behavior, like\n supporting multiple dataloaders in parallel.\n '
self.dataloader = self.build_dataloader_from_dataset(self.datasets[self.phase_type]) | 1,567,424,470,600,948,500 | Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel. | classy_vision/tasks/classification_task.py | build_dataloaders_for_current_phase | hahaxun/ClassyVision | python | def build_dataloaders_for_current_phase(self):
'Builds dataloader(s) for the current phase.\n\n Deriving classes can override this method to support custom behavior, like\n supporting multiple dataloaders in parallel.\n '
self.dataloader = self.build_dataloader_from_dataset(self.datasets[self.phase_type]) |
def prepare(self):
'Prepares task for training, populates all derived attributes '
self.phases = self._build_phases()
self.train = (False if self.test_only else self.train)
if (self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH):
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif (self.batch_norm_sync_mode == BatchNormSyncMode.APEX):
sync_bn_process_group = apex.parallel.create_syncbn_process_group(self.batch_norm_sync_group_size)
self.base_model = apex.parallel.convert_syncbn_model(self.base_model, process_group=sync_bn_process_group)
if self.use_gpu:
(self.base_model, self.base_loss) = copy_model_to_gpu(self.base_model, self.base_loss)
else:
self.base_loss.cpu()
self.base_model.cpu()
if (self.optimizer is not None):
self.prepare_optimizer(optimizer=self.optimizer, model=self.base_model, loss=self.base_loss)
if (self.amp_args is not None):
if (self.amp_type == AmpType.APEX):
if (self.optimizer is None):
self.base_model = apex.amp.initialize(self.base_model, optimizers=None, **self.amp_args)
else:
(self.base_model, self.optimizer.optimizer) = apex.amp.initialize(self.base_model, self.optimizer.optimizer, **self.amp_args)
if (self.simulated_global_batchsize is not None):
if ((self.simulated_global_batchsize % self.get_global_batchsize()) != 0):
raise ValueError(f'Global batch size ({self.get_global_batchsize()}) must divide simulated_global_batchsize ({self.simulated_global_batchsize})')
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (self.simulated_global_batchsize // self.get_global_batchsize())
if (self.optimizer_period > 1):
logging.info(f'Using gradient accumulation with a period of {self.optimizer_period}')
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (None if (self.checkpoint_dict is None) else self.checkpoint_dict['classy_state_dict'])
if (classy_state_dict is not None):
state_load_success = update_classy_state(self, classy_state_dict)
assert state_load_success, 'Update classy state from checkpoint was unsuccessful.'
self.init_distributed_data_parallel_model() | 3,907,489,649,831,067,600 | Prepares task for training, populates all derived attributes | classy_vision/tasks/classification_task.py | prepare | hahaxun/ClassyVision | python | def prepare(self):
' '
self.phases = self._build_phases()
self.train = (False if self.test_only else self.train)
if (self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH):
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif (self.batch_norm_sync_mode == BatchNormSyncMode.APEX):
sync_bn_process_group = apex.parallel.create_syncbn_process_group(self.batch_norm_sync_group_size)
self.base_model = apex.parallel.convert_syncbn_model(self.base_model, process_group=sync_bn_process_group)
if self.use_gpu:
(self.base_model, self.base_loss) = copy_model_to_gpu(self.base_model, self.base_loss)
else:
self.base_loss.cpu()
self.base_model.cpu()
if (self.optimizer is not None):
self.prepare_optimizer(optimizer=self.optimizer, model=self.base_model, loss=self.base_loss)
if (self.amp_args is not None):
if (self.amp_type == AmpType.APEX):
if (self.optimizer is None):
self.base_model = apex.amp.initialize(self.base_model, optimizers=None, **self.amp_args)
else:
(self.base_model, self.optimizer.optimizer) = apex.amp.initialize(self.base_model, self.optimizer.optimizer, **self.amp_args)
if (self.simulated_global_batchsize is not None):
if ((self.simulated_global_batchsize % self.get_global_batchsize()) != 0):
raise ValueError(f'Global batch size ({self.get_global_batchsize()}) must divide simulated_global_batchsize ({self.simulated_global_batchsize})')
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (self.simulated_global_batchsize // self.get_global_batchsize())
if (self.optimizer_period > 1):
logging.info(f'Using gradient accumulation with a period of {self.optimizer_period}')
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (None if (self.checkpoint_dict is None) else self.checkpoint_dict['classy_state_dict'])
if (classy_state_dict is not None):
state_load_success = update_classy_state(self, classy_state_dict)
assert state_load_success, 'Update classy state from checkpoint was unsuccessful.'
self.init_distributed_data_parallel_model() |
def init_distributed_data_parallel_model(self):
'\n Initialize\n `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/\n docs/stable/nn.html#distributeddataparallel>`_.\n\n Needed for distributed training. This is where a model should be wrapped by DDP.\n '
if (not is_distributed_training_run()):
return
assert (self.distributed_model is None), 'init_ddp_non_elastic must only be called once'
broadcast_buffers = (self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS)
if self.use_sharded_ddp:
if (not isinstance(self.optimizer, ZeRO)):
raise ValueError('ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer')
from fairscale.nn.data_parallel import ShardedDataParallel
self.distributed_model = ShardedDataParallel(module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers)
else:
self.distributed_model = init_distributed_data_parallel_model(self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
process_group = None
self.distributed_model.register_comm_hook(process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook)
if (isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters()):
logging.info('Initializing distributed loss')
self.distributed_loss = init_distributed_data_parallel_model(self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb) | 4,334,758,329,578,739,000 | Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP. | classy_vision/tasks/classification_task.py | init_distributed_data_parallel_model | hahaxun/ClassyVision | python | def init_distributed_data_parallel_model(self):
'\n Initialize\n `torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/\n docs/stable/nn.html#distributeddataparallel>`_.\n\n Needed for distributed training. This is where a model should be wrapped by DDP.\n '
if (not is_distributed_training_run()):
return
assert (self.distributed_model is None), 'init_ddp_non_elastic must only be called once'
broadcast_buffers = (self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS)
if self.use_sharded_ddp:
if (not isinstance(self.optimizer, ZeRO)):
raise ValueError('ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer')
from fairscale.nn.data_parallel import ShardedDataParallel
self.distributed_model = ShardedDataParallel(module=self.base_model, sharded_optimizer=self.optimizer.optimizer, broadcast_buffers=broadcast_buffers)
else:
self.distributed_model = init_distributed_data_parallel_model(self.base_model, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
process_group = None
self.distributed_model.register_comm_hook(process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook)
if (isinstance(self.base_loss, ClassyLoss) and self.base_loss.has_learned_parameters()):
logging.info('Initializing distributed loss')
self.distributed_loss = init_distributed_data_parallel_model(self.base_loss, broadcast_buffers=broadcast_buffers, find_unused_parameters=self.find_unused_parameters, bucket_cap_mb=self.ddp_bucket_cap_mb) |
@property
def where(self):
'Returns the proportion of training that has completed. If in test\n only mode, returns proportion of testing completed\n\n Returned value is a float in the range [0, 1)\n '
current_step = (self.num_updates / self.get_global_batchsize())
num_phases = (self.get_total_test_phases() if self.test_only else self.get_total_training_phases())
if (self.num_batches_per_phase <= 0):
raise RuntimeError('No batches to read. Is the dataset empty?')
num_steps = (num_phases * self.num_batches_per_phase)
where = (current_step / num_steps)
return where | 6,274,725,735,764,194,000 | Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1) | classy_vision/tasks/classification_task.py | where | hahaxun/ClassyVision | python | @property
def where(self):
'Returns the proportion of training that has completed. If in test\n only mode, returns proportion of testing completed\n\n Returned value is a float in the range [0, 1)\n '
current_step = (self.num_updates / self.get_global_batchsize())
num_phases = (self.get_total_test_phases() if self.test_only else self.get_total_training_phases())
if (self.num_batches_per_phase <= 0):
raise RuntimeError('No batches to read. Is the dataset empty?')
num_steps = (num_phases * self.num_batches_per_phase)
where = (current_step / num_steps)
return where |
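Editor's note: a quick arithmetic check of the formula above with made-up numbers, a global batch size of 256 and 8 training phases of 100 batches each:

```python
# After 200 batches have been consumed (num_updates = 200 * 256):
current_step = (200 * 256) / 256   # 200.0
num_steps = 8 * 100                # 800
where = current_step / num_steps   # 0.25 -> a quarter of the way through training
```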
def get_classy_state(self, deep_copy: bool=False):
'Returns serializable state of task\n\n Args:\n deep_copy: If true, does a deep copy of state before returning.\n '
optimizer_state = {}
if (self.optimizer is not None):
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {'train': self.train, 'base_model': self.base_model.get_classy_state(), 'meters': [meter.get_classy_state() for meter in self.meters], 'optimizer': optimizer_state, 'phase_idx': self.phase_idx, 'train_phase_idx': self.train_phase_idx, 'num_updates': self.num_updates, 'losses': self.losses, 'hooks': {hook.name(): hook.get_classy_state() for hook in self.hooks}, 'loss': {}}
if (('train' in self.datasets) and self._is_checkpointable_dataset(self.datasets['train'])):
classy_state_dict['train_dataset_iterator'] = self.datasets['train'].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict['loss'] = self.base_loss.get_classy_state()
if (self.amp_args is not None):
if (self.amp_type == AmpType.APEX):
classy_state_dict['amp'] = apex.amp.state_dict()
elif (self.amp_grad_scaler is not None):
classy_state_dict['amp'] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict | 3,085,759,520,983,295,000 | Returns serializable state of task
Args:
deep_copy: If true, does a deep copy of state before returning. | classy_vision/tasks/classification_task.py | get_classy_state | hahaxun/ClassyVision | python | def get_classy_state(self, deep_copy: bool=False):
'Returns serializable state of task\n\n Args:\n deep_copy: If true, does a deep copy of state before returning.\n '
optimizer_state = {}
if (self.optimizer is not None):
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {'train': self.train, 'base_model': self.base_model.get_classy_state(), 'meters': [meter.get_classy_state() for meter in self.meters], 'optimizer': optimizer_state, 'phase_idx': self.phase_idx, 'train_phase_idx': self.train_phase_idx, 'num_updates': self.num_updates, 'losses': self.losses, 'hooks': {hook.name(): hook.get_classy_state() for hook in self.hooks}, 'loss': {}}
if (('train' in self.datasets) and self._is_checkpointable_dataset(self.datasets['train'])):
classy_state_dict['train_dataset_iterator'] = self.datasets['train'].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict['loss'] = self.base_loss.get_classy_state()
if (self.amp_args is not None):
if (self.amp_type == AmpType.APEX):
classy_state_dict['amp'] = apex.amp.state_dict()
elif (self.amp_grad_scaler is not None):
classy_state_dict['amp'] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict |
def set_classy_state(self, state):
'Set task state\n\n Args:\n state: Dict containing state of a task\n '
self.train = (False if self.test_only else state['train'])
if (not self.test_only):
self.phase_idx = state['phase_idx']
self.num_updates = state['num_updates']
self.train_phase_idx = state['train_phase_idx']
self.losses = state['losses']
for (meter, meter_state) in zip(self.meters, state['meters']):
meter.set_classy_state(meter_state)
self.base_model.set_classy_state(state['base_model'])
if (self.optimizer is not None):
self.optimizer.set_classy_state(state['optimizer'])
if (state.get('loss') and isinstance(self.base_loss, ClassyLoss)):
self.base_loss.set_classy_state(state['loss'])
if ('amp' in state):
if (self.amp_type == AmpType.APEX):
apex.amp.load_state_dict(state['amp'])
else:
self.amp_grad_scaler.load_state_dict(state['amp'])
for hook in self.hooks:
if (hook.name() in state['hooks']):
hook.set_classy_state(state['hooks'][hook.name()])
else:
logging.warning(f'No state found for hook: {hook.name()}')
if (('train' in self.datasets) and self._is_checkpointable_dataset(self.datasets['train'])):
self.datasets['train'].set_classy_state(state.get('train_dataset_iterator')) | -4,464,662,588,784,485,000 | Set task state
Args:
state: Dict containing state of a task | classy_vision/tasks/classification_task.py | set_classy_state | hahaxun/ClassyVision | python | def set_classy_state(self, state):
'Set task state\n\n Args:\n state: Dict containing state of a task\n '
self.train = (False if self.test_only else state['train'])
if (not self.test_only):
self.phase_idx = state['phase_idx']
self.num_updates = state['num_updates']
self.train_phase_idx = state['train_phase_idx']
self.losses = state['losses']
for (meter, meter_state) in zip(self.meters, state['meters']):
meter.set_classy_state(meter_state)
self.base_model.set_classy_state(state['base_model'])
if (self.optimizer is not None):
self.optimizer.set_classy_state(state['optimizer'])
if (state.get('loss') and isinstance(self.base_loss, ClassyLoss)):
self.base_loss.set_classy_state(state['loss'])
if ('amp' in state):
if (self.amp_type == AmpType.APEX):
apex.amp.load_state_dict(state['amp'])
else:
self.amp_grad_scaler.load_state_dict(state['amp'])
for hook in self.hooks:
if (hook.name() in state['hooks']):
hook.set_classy_state(state['hooks'][hook.name()])
else:
logging.warning(f'No state found for hook: {hook.name()}')
if (('train' in self.datasets) and self._is_checkpointable_dataset(self.datasets['train'])):
self.datasets['train'].set_classy_state(state.get('train_dataset_iterator')) |
def _should_do_step(self):
'Tells if we will be performing an optimizer step.\n\n Returns True always if there is no gradient accumulation. With gradient\n accumulation returns True only when the gradients will be synchronized and we\n will be performing an optimizer step.\n '
update_idx = (self.num_updates // self.get_global_batchsize())
return ((update_idx % self.optimizer_period) == (self.optimizer_period - 1)) | 7,095,668,484,539,073,000 | Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step. | classy_vision/tasks/classification_task.py | _should_do_step | hahaxun/ClassyVision | python | def _should_do_step(self):
'Tells if we will be performing an optimizer step.\n\n Returns True always if there is no gradient accumulation. With gradient\n accumulation returns True only when the gradients will be synchronized and we\n will be performing an optimizer step.\n '
update_idx = (self.num_updates // self.get_global_batchsize())
return ((update_idx % self.optimizer_period) == (self.optimizer_period - 1)) |
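Editor's note: a toy trace of the gradient-accumulation schedule above with made-up numbers, `optimizer_period = 4` and a global batch size of 32:

```python
# The optimizer step fires on 0-based update indices 3, 7, 11, ...
global_batchsize, optimizer_period = 32, 4
for num_updates in range(0, 8 * global_batchsize, global_batchsize):
    update_idx = num_updates // global_batchsize
    do_step = (update_idx % optimizer_period) == (optimizer_period - 1)
    print(update_idx, do_step)   # True only at update_idx 3 and 7
```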
def train_step(self):
'Train step to be executed in train loop.'
self.last_batch = None
with Timer() as timer:
sample = next(self.data_iterator)
assert (isinstance(sample, dict) and ('input' in sample) and ('target' in sample)), (f"Returned sample [{sample}] is not a map with 'input' and" + " 'target' keys")
target = sample['target']
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if (self.mixup_transform is not None):
sample = self.mixup_transform(sample)
torch_amp_context = (torch.cuda.amp.autocast() if (self.amp_type == AmpType.PYTORCH) else contextlib.suppress())
do_step = self._should_do_step()
ctx_mgr_model = (self.distributed_model.no_sync() if ((self.distributed_model is not None) and (not do_step)) else contextlib.suppress())
ctx_mgr_loss = (self.distributed_loss.no_sync() if ((self.distributed_loss is not None) and (not do_step)) else contextlib.suppress())
with ctx_mgr_model, ctx_mgr_loss:
with torch.enable_grad(), torch_amp_context:
output = self.model(sample['input'])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append((loss.data.cpu().item() * target.size(0)))
self.update_meters(output, sample)
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
self.last_batch = LastBatchInfo(loss=loss, output=output, target=target, sample=sample, step_data={'sample_fetch_time': timer.elapsed_time}) | 1,406,572,901,275,031,300 | Train step to be executed in train loop. | classy_vision/tasks/classification_task.py | train_step | hahaxun/ClassyVision | python | def train_step(self):
self.last_batch = None
with Timer() as timer:
sample = next(self.data_iterator)
assert (isinstance(sample, dict) and ('input' in sample) and ('target' in sample)), (f"Returned sample [{sample}] is not a map with 'input' and" + " 'target' keys")
target = sample['target']
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if (self.mixup_transform is not None):
sample = self.mixup_transform(sample)
torch_amp_context = (torch.cuda.amp.autocast() if (self.amp_type == AmpType.PYTORCH) else contextlib.suppress())
do_step = self._should_do_step()
ctx_mgr_model = (self.distributed_model.no_sync() if ((self.distributed_model is not None) and (not do_step)) else contextlib.suppress())
ctx_mgr_loss = (self.distributed_loss.no_sync() if ((self.distributed_loss is not None) and (not do_step)) else contextlib.suppress())
with ctx_mgr_model, ctx_mgr_loss:
with torch.enable_grad(), torch_amp_context:
output = self.model(sample['input'])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append((loss.data.cpu().item() * target.size(0)))
self.update_meters(output, sample)
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
self.last_batch = LastBatchInfo(loss=loss, output=output, target=target, sample=sample, step_data={'sample_fetch_time': timer.elapsed_time}) |
def run_optimizer(self, loss):
'Runs the backwards pass and updates the optimizer'
self.check_inf_nan(loss)
update_idx = (self.num_updates // self.get_global_batchsize())
do_zero_grad = ((update_idx % self.optimizer_period) == 0)
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if (self.amp_type == AmpType.APEX):
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif (self.amp_type == AmpType.PYTORCH):
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
if (self.optimizer_period != 1):
self._rescale_gradients((1 / self.optimizer_period))
if (self.clip_grad_norm is not None):
self._clip_gradients(self.clip_grad_norm)
if (self.amp_type == AmpType.PYTORCH):
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where) | 5,778,416,676,523,271,000 | Runs the backwards pass and updates the optimizer
self.check_inf_nan(loss)
update_idx = (self.num_updates // self.get_global_batchsize())
do_zero_grad = ((update_idx % self.optimizer_period) == 0)
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if (self.amp_type == AmpType.APEX):
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif (self.amp_type == AmpType.PYTORCH):
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
if (self.optimizer_period != 1):
self._rescale_gradients((1 / self.optimizer_period))
if (self.clip_grad_norm is not None):
self._clip_gradients(self.clip_grad_norm)
if (self.amp_type == AmpType.PYTORCH):
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where) |
def synchronize_losses(self):
'Average the losses across the different replicas'
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist() | 273,915,927,694,461,470 | Average the losses across the different replicas | classy_vision/tasks/classification_task.py | synchronize_losses | hahaxun/ClassyVision | python | def synchronize_losses(self):
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist() |
def advance_phase(self):
'Performs bookkeeping / task updates between phases\n\n Increments phase idx, resets meters, resets loss history,\n resets counters, shuffles dataset, rebuilds iterators, and\n sets the train / test state for phase.\n '
logging.debug('Advancing phase')
for meter in self.meters:
meter.reset()
self.losses = []
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = (True if phase['train'] else False)
if self.train:
self.train_phase_idx += 1
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
self._set_model_train_mode() | 8,052,403,596,791,451,000 | Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase. | classy_vision/tasks/classification_task.py | advance_phase | hahaxun/ClassyVision | python | def advance_phase(self):
'Performs bookkeeping / task updates between phases\n\n Increments phase idx, resets meters, resets loss history,\n resets counters, shuffles dataset, rebuilds iterators, and\n sets the train / test state for phase.\n '
logging.debug('Advancing phase')
for meter in self.meters:
meter.reset()
self.losses = []
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = (True if phase['train'] else False)
if self.train:
self.train_phase_idx += 1
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
self._set_model_train_mode() |
def done_training(self):
'Stop condition for training'
return ((self.phase_idx + 1) >= len(self.phases)) | -7,333,598,901,078,115,000 | Stop condition for training | classy_vision/tasks/classification_task.py | done_training | hahaxun/ClassyVision | python | def done_training(self):
return ((self.phase_idx + 1) >= len(self.phases)) |
def create_data_iterators(self):
'Creates data iterator(s) for the current phase.'
del self.data_iterator
self.data_iterator = iter(self.dataloader) | 2,382,980,055,976,533,000 | Creates data iterator(s) for the current phase. | classy_vision/tasks/classification_task.py | create_data_iterators | hahaxun/ClassyVision | python | def create_data_iterators(self):
del self.data_iterator
self.data_iterator = iter(self.dataloader) |
def _set_model_train_mode(self):
'Set train mode for model'
phase = self.phases[self.phase_idx]
self.base_model.train(phase['train'])
self.base_loss.train(phase['train'])
if ((self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL) and (not self.train)):
self._broadcast_buffers() | 1,552,558,941,805,127,200 | Set train mode for model | classy_vision/tasks/classification_task.py | _set_model_train_mode | hahaxun/ClassyVision | python | def _set_model_train_mode(self):
phase = self.phases[self.phase_idx]
self.base_model.train(phase['train'])
self.base_loss.train(phase['train'])
if ((self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL) and (not self.train)):
self._broadcast_buffers() |
def _broadcast_buffers(self):
'Explicitly synchronize buffers across all devices.'
if (self.distributed_model is None):
return
buffers = list(self.base_model.buffers())
if (len(buffers) > 0):
logging.info('Synchronizing buffers before evaluation.')
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group) | 3,299,607,995,830,788,600 | Explicitly synchronize buffers across all devices. | classy_vision/tasks/classification_task.py | _broadcast_buffers | hahaxun/ClassyVision | python | def _broadcast_buffers(self):
if (self.distributed_model is None):
return
buffers = list(self.base_model.buffers())
if (len(buffers) > 0):
logging.info('Synchronizing buffers before evaluation.')
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group) |
def get_batchsize_per_replica(self):
"Return local replica's batchsize for dataset (e.g. batchsize per GPU)"
return self.datasets[self.phase_type].get_batchsize_per_replica() | 513,977,166,658,707,800 | Return local replica's batchsize for dataset (e.g. batchsize per GPU) | classy_vision/tasks/classification_task.py | get_batchsize_per_replica | hahaxun/ClassyVision | python | def get_batchsize_per_replica(self):
return self.datasets[self.phase_type].get_batchsize_per_replica() |
def get_global_batchsize(self):
'Return global batchsize across all trainers'
return self.datasets[self.phase_type].get_global_batchsize() | -3,338,348,201,598,782,500 | Return global batchsize across all trainers | classy_vision/tasks/classification_task.py | get_global_batchsize | hahaxun/ClassyVision | python | def get_global_batchsize(self):
return self.datasets[self.phase_type].get_global_batchsize() |
def __init__(self, dic):
' initialize. '
self._dic = dic
self._lazyload = {} | -7,927,460,824,320,636,000 | initialize. | pycwr/configure/pyart_lazydict.py | __init__ | 1271756664/study | python | def __init__(self, dic):
' '
self._dic = dic
self._lazyload = {} |
def __setitem__(self, key, value):
' Set a key which will not be stored and evaluated traditionally. '
self._dic[key] = value
if (key in self._lazyload):
del self._lazyload[key] | 6,706,434,709,855,033,000 | Set a key which will not be stored and evaluated traditionally. | pycwr/configure/pyart_lazydict.py | __setitem__ | 1271756664/study | python | def __setitem__(self, key, value):
' '
self._dic[key] = value
if (key in self._lazyload):
del self._lazyload[key] |
def __getitem__(self, key):
' Get the value of a key, evaluating a lazy key if needed. '
if (key in self._lazyload):
value = self._lazyload[key]()
self._dic[key] = value
del self._lazyload[key]
return self._dic[key] | -639,051,963,247,231,400 | Get the value of a key, evaluating a lazy key if needed. | pycwr/configure/pyart_lazydict.py | __getitem__ | 1271756664/study | python | def __getitem__(self, key):
' '
if (key in self._lazyload):
value = self._lazyload[key]()
self._dic[key] = value
del self._lazyload[key]
return self._dic[key] |
def __delitem__(self, key):
' Remove a lazy or traditional key from the dictionary. '
if (key in self._lazyload):
del self._lazyload[key]
else:
del self._dic[key] | -8,030,515,860,348,572,000 | Remove a lazy or traditional key from the dictionary. | pycwr/configure/pyart_lazydict.py | __delitem__ | 1271756664/study | python | def __delitem__(self, key):
' '
if (key in self._lazyload):
del self._lazyload[key]
else:
del self._dic[key] |
def __iter__(self):
' Iterate over all lazy and traditional keys. '
return itertools.chain(self._dic.copy(), self._lazyload.copy()) | -6,806,612,679,534,530,000 | Iterate over all lazy and traditional keys. | pycwr/configure/pyart_lazydict.py | __iter__ | 1271756664/study | python | def __iter__(self):
' '
return itertools.chain(self._dic.copy(), self._lazyload.copy()) |
def __len__(self):
' Return the number of traditional and lazy keys. '
return (len(self._dic) + len(self._lazyload)) | -579,698,376,871,627,000 | Return the number of traditional and lazy keys. | pycwr/configure/pyart_lazydict.py | __len__ | 1271756664/study | python | def __len__(self):
' '
return (len(self._dic) + len(self._lazyload)) |
def __str__(self):
' Return a string representation of the object. '
if ((len(self._dic) == 0) or (len(self._lazyload) == 0)):
seperator = ''
else:
seperator = ', '
lazy_reprs = [(repr(k), repr(v)) for (k, v) in self._lazyload.items()]
lazy_strs = [('%s: LazyLoad(%s)' % r) for r in lazy_reprs]
lazy_str = (', '.join(lazy_strs) + '}')
return ((str(self._dic)[:(- 1)] + seperator) + lazy_str) | 1,127,157,148,018,508,300 | Return a string representation of the object. | pycwr/configure/pyart_lazydict.py | __str__ | 1271756664/study | python | def __str__(self):
' '
if ((len(self._dic) == 0) or (len(self._lazyload) == 0)):
seperator = ''
else:
seperator = ', '
lazy_reprs = [(repr(k), repr(v)) for (k, v) in self._lazyload.items()]
lazy_strs = [('%s: LazyLoad(%s)' % r) for r in lazy_reprs]
lazy_str = (', '.join(lazy_strs) + '}')
return ((str(self._dic)[:(- 1)] + seperator) + lazy_str) |
def has_key(self, key):
' True if dictionary has key, else False. '
return (key in self) | -6,618,503,770,004,047,000 | True if dictionary has key, else False. | pycwr/configure/pyart_lazydict.py | has_key | 1271756664/study | python | def has_key(self, key):
' '
return (key in self) |
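Editor's note: putting the `LazyLoadDict` rows above together, a small hedged usage sketch. Values are registered through the internal `_lazyload` mapping here purely for illustration, since the class's public registration method is not among the rows shown:

```python
from pycwr.configure.pyart_lazydict import LazyLoadDict  # path from the rows above

def expensive_load():
    print("loading...")
    return [1, 2, 3]

d = LazyLoadDict({"units": "dBZ"})
d._lazyload["data"] = expensive_load  # illustrative: register a deferred value
print(len(d))      # 2: one plain key plus one pending lazy key
print(d["data"])   # prints "loading..." once, then caches the result in _dic
print(d["data"])   # cached: the callable is not invoked again
```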