python_code | repo_name | file_path
---|---|---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestElectronic:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_electronic.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_electronic_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(input_case="cased", cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_electronic.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=100, punct_post_process=False,
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_electronic.py |
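# Standalone usage sketch of the APIs exercised in the tests above
# (InverseNormalizer.inverse_normalize and Normalizer.normalize). Illustrative
# only: the input strings, the omitted cache_dir and the outputs noted in the
# comments are assumptions, not part of the test suite.
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer

inverse_normalizer = InverseNormalizer(lang='en')
# Spoken-to-written form, roughly "123".
print(inverse_normalizer.inverse_normalize("one hundred twenty three", verbose=False))
normalizer = Normalizer(input_case='cased', lang='en')
# Written-to-spoken form, roughly "one hundred twenty three dollars".
print(normalizer.normalize("$123", verbose=False))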
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestRoman:
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
# address is tagged by the measure class
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_roman.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
# pred = self.normalizer_en.normalize(test_input, verbose=False)
# assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False,
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_roman.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestTelephone:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_telephone.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_telephone_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_telephone.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestCardinal:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_cardinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_cardinal_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(
input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True
)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_cardinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False, punct_post_process=False)
assert pred == expected, f"input: {test_input}"
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False,
)
assert expected in pred_non_deterministic, f"input: {test_input}"
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestDate:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_date.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_date_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(
input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True
)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_date.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm_uncased(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, punct_post_process=False, n_tagged=100
)
assert expected in pred_non_deterministic, f"INPUT: {test_input}"
normalizer_uppercased = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
cases_uppercased = {"Aug. 8": "august eighth", "8 Aug.": "the eighth of august", "aug. 8": "august eighth"}
@parameterized.expand(cases_uppercased.items())
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm_cased(self, test_input, expected):
pred = self.normalizer_uppercased.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, punct_post_process=False, n_tagged=30
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestRange:
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
# address is tagged by the measure class
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_range.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False,
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_range.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestWord:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_word.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_word_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(
input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True
)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_word.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input} != {expected}"
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False
)
assert expected in pred_non_deterministic, f"input: {test_input}"
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestSpecialText:
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_special_text.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
# Audio-based normalization will output only options without digits
if self.normalizer_with_audio_en and sum([1 for ch in expected if ch.isdigit()]) == 0:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=True,
)
assert expected in pred_non_deterministic, f"input: {test_input}"
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_special_text.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestMath:
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
# math is tagged by the measure class
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_math.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False,
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_math.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestMoney:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_money.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input}"
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input}"
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_money_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input}"
normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_money.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input}"
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=30, punct_post_process=False,
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from ..utils import CACHE_DIR
class TestTextSentenceSplit:
normalizer_en = Normalizer(
input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_text_sentence_split(self):
text = "This happened in 1918 when Mrs. and Mr. Smith paid $111.12 in U.S.A. at 9 a.m. on Dec. 1. 2020. And Jan. 17th. This is an example. He paid $123 for this desk. 123rd, St. Patrick. This is a. b. and there is c.b."
gt_sentences = [
'This happened in 1918 when Mrs. and Mr. Smith paid $111.12 in U.S.A. at 9 a.m. on Dec. 1. 2020.',
'And Jan. 17th.',
'This is an example.',
'He paid $123 for this desk.',
'123rd, St. Patrick. This is a.b. and there is c.b.',
]
sentences = self.normalizer_en.split_text_into_sentences(text)
for s, gt in zip(sentences, gt_sentences):
print(s, gt)
assert gt_sentences == sentences
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_text_split.py |
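# Sketch: the sentence splitter tested above pairs naturally with per-sentence
# normalization. Illustrative only; the sample text is an assumption and the
# cache_dir argument used by the tests is omitted here.
from nemo_text_processing.text_normalization.normalize import Normalizer

normalizer = Normalizer(input_case='cased', lang='en', post_process=True)
text = "Mr. Smith paid $111.12 at 9 a.m. on Dec. 1. This is an example."
for sentence in normalizer.split_text_into_sentences(text):
    print(normalizer.normalize(sentence, verbose=False))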
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestWhitelist:
inverse_normalizer_en = InverseNormalizer(lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_en_cased = InverseNormalizer(
lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_whitelist.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_en.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
@parameterized.expand(parse_test_case_file('en/data_inverse_text_normalization/test_cases_whitelist_cased.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm_cased(self, test_input, expected):
pred = self.inverse_normalizer_en_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_en = Normalizer(input_case='cased', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_with_audio_en = (
NormalizerWithAudio(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('en/data_text_normalization/test_cases_whitelist.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_en.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
normalizer_uppercased = Normalizer(input_case='cased', lang='en')
cases_uppercased = {"Dr. Evil": "doctor Evil", "dr. Evil": "dr. Evil", "no. 4": "no. four"}
@parameterized.expand(cases_uppercased.items())
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm_cased(self, test_input, expected):
pred = self.normalizer_uppercased.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_with_audio_en:
pred_non_deterministic = self.normalizer_with_audio_en.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/en/test_whitelist.py |
"""
"""
import sys
import os
from pkg_resources import VersionConflict, require
try:
require("setuptools>=42")
except VersionConflict:
print("Error: version of setuptools is too old (<42)!")
sys.exit(1)
if __name__ == "__main__":
import skbuild
PytorchNvCodec = "PytorchNvCodec @ git+https://github.com/NVIDIA/VideoProcessingFramework.git#subdirectory=src/PytorchNvCodec/"
skbuild.setup(
name="PyNvCodec",
version="2.0",
description="Video Processing Library with full NVENC/NVDEC hardware acceleration",
author="NVIDIA",
license="Apache 2.0",
install_requires=["numpy"],
extras_require={
# , "PyOpenGL-accelerate" # does not compile on 3.10
"dev": ["pycuda", "pyopengl", "torch", "torchvision", "opencv-python", "onnx", "tensorrt", f"PytorchNvCodec @ file://{os.getcwd()}/src/PytorchNvCodec/"],
"samples": ["pycuda", "pyopengl", "torch", "torchvision", "opencv-python", "onnx", "tensorrt", "tqdm", PytorchNvCodec],
"tests": ["pycuda", "pyopengl", "torch", "torchvision", "opencv-python", PytorchNvCodec],
"torch": ["torch", "torchvision", PytorchNvCodec],
"tensorrt": ["torch", "torchvision", PytorchNvCodec],
},
dependency_links=[
"https://pypi.ngc.nvidia.com"
],
packages=["PyNvCodec"],
package_data={"PyNvCodec": ["__init__.pyi"]},
package_dir={"": "src"},
cmake_install_dir="src",
)
| VideoProcessingFramework-master | setup.py |
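# Assumed installation commands for the package and extras declared in the
# setup.py above (illustrative; build prerequisites such as CUDA and the
# FFmpeg headers are not covered by this sketch):
#   pip install .              # core PyNvCodec module
#   pip install ".[torch]"     # adds torch, torchvision and PytorchNvCodec
#   pip install ".[samples]"   # adds everything needed by the bundled samples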
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from os.path import join, dirname
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import unittest
import random
# Ground truth information about input video
gt_file = join(dirname(__file__), "test.mp4")
gt_width = 848
gt_height = 464
gt_is_vfr = False
gt_pix_fmt = nvc.PixelFormat.NV12
gt_framerate = 30
gt_num_frames = 96
gt_len_seconds = 3.23
gt_color_space = nvc.ColorSpace.BT_709
gt_color_range = nvc.ColorRange.MPEG
class TestDemuxer(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
enc_file = gt_file
self.nvDmx = nvc.PyFFmpegDemuxer(enc_file, {})
def test_width(self):
self.assertEqual(gt_width, self.nvDmx.Width())
def test_height(self):
self.assertEqual(gt_height, self.nvDmx.Height())
def test_color_space(self):
self.assertEqual(gt_color_space, self.nvDmx.ColorSpace())
def test_color_range(self):
self.assertEqual(gt_color_range, self.nvDmx.ColorRange())
def test_format(self):
self.assertEqual(gt_pix_fmt, self.nvDmx.Format())
def test_framerate(self):
self.assertEqual(gt_framerate, self.nvDmx.Framerate())
def test_avgframerate(self):
self.assertEqual(gt_framerate, self.nvDmx.AvgFramerate())
def test_isvfr(self):
self.assertEqual(gt_is_vfr, self.nvDmx.IsVFR())
def test_timebase(self):
epsilon = 1e-4
gt_timebase = 8.1380e-5
self.assertLessEqual(np.abs(gt_timebase - self.nvDmx.Timebase()), epsilon)
def test_demux_all_packets(self):
num_packets = 0
last_dts = 0
while True:
pdata = nvc.PacketData()
packet = np.ndarray(shape=(0), dtype=np.uint8)
if not self.nvDmx.DemuxSinglePacket(
packet,
):
break
self.nvDmx.LastPacketData(pdata)
if 0 != num_packets:
self.assertGreaterEqual(pdata.dts, last_dts)
last_dts = pdata.dts
num_packets += 1
self.assertEqual(gt_num_frames, num_packets)
def test_seek_framenum(self):
seek_frame = random.randint(0, gt_num_frames - 1)
if self.nvDmx.IsVFR():
print("Seek on VFR sequence, skipping this test")
return
for mode in (nvc.SeekMode.EXACT_FRAME, nvc.SeekMode.PREV_KEY_FRAME):
packet = np.ndarray(shape=(0), dtype=np.uint8)
sk = nvc.SeekContext(
seek_frame=seek_frame,
mode=mode,
)
self.assertTrue(self.nvDmx.Seek(sk, packet))
pdata = nvc.PacketData()
self.nvDmx.LastPacketData(pdata)
if nvc.SeekMode.EXACT_FRAME == mode:
self.assertEqual(pdata.dts, pdata.duration * seek_frame)
elif nvc.SeekMode.PREV_KEY_FRAME == mode:
self.assertLessEqual(pdata.dts, pdata.duration * seek_frame)
def test_seek_timestamp(self):
timestamp = random.random() * gt_len_seconds
if self.nvDmx.IsVFR():
print("Seek on VFR sequence, skipping this test")
return
packet = np.ndarray(shape=(0), dtype=np.uint8)
sk = nvc.SeekContext(
seek_ts=timestamp,
mode=nvc.SeekMode.PREV_KEY_FRAME,
)
self.assertTrue(self.nvDmx.Seek(sk, packet))
pdata = nvc.PacketData()
self.nvDmx.LastPacketData(pdata)
self.assertLessEqual(pdata.dts * self.nvDmx.Timebase(), timestamp)
def test_demux_single_packet(self):
packet = np.ndarray(shape=(0), dtype=np.uint8)
while self.nvDmx.DemuxSinglePacket(packet):
self.assertNotEqual(0, packet.size)
def test_sei(self):
total_sei_size = 0
while True:
packet = np.ndarray(shape=(0), dtype=np.uint8)
sei = np.ndarray(shape=(0), dtype=np.uint8)
if not self.nvDmx.DemuxSinglePacket(packet, sei):
break
total_sei_size += sei.size
self.assertNotEqual(0, total_sei_size)
def test_lastpacketdata(self):
try:
pdata = nvc.PacketData()
self.nvDmx.LastPacketData(pdata)
except:
self.fail("Test case raised exception unexpectedly!")
if __name__ == "__main__":
unittest.main()
| VideoProcessingFramework-master | tests/test_PyFfmpegDemuxer.py |
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from os.path import join, dirname
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import unittest
try:
import pycuda.driver as cuda
import torch
except ImportError as e:
raise unittest.SkipTest(f"Skipping because of insufficient dependencies: {e}")
# Ground truth information about input video
gt_file = join(dirname(__file__), "test.mp4")
gt_width = 848
gt_height = 464
gt_is_vfr = False
gt_pix_fmt = nvc.PixelFormat.NV12
gt_framerate = 30
gt_num_frames = 96
gt_color_space = nvc.ColorSpace.BT_709
gt_color_range = nvc.ColorRange.MPEG
class TestSurfacePycuda(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
self.gpu_id = 0
enc_file = gt_file
cuda.init()
self.cuda_ctx = cuda.Device(self.gpu_id).retain_primary_context()
self.cuda_ctx.push()
self.cuda_str = cuda.Stream()
self.cuda_ctx.pop()
self.nvDec = nvc.PyNvDecoder(
enc_file, self.cuda_ctx.handle, self.cuda_str.handle
)
self.nvDwn = nvc.PySurfaceDownloader(
self.nvDec.Width(),
self.nvDec.Height(),
self.nvDec.Format(),
self.cuda_ctx.handle,
self.cuda_str.handle,
)
def test_pycuda_memcpy_Surface_Surface(self):
while True:
surf_src = self.nvDec.DecodeSingleSurface()
if surf_src.Empty():
break
src_plane = surf_src.PlanePtr()
surf_dst = nvc.Surface.Make(
self.nvDec.Format(),
self.nvDec.Width(),
self.nvDec.Height(),
self.gpu_id,
)
self.assertFalse(surf_dst.Empty())
dst_plane = surf_dst.PlanePtr()
memcpy_2d = cuda.Memcpy2D()
memcpy_2d.width_in_bytes = src_plane.Width() * src_plane.ElemSize()
memcpy_2d.src_pitch = src_plane.Pitch()
memcpy_2d.dst_pitch = dst_plane.Pitch()
memcpy_2d.width = src_plane.Width()
memcpy_2d.height = src_plane.Height()
memcpy_2d.set_src_device(src_plane.GpuMem())
memcpy_2d.set_dst_device(dst_plane.GpuMem())
memcpy_2d(self.cuda_str)
frame_src = np.ndarray(shape=(0), dtype=np.uint8)
if not self.nvDwn.DownloadSingleSurface(surf_src, frame_src):
self.fail("Failed to download decoded surface")
frame_dst = np.ndarray(shape=(0), dtype=np.uint8)
if not self.nvDwn.DownloadSingleSurface(surf_dst, frame_dst):
self.fail("Failed to download decoded surface")
if not np.array_equal(frame_src, frame_dst):
self.fail("Video frames are not equal")
def test_pycuda_memcpy_Surface_Tensor(self):
while True:
surf_src = self.nvDec.DecodeSingleSurface()
if surf_src.Empty():
break
src_plane = surf_src.PlanePtr()
surface_tensor = torch.zeros(
src_plane.Height(),
src_plane.Width(),
1,
dtype=torch.uint8,
device=torch.device(f"cuda:{self.gpu_id}"),
)
dst_plane = surface_tensor.data_ptr()
memcpy_2d = cuda.Memcpy2D()
memcpy_2d.width_in_bytes = src_plane.Width() * src_plane.ElemSize()
memcpy_2d.src_pitch = src_plane.Pitch()
memcpy_2d.dst_pitch = self.nvDec.Width()
memcpy_2d.width = src_plane.Width()
memcpy_2d.height = src_plane.Height()
memcpy_2d.set_src_device(src_plane.GpuMem())
memcpy_2d.set_dst_device(dst_plane)
memcpy_2d(self.cuda_str)
frame_src = np.ndarray(shape=(0), dtype=np.uint8)
if not self.nvDwn.DownloadSingleSurface(surf_src, frame_src):
self.fail("Failed to download decoded surface")
frame_dst = surface_tensor.to("cpu").numpy()
frame_dst = frame_dst.reshape((src_plane.Height() * src_plane.Width()))
if not np.array_equal(frame_src, frame_dst):
self.fail("Video frames are not equal")
def test_list_append(self):
dec_frames = []
nvDec = nvc.PyNvDecoder(gt_file, 0)
# Decode all the surfaces and store them in the list.
while True:
surf = nvDec.DecodeSingleSurface()
if not surf or surf.Empty():
break
else:
# Please note that we need to clone surfaces because the surfaces
# returned by the decoder belong to its internal memory pool.
dec_frames.append(surf.Clone(self.gpu_id))
# Make sure all the surfaces are kept.
self.assertEqual(len(dec_frames), gt_num_frames)
# Now compare saved surfaces with data from the decoder to make sure
# no corruption happened.
nvDec = nvc.PyNvDecoder(gt_file, 0)
nvDwn = nvc.PySurfaceDownloader(
nvDec.Width(), nvDec.Height(), nvDec.Format(), self.gpu_id
)
for surf in dec_frames:
dec_frame = np.ndarray(shape=(0), dtype=np.uint8)
svd_frame = np.ndarray(shape=(0), dtype=np.uint8)
nvDwn.DownloadSingleSurface(surf, svd_frame)
nvDec.DecodeSingleFrame(dec_frame)
self.assertTrue(np.array_equal(dec_frame, svd_frame))
if __name__ == "__main__":
unittest.main()
| VideoProcessingFramework-master | tests/test_PySurface.py |
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from os.path import join, dirname
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import unittest
# Ground truth information about input video
gt_file = join(dirname(__file__), "test.mp4")
gt_file_res_change = join(dirname(__file__), "test_res_change.h264")
gt_width = 848
gt_height = 464
gt_res_change = 47
gt_is_vfr = False
gt_pix_fmt = nvc.PixelFormat.NV12
gt_framerate = 30
gt_num_frames = 96
gt_timebase = 8.1380e-5
gt_color_space = nvc.ColorSpace.BT_709
gt_color_range = nvc.ColorRange.MPEG
class TestEncoderBasic(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
def test_encode_all_surfaces(self):
gpu_id = 0
res = str(gt_width) + "x" + str(gt_height)
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
nvDec = nvc.PyNvDecoder(gt_file, gpu_id)
nvEnc = nvc.PyNvEncoder(
{
"preset": "P4",
"tuning_info": "high_quality",
"codec": "h264",
"profile": "high",
"s": res,
"bitrate": "1M",
},
gpu_id,
)
frames_sent = 0
frames_recv = 0
while True:
dec_surf = nvDec.DecodeSingleSurface()
if not dec_surf or dec_surf.Empty():
break
frames_sent += 1
nvEnc.EncodeSingleSurface(dec_surf, encFrame)
if encFrame.size:
frames_recv += 1
while True:
success = nvEnc.FlushSinglePacket(encFrame)
if success and encFrame.size:
frames_recv += 1
else:
break
self.assertEqual(frames_sent, frames_recv)
def test_reconfigure(self):
gpu_id = 0
res = str(gt_width) + "x" + str(gt_height)
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
nvDec = nvc.PyNvDecoder(gt_file_res_change, gpu_id)
nvRcn = nvc.PyNvDecoder(
gt_width, gt_height, nvc.PixelFormat.NV12, nvc.CudaVideoCodec.H264, gpu_id
)
nvEnc = nvc.PyNvEncoder(
{
"preset": "P4",
"tuning_info": "high_quality",
"codec": "h264",
"profile": "high",
"s": res,
"bitrate": "1M",
},
gpu_id,
)
frames_recn = 0
while True:
dec_surf = nvDec.DecodeSingleSurface()
if not dec_surf or dec_surf.Empty():
break
sw = dec_surf.Width()
sh = dec_surf.Height()
if sw != gt_width or sh != gt_height:
# Flush encoder before reconfigure.
# Some encoded frames will be lost but that doesn't matter.
# Decoder will be reconfigured upon resolution change anyway.
while nvEnc.FlushSinglePacket(encFrame):
frames_recn += 1
# Now reconfigure.
res = str(sw) + "x" + str(sh)
self.assertTrue(
nvEnc.Reconfigure({"s": res}, force_idr=True, reset_encoder=True)
)
self.assertEqual(nvEnc.Width(), sw)
self.assertEqual(nvEnc.Height(), sh)
nvEnc.EncodeSingleSurface(dec_surf, encFrame)
if encFrame.size:
dec_surf = nvRcn.DecodeSurfaceFromPacket(encFrame)
if dec_surf and not dec_surf.Empty():
frames_recn += 1
if frames_recn < gt_res_change:
self.assertEqual(dec_surf.Width(), gt_width)
self.assertEqual(dec_surf.Height(), gt_height)
else:
self.assertEqual(dec_surf.Width(), sw)
self.assertEqual(dec_surf.Height(), sh)
if __name__ == "__main__":
unittest.main()
| VideoProcessingFramework-master | tests/test_PyNvEncoder.py |
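# Sketch: a minimal file-to-file transcode assembled from the calls used in
# the encoder tests above (PyNvDecoder.DecodeSingleSurface,
# PyNvEncoder.EncodeSingleSurface, FlushSinglePacket). The output is a raw
# H.264 elementary stream; file names, GPU id and encoder settings are
# illustrative assumptions.
import numpy as np
import PyNvCodec as nvc

def transcode_h264(src_path, dst_path, gpu_id=0):
    nvDec = nvc.PyNvDecoder(src_path, gpu_id)
    res = str(nvDec.Width()) + "x" + str(nvDec.Height())
    nvEnc = nvc.PyNvEncoder(
        {"preset": "P4", "codec": "h264", "profile": "high", "s": res, "bitrate": "1M"},
        gpu_id,
    )
    encFrame = np.ndarray(shape=(0), dtype=np.uint8)
    with open(dst_path, "wb") as dst:
        while True:
            surf = nvDec.DecodeSingleSurface()
            if not surf or surf.Empty():
                break
            nvEnc.EncodeSingleSurface(surf, encFrame)
            if encFrame.size:
                dst.write(encFrame.tobytes())
        # Drain the encoder queue, mirroring the flush loop in the tests.
        while nvEnc.FlushSinglePacket(encFrame) and encFrame.size:
            dst.write(encFrame.tobytes())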
import PyNvCodec as nvc
import numpy as np
import unittest
from os.path import join, dirname
def test_issue_455():
gpuID = 0
nvEnc = nvc.PyNvEncoder({'bitrate': '30K', 'fps': '10', 'codec': 'hevc', 's': '256x256'}, gpuID)
nvDec = nvc.PyNvDecoder(256, 256, nvc.PixelFormat.NV12, nvc.CudaVideoCodec.HEVC, gpuID)
rawFrame = np.random.randint(0, 255, size=(256, 256, 3), dtype=np.uint8)
print('Raw frame size is ' + str(rawFrame.size) + ' bytes.')
encodedFrame = np.ndarray(shape=(0), dtype=np.uint8)
count, success = 0, False
while success is not True and count < 10:
success = nvEnc.EncodeSingleFrame(rawFrame, encodedFrame, sync=False)
count += 1
print('Encoded frame size is ' + str(encodedFrame.size) + ' bytes.')
exception_raised = False
try:
success = nvDec.DecodeSingleFrame(encodedFrame)
except Exception as ex:
exception_raised = True
assert ("Tried to call DecodeSurface/DecodeFrame on a Decoder that has been initialized without a built-in "
"demuxer. Please use DecodeSurfaceFromPacket/DecodeFrameFromPacket instead or intialize the decoder"
" with a demuxer when decoding from a file" == str(ex))
assert exception_raised
decodedFrame = np.ndarray(shape=(0), dtype=np.uint8)
success = nvDec.DecodeFrameFromPacket(decodedFrame, encodedFrame)
@unittest.skip('Skipping because still causing segfault due to built-in demuxer being NULL')
def test_issue_457():
encFilePath = join(dirname(__file__), "test_res_change.h264")
nvDec = nvc.PyFfmpegDecoder(encFilePath, {}, 1)
nvDec.GetMotionVectors()
| VideoProcessingFramework-master | tests/test_reported_bugs.py |
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from os.path import join, dirname
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import unittest
import random
# Ground truth information about input video
gt_file = join(dirname(__file__), "test.mp4")
gt_file_res_change = join(dirname(__file__), "test_res_change.h264")
gt_width = 848
gt_height = 464
gt_res_change = 47
gt_res_change_factor = 0.5
gt_is_vfr = False
gt_pix_fmt = nvc.PixelFormat.NV12
gt_framerate = 30
gt_num_frames = 96
gt_timebase = 8.1380e-5
gt_color_space = nvc.ColorSpace.BT_709
gt_color_range = nvc.ColorRange.MPEG
class TestDecoderBasic(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
gpu_id = 0
enc_file = gt_file
self.nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
def test_width(self):
self.assertEqual(gt_width, self.nvDec.Width())
def test_height(self):
self.assertEqual(gt_height, self.nvDec.Height())
def test_color_space(self):
self.assertEqual(gt_color_space, self.nvDec.ColorSpace())
def test_color_range(self):
self.assertEqual(gt_color_range, self.nvDec.ColorRange())
def test_format(self):
self.assertEqual(gt_pix_fmt, self.nvDec.Format())
def test_framerate(self):
self.assertEqual(gt_framerate, self.nvDec.Framerate())
def test_avgframerate(self):
self.assertEqual(gt_framerate, self.nvDec.AvgFramerate())
def test_isvfr(self):
self.assertEqual(gt_is_vfr, self.nvDec.IsVFR())
def test_framesize(self):
frame_size = int(self.nvDec.Width() * self.nvDec.Height() * 3 / 2)
self.assertEqual(frame_size, self.nvDec.Framesize())
def test_timebase(self):
epsilon = 1e-4
self.assertLessEqual(np.abs(gt_timebase - self.nvDec.Timebase()), epsilon)
def test_lastpacketdata(self):
try:
pdata = nvc.PacketData()
self.nvDec.LastPacketData(pdata)
except:
self.fail("Test case raised exception unexpectedly!")
class TestDecoderStandalone(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
def test_decodesurfacefrompacket(self):
nvDmx = nvc.PyFFmpegDemuxer(gt_file, {})
nvDec = nvc.PyNvDecoder(
nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvDmx.Codec(), 0
)
packet = np.ndarray(shape=(0), dtype=np.uint8)
while nvDmx.DemuxSinglePacket(packet):
surf = nvDec.DecodeSurfaceFromPacket(packet)
self.assertIsNotNone(surf)
if not surf.Empty():
self.assertNotEqual(0, surf.PlanePtr().GpuMem())
self.assertEqual(nvDmx.Width(), surf.Width())
self.assertEqual(nvDmx.Height(), surf.Height())
self.assertEqual(nvDmx.Format(), surf.Format())
return
def test_decodesurfacefrompacket_outpktdata(self):
nvDmx = nvc.PyFFmpegDemuxer(gt_file, {})
nvDec = nvc.PyNvDecoder(
nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvDmx.Codec(), 0
)
dec_frames = 0
packet = np.ndarray(shape=(0), dtype=np.uint8)
out_bst_size = 0
while nvDmx.DemuxSinglePacket(packet):
in_pdata = nvc.PacketData()
nvDmx.LastPacketData(in_pdata)
out_pdata = nvc.PacketData()
surf = nvDec.DecodeSurfaceFromPacket(in_pdata, packet, out_pdata)
self.assertIsNotNone(surf)
if not surf.Empty():
dec_frames += 1
out_bst_size += out_pdata.bsl
while True:
out_pdata = nvc.PacketData()
surf = nvDec.FlushSingleSurface(out_pdata)
if not surf.Empty():
out_bst_size += out_pdata.bsl
else:
break
self.assertNotEqual(0, out_bst_size)
def test_decode_all_surfaces(self):
nvDmx = nvc.PyFFmpegDemuxer(gt_file, {})
nvDec = nvc.PyNvDecoder(
nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvDmx.Codec(), 0
)
dec_frames = 0
packet = np.ndarray(shape=(0), dtype=np.uint8)
while nvDmx.DemuxSinglePacket(packet):
surf = nvDec.DecodeSurfaceFromPacket(packet)
self.assertIsNotNone(surf)
if not surf.Empty():
dec_frames += 1
while True:
surf = nvDec.FlushSingleSurface()
self.assertIsNotNone(surf)
if not surf.Empty():
dec_frames += 1
else:
break
self.assertEqual(gt_num_frames, dec_frames)
class TestDecoderBuiltin(unittest.TestCase):
def __init__(self, methodName):
super().__init__(methodName=methodName)
def test_decodesinglesurface(self):
gpu_id = 0
enc_file = gt_file
nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
try:
surf = nvDec.DecodeSingleSurface()
self.assertIsNotNone(surf)
self.assertFalse(surf.Empty())
except:
self.fail("Test case raised exception unexpectedly!")
def test_decodesinglesurface_outpktdata(self):
gpu_id = 0
enc_file = gt_file
nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
dec_frame = 0
last_pts = nvc.NO_PTS
while True:
pdata = nvc.PacketData()
surf = nvDec.DecodeSingleSurface(pdata)
if surf.Empty():
break
self.assertNotEqual(pdata.pts, nvc.NO_PTS)
if 0 != dec_frame:
self.assertGreaterEqual(pdata.pts, last_pts)
dec_frame += 1
last_pts = pdata.pts
def test_decodesinglesurface_sei(self):
gpu_id = 0
enc_file = gt_file
nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
total_sei_size = 0
while True:
sei = np.ndarray(shape=(0), dtype=np.uint8)
surf = nvDec.DecodeSingleSurface(sei)
if surf.Empty():
break
total_sei_size += sei.size
self.assertNotEqual(0, total_sei_size)
def test_decodesinglesurface_seek(self):
gpu_id = 0
enc_file = gt_file
nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
start_frame = random.randint(0, gt_num_frames - 1)
dec_frames = 1
seek_ctx = nvc.SeekContext(seek_frame=start_frame)
surf = nvDec.DecodeSingleSurface(seek_ctx)
self.assertNotEqual(True, surf.Empty())
while True:
surf = nvDec.DecodeSingleSurface()
if surf.Empty():
break
dec_frames += 1
self.assertEqual(gt_num_frames - start_frame, dec_frames)
def test_decodesinglesurface_cmp_vs_continuous(self):
gpu_id = 0
enc_file = gt_file
nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
# First get reconstructed frame with seek
for idx in range(0, gt_num_frames):
seek_ctx = nvc.SeekContext(seek_frame=idx)
frame_seek = np.ndarray(shape=(0), dtype=np.uint8)
pdata_seek = nvc.PacketData()
self.assertTrue(nvDec.DecodeSingleFrame(frame_seek, seek_ctx, pdata_seek))
# Then get it with continuous decoding
nvDec = nvc.PyNvDecoder(gt_file, 0)
frame_cont = np.ndarray(shape=(0), dtype=np.uint8)
pdata_cont = nvc.PacketData()
for i in range(0, idx + 1):
self.assertTrue(nvDec.DecodeSingleFrame(frame_cont, pdata_cont))
# Compare frames
if not np.array_equal(frame_seek, frame_cont):
fail_msg = ""
fail_msg += "Seek frame number: " + str(idx) + ".\n"
fail_msg += "Seek frame pts: " + str(pdata_seek.pts) + ".\n"
fail_msg += "Cont frame pts: " + str(pdata_cont.pts) + ".\n"
fail_msg += "Video frames are not same\n"
self.fail(fail_msg)
def test_decode_all_surfaces(self):
nvDec = nvc.PyNvDecoder(gt_file, 0)
dec_frames = 0
while True:
surf = nvDec.DecodeSingleSurface()
if not surf or surf.Empty():
break
dec_frames += 1
self.assertEqual(gt_num_frames, dec_frames)
def test_decode_resolution_change(self):
nvDec = nvc.PyNvDecoder(gt_file_res_change, 0)
rw = int(gt_width * gt_res_change_factor)
rh = int(gt_height * gt_res_change_factor)
dec_frames = 0
while True:
surf = nvDec.DecodeSingleSurface()
if not surf or surf.Empty():
break
else:
dec_frames += 1
if dec_frames < gt_res_change:
self.assertEqual(surf.Width(), gt_width)
self.assertEqual(surf.Height(), gt_height)
else:
self.assertEqual(surf.Width(), rw)
self.assertEqual(surf.Height(), rh)
if __name__ == "__main__":
unittest.main()
| VideoProcessingFramework-master | tests/test_PyNvDecoder.py |
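# Sketch: a reusable helper built from the seek API exercised above
# (SeekContext + DecodeSingleFrame). The function name and defaults are
# illustrative, not part of VPF; the returned array holds the raw NV12 bytes.
import numpy as np
import PyNvCodec as nvc

def decode_frame_at(enc_file, frame_num, gpu_id=0):
    nvDec = nvc.PyNvDecoder(enc_file, gpu_id)
    frame = np.ndarray(shape=(0), dtype=np.uint8)
    seek_ctx = nvc.SeekContext(seek_frame=frame_num)
    pdata = nvc.PacketData()
    if nvDec.DecodeSingleFrame(frame, seek_ctx, pdata):
        return frame
    return None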
import os
import sys
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
]
autosummary_generate = True
autosummary_imported_members = True
autodoc_default_options = {"members": True, "member-order": "bysource"}
source_suffix = ".rst"
master_doc = "index"
project = "VPF"
copyright = "2022 NVIDIA Corporation"
author = ""
language = None
exclude_patterns = ["_build"]
pygments_style = "sphinx"
todo_include_todos = False
html_theme = "haiku"
htmlhelp_basename = "VPFdoc"
| VideoProcessingFramework-master | docs/conf.py |
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from torch import onnx
from torch._C import ListType
import torchvision
from subprocess import PIPE, STDOUT, run
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import torch
import tensorrt as trt
import pycuda
import pycuda.driver as cuda
import numpy as np
import PyNvCodec as nvc
import PytorchNvCodec as pnvc
from torchvision import transforms
resnet_categories = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead",
"electric ray",
"stingray",
"cock",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel",
"kite",
"bald eagle",
"vulture",
"great grey owl",
"European fire salamander",
"common newt",
"eft",
"spotted salamander",
"axolotl",
"bullfrog",
"tree frog",
"tailed frog",
"loggerhead",
"leatherback turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"common iguana",
"American chameleon",
"whiptail",
"agama",
"frilled lizard",
"alligator lizard",
"Gila monster",
"green lizard",
"African chameleon",
"Komodo dragon",
"African crocodile",
"American alligator",
"triceratops",
"thunder snake",
"ringneck snake",
"hognose snake",
"green snake",
"king snake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"rock python",
"Indian cobra",
"green mamba",
"sea snake",
"horned viper",
"diamondback",
"sidewinder",
"trilobite",
"harvestman",
"scorpion",
"black and gold garden spider",
"barn spider",
"garden spider",
"black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie chicken",
"peacock",
"quail",
"partridge",
"African grey",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"American egret",
"bittern",
"crane",
"limpkin",
"European gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"red-backed sandpiper",
"redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog",
"Pekinese",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound",
"basset",
"beagle",
"bloodhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound",
"English foxhound",
"redbone",
"borzoi",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound",
"Norwegian elkhound",
"otterhound",
"Saluki",
"Scottish deerhound",
"Weimaraner",
"Staffordshire bullterrier",
"American Staffordshire terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier",
"Airedale",
"cairn",
"Australian terrier",
"Dandie Dinmont",
"Boston bull",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier",
"Tibetan terrier",
"silky terrier",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla",
"English setter",
"Irish setter",
"Gordon setter",
"Brittany spaniel",
"clumber",
"English springer",
"Welsh springer spaniel",
"cocker spaniel",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog",
"Shetland sheepdog",
"collie",
"Border collie",
"Bouvier des Flandres",
"Rottweiler",
"German shepherd",
"Doberman",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard",
"Eskimo dog",
"malamute",
"Siberian husky",
"dalmatian",
"affenpinscher",
"basenji",
"pug",
"Leonberg",
"Newfoundland",
"Great Pyrenees",
"Samoyed",
"Pomeranian",
"chow",
"keeshond",
"Brabancon griffon",
"Pembroke",
"Cardigan",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf",
"white wolf",
"red wolf",
"coyote",
"dingo",
"dhole",
"African hunting dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian cat",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"ice bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"long-horned beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket",
"walking stick",
"cockroach",
"mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"admiral",
"ringlet",
"monarch",
"cabbage butterfly",
"sulphur butterfly",
"lycaenid",
"starfish",
"sea urchin",
"sea cucumber",
"wood rabbit",
"hare",
"Angora",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"sorrel",
"zebra",
"hog",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram",
"bighorn",
"ibex",
"hartebeest",
"impala",
"gazelle",
"Arabian camel",
"llama",
"weasel",
"mink",
"polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas",
"baboon",
"macaque",
"langur",
"colobus",
"proboscis monkey",
"marmoset",
"capuchin",
"howler monkey",
"titi",
"spider monkey",
"squirrel monkey",
"Madagascar cat",
"indri",
"Indian elephant",
"African elephant",
"lesser panda",
"giant panda",
"barracouta",
"eel",
"coho",
"rock beauty",
"anemone fish",
"sturgeon",
"gar",
"lionfish",
"puffer",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibian",
"analog clock",
"apiary",
"apron",
"ashcan",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint",
"Band Aid",
"banjo",
"bannister",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"barrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap",
"bath towel",
"bathtub",
"beach wagon",
"beacon",
"beaker",
"bearskin",
"beer bottle",
"beer glass",
"bell cote",
"bib",
"bicycle-built-for-two",
"bikini",
"binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsled",
"bolo tie",
"bonnet",
"bookcase",
"bookshop",
"bottlecap",
"bow",
"bow tie",
"brass",
"brassiere",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"bullet train",
"butcher shop",
"cab",
"caldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"carpenter's kit",
"carton",
"car wheel",
"cash machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"cellular telephone",
"chain",
"chainlink fence",
"chain mail",
"chain saw",
"chest",
"chiffonier",
"chime",
"china cabinet",
"Christmas stocking",
"church",
"cinema",
"cleaver",
"cliff dwelling",
"cloak",
"clog",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil",
"combination lock",
"computer keyboard",
"confectionery",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishrag",
"dishwasher",
"disk brake",
"dock",
"dogsled",
"dome",
"doormat",
"drilling platform",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa",
"file",
"fireboat",
"fire engine",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gasmask",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golfcart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"grille",
"grocery store",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower",
"hand-held computer",
"handkerchief",
"hard disc",
"harmonica",
"harp",
"harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoopskirt",
"horizontal bar",
"horse cart",
"hourglass",
"iPod",
"iron",
"jack-o'-lantern",
"jean",
"jeep",
"jersey",
"jigsaw puzzle",
"jinrikisha",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"liner",
"lipstick",
"Loafer",
"lotion",
"loudspeaker",
"loupe",
"lumbermill",
"magnetic compass",
"mailbag",
"mailbox",
"maillot",
"maillot",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine chest",
"megalith",
"microphone",
"microwave",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter",
"mountain bike",
"mountain tent",
"mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"organ",
"oscilloscope",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle",
"paddlewheel",
"padlock",
"paintbrush",
"pajama",
"palace",
"panpipe",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"passenger car",
"patio",
"pay-phone",
"pedestal",
"pencil box",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"pick",
"pickelhaube",
"picket fence",
"pickup",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate",
"pitcher",
"plane",
"planetarium",
"plastic bag",
"plate rack",
"plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"pop bottle",
"pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"projectile",
"projector",
"puck",
"punching bag",
"purse",
"quill",
"quilt",
"racer",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"rubber eraser",
"rugby ball",
"rule",
"running shoe",
"safe",
"safety pin",
"saltshaker",
"sandal",
"sarong",
"sax",
"scabbard",
"scale",
"school bus",
"schooner",
"scoreboard",
"screen",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe shop",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar dish",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch",
"stove",
"strainer",
"streetcar",
"stretcher",
"studio couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglass",
"sunglasses",
"sunscreen",
"suspension bridge",
"swab",
"sweatshirt",
"swimming trunks",
"swing",
"switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy",
"television",
"tennis ball",
"thatch",
"theater curtain",
"thimble",
"thresher",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toyshop",
"tractor",
"trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright",
"vacuum",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"warplane",
"washbasin",
"washer",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool",
"worm fence",
"wreck",
"yawl",
"yurt",
"web site",
"comic book",
"crossword puzzle",
"street sign",
"traffic light",
"book jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"ice lolly",
"French loaf",
"bagel",
"pretzel",
"cheeseburger",
"hotdog",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce",
"dough",
"meat loaf",
"pizza",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeside",
"promontory",
"sandbar",
"seashore",
"valley",
"volcano",
"ballplayer",
"groom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"hip",
"buckeye",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn",
"earthstar",
"hen-of-the-woods",
"bolete",
"ear",
"toilet tissue",
]
class PyTorchTensorHolder(pycuda.driver.PointerHolderBase):
def __init__(self, tensor):
super(PyTorchTensorHolder, self).__init__()
self.tensor = tensor
def get_pointer(self):
return self.tensor.data_ptr()
class HostDeviceMem:
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
class TensorRTContext:
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
@classmethod
def build_serialized_engine_onnx(cls, model_file):
builder = trt.Builder(cls.TRT_LOGGER)
network = builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
)
config = builder.create_builder_config()
parser = trt.OnnxParser(network, cls.TRT_LOGGER)
config.max_workspace_size = 1 * 1 << 30 # 1GB
# Load the Onnx model and parse it in order to populate the TensorRT network.
with open(model_file, "rb") as model:
if not parser.parse(model.read()):
print("ERROR: Failed to parse the ONNX file.")
for error in range(parser.num_errors):
print(parser.get_error(error))
return None
return builder.build_serialized_network(network, config)
def __init__(self, trt_nn_file: str, gpu_id: int) -> None:
self.device = cuda.Device(gpu_id)
self.cuda_context = self.device.retain_primary_context()
self.push_cuda_ctx()
self.stream = cuda.Stream()
self.logger = TensorRTContext.TRT_LOGGER
self.runtime = trt.Runtime(self.logger)
f = open(trt_nn_file, "rb")
self.engine = self.runtime.deserialize_cuda_engine(f.read())
self.inputs, self.outputs, self.bindings = self.allocate_buffer()
self.context = self.engine.create_execution_context()
def __del__(self) -> None:
self.pop_cuda_ctx()
def push_cuda_ctx(self) -> None:
self.cuda_context.push()
def pop_cuda_ctx(self) -> None:
self.cuda_context.pop()
def allocate_buffer(self):
bindings = []
inputs = []
outputs = []
for binding in self.engine:
size = (
trt.volume(self.engine.get_binding_shape(binding))
* self.engine.max_batch_size
)
dtype = trt.nptype(self.engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
if self.engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings
def run_inference(self, tensor_image) -> str:
# Copy from PyTorch tensor to plain CUDA memory
cuda.memcpy_dtod(
self.bindings[0],
PyTorchTensorHolder(tensor_image),
tensor_image.nelement() * tensor_image.element_size(),
)
# Run inference
self.context.execute_async_v2(
bindings=self.bindings, stream_handle=self.stream.handle
)
# Copy outputs from GPU
for out in self.outputs:
cuda.memcpy_dtoh_async(out.host, out.device, self.stream)
        # Find the most probable class and return the resnet category description
[result] = [out.host for out in self.outputs]
return resnet_categories[np.argmax(result)]
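# A minimal usage sketch for TensorRTContext (hedged; it assumes a serialized
# engine "./resnet50.trt" produced by the ONNX export helper below and a CUDA
# float tensor "img" of shape (1, 3, 224, 224) that is already preprocessed):
#
#   cuda.init()
#   trt_ctx = TensorRTContext("./resnet50.trt", gpu_id=0)
#   label = trt_ctx.run_inference(img)
#   print(label)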
# Resnet expects 3-channel planar RGB images of at least 224x224 pixels.
target_w, target_h = 224, 224
def out(command):
result = run(command, text=True, shell=True, stdout=PIPE, stderr=STDOUT)
return result.stdout
def Resnet50ExportToOnnx(nn_onnx: str, nn_trt: str) -> None:
    nn_onnx_exists = os.path.exists(nn_onnx) and os.path.getsize(nn_onnx)
    nn_trt_exists = os.path.exists(nn_trt) and os.path.getsize(nn_trt)
    if nn_onnx_exists and nn_trt_exists:
        print("Both ONNX and TRT files exist. Skipping the export.")
        return
    torch.manual_seed(0)
    resnet50 = torchvision.models.resnet50(pretrained=True)
    resnet50.eval()
    input_data = torch.randn(1, 3, target_h, target_w, dtype=torch.float32)
    input_names = ["input"]
    output_names = ["output"]
    print("Exporting resnet50 to ONNX file...")
    torch.onnx.export(
        resnet50,
        input_data,
        nn_onnx,
        input_names=input_names,
        output_names=output_names,
        verbose=False,
        opset_version=9,
    )
    print("Exporting resnet50 to TRT file...")
    engine = TensorRTContext.build_serialized_engine_onnx(nn_onnx)
    with open(nn_trt, "wb") as f:
        f.write(engine)
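# Note (general TensorRT behaviour, not specific to this sample): a serialized
# .trt engine is tied to the GPU architecture and TensorRT version it was built
# with, so the cached file may need to be deleted and re-generated after a
# driver/TensorRT upgrade or when moving to a different GPU.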
def infer_on_video(gpu_id: int, input_video: str, trt_nn_file: str):
# Init TRT stuff
cuda.init()
trt_ctx = TensorRTContext(trt_nn_file, gpu_id)
# Init HW decoder, convertor, resizer + tensor that video frames will be
# exported to
nvDec = nvc.PyNvDecoder(input_video, gpu_id)
to_yuv = nvc.PySurfaceConverter(
nvDec.Width(),
nvDec.Height(),
nvc.PixelFormat.NV12,
nvc.PixelFormat.YUV420,
gpu_id,
)
to_dim = nvc.PySurfaceResizer(target_w, target_h, nvc.PixelFormat.YUV420, gpu_id)
to_rgb = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.YUV420, nvc.PixelFormat.RGB, gpu_id
)
to_pln = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.RGB, nvc.PixelFormat.RGB_PLANAR, gpu_id
)
# Use most widespread bt601 and mpeg just for illustration purposes.
cc_ctx = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_601, nvc.ColorRange.MPEG)
# Decoding cycle + inference on video frames.
while True:
# Decode 1 compressed video frame to CUDA memory.
nv12_surface = nvDec.DecodeSingleSurface()
if nv12_surface.Empty():
print("Can not decode frame")
break
# Convert from NV12 to YUV420.
# This extra step is required because not all NV12 -> RGB conversions
# implemented in NPP support all color spaces and ranges.
yuv420 = to_yuv.Execute(nv12_surface, cc_ctx)
if yuv420.Empty():
print("Can not convert nv12 -> yuv420")
break
# Downscale YUV420.
yuv_small = to_dim.Execute(yuv420)
if yuv_small.Empty():
print("Can not downscale yuv420 surface")
break
# Convert from YUV420 to interleaved RGB.
rgb24_small = to_rgb.Execute(yuv_small, cc_ctx)
if rgb24_small.Empty():
print("Can not convert yuv420 -> rgb")
break
# Convert to planar RGB.
rgb24_planar = to_pln.Execute(rgb24_small, cc_ctx)
if rgb24_planar.Empty():
print("Can not convert rgb -> rgb planar")
break
# Export to PyTorch tensor
surf_plane = rgb24_planar.PlanePtr()
img_tensor = pnvc.makefromDevicePtrUint8(
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
img_tensor.resize_(3, target_h, target_w)
img_tensor = img_tensor.type(dtype=torch.cuda.FloatTensor)
img_tensor = torch.divide(img_tensor, 255.0)
data_transforms = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
surface_tensor = data_transforms(img_tensor)
# Run inference
img_type = trt_ctx.run_inference(surface_tensor)
# Output result
print("Image type: ", img_type)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Provide gpu id and path to input video file.")
        exit(1)
gpu_id = int(sys.argv[1])
input_video = sys.argv[2]
onnx_file = "./resnet50.onnx"
trt_file = "./resnet50.trt"
    Resnet50ExportToOnnx(onnx_file, trt_file)
infer_on_video(gpu_id, input_video, trt_file)
| VideoProcessingFramework-master | samples/SampleTensorRTResnet.py |
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import pycuda.driver as cuda
import PyNvCodec as nvc
import numpy as np
from threading import Thread
class Worker(Thread):
def __init__(self, gpuID: int, width: int, height: int, rawFilePath: str):
Thread.__init__(self)
res = str(width) + "x" + str(height)
# Retain primary CUDA device context and create separate stream per thread.
self.ctx = cuda.Device(gpuID).retain_primary_context()
self.ctx.push()
self.str = cuda.Stream()
self.ctx.pop()
# Initialize color conversion context.
# Accurate color rendition doesn't matter in this sample so just use
# most common bt601 and mpeg.
self.cc_ctx = nvc.ColorspaceConversionContext(
color_space=nvc.ColorSpace.BT_601, color_range=nvc.ColorRange.MPEG
)
self.nvUpl = nvc.PyFrameUploader(
width, height, nvc.PixelFormat.YUV420, self.ctx.handle, self.str.handle
)
self.nvCvt = nvc.PySurfaceConverter(
width,
height,
nvc.PixelFormat.YUV420,
nvc.PixelFormat.NV12,
self.ctx.handle,
self.str.handle,
)
self.nvEnc = nvc.PyNvEncoder(
{"preset": "P1", "codec": "h264", "s": res},
self.ctx.handle,
self.str.handle,
)
self.rawFile = open(rawFilePath, "rb")
self.encFrame = np.ndarray(shape=(0), dtype=np.uint8)
def run(self):
try:
while True:
frameSize = self.nvEnc.Width() * self.nvEnc.Height() * 3 / 2
rawFrame = np.fromfile(self.rawFile, np.uint8, count=int(frameSize))
if not (rawFrame.size):
print("No more video frames.")
break
rawSurface = self.nvUpl.UploadSingleFrame(rawFrame)
if rawSurface.Empty():
print("Failed to upload video frame to GPU.")
break
cvtSurface = self.nvCvt.Execute(rawSurface, self.cc_ctx)
if cvtSurface.Empty():
print("Failed to do color conversion.")
break
self.nvEnc.EncodeSingleSurface(cvtSurface, self.encFrame)
# Encoder is asynchronous, so we need to flush it
success = self.nvEnc.Flush(self.encFrame)
except Exception as e:
print(getattr(e, "message", str(e)))
def create_threads(gpu_id: int, width: int, height: int, input: str, num_threads: int):
cuda.init()
thread_pool = []
for i in range(0, num_threads):
thread = Worker(gpu_id, width, height, input)
thread.start()
thread_pool.append(thread)
for thread in thread_pool:
thread.join()
if __name__ == "__main__":
print("This sample encodes multiple videos simultaneously from same YUV file.")
print("Usage: SampleDecode.py $gpu_id $width $height $input_file $num_threads")
if len(sys.argv) < 6:
print("Provide input CLI arguments as shown above")
exit(1)
gpu_id = int(sys.argv[1])
width = int(sys.argv[2])
height = int(sys.argv[3])
input = sys.argv[4]
num_threads = int(sys.argv[5])
create_threads(gpu_id, width, height, input, num_threads)
| VideoProcessingFramework-master | samples/SampleEncodeMultiThread.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import multiprocessing
import sys
import os
import threading
from typing import Dict
import time
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
from io import BytesIO
from multiprocessing import Process
import subprocess
import uuid
import json
def get_stream_params(url: str) -> Dict:
cmd = [
"ffprobe",
"-v",
"quiet",
"-print_format",
"json",
"-show_format",
"-show_streams",
url,
]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
bio = BytesIO(stdout)
json_out = json.load(bio)
params = {}
if not "streams" in json_out:
return {}
for stream in json_out["streams"]:
if stream["codec_type"] == "video":
params["width"] = stream["width"]
params["height"] = stream["height"]
params["framerate"] = float(eval(stream["avg_frame_rate"]))
codec_name = stream["codec_name"]
is_h264 = True if codec_name == "h264" else False
is_hevc = True if codec_name == "hevc" else False
if not is_h264 and not is_hevc:
raise ValueError(
"Unsupported codec: "
+ codec_name
+ ". Only H.264 and HEVC are supported in this sample."
)
else:
params["codec"] = (
nvc.CudaVideoCodec.H264 if is_h264 else nvc.CudaVideoCodec.HEVC
)
pix_fmt = stream["pix_fmt"]
is_yuv420 = pix_fmt == "yuv420p"
is_yuv444 = pix_fmt == "yuv444p"
# YUVJ420P and YUVJ444P are deprecated but still wide spread, so handle
# them as well. They also indicate JPEG color range.
is_yuvj420 = pix_fmt == "yuvj420p"
is_yuvj444 = pix_fmt == "yuvj444p"
if is_yuvj420:
is_yuv420 = True
params["color_range"] = nvc.ColorRange.JPEG
if is_yuvj444:
is_yuv444 = True
params["color_range"] = nvc.ColorRange.JPEG
if not is_yuv420 and not is_yuv444:
raise ValueError(
"Unsupported pixel format: "
+ pix_fmt
+ ". Only YUV420 and YUV444 are supported in this sample."
)
else:
params["format"] = (
nvc.PixelFormat.NV12 if is_yuv420 else nvc.PixelFormat.YUV444
)
# Color range default option. We may have set when parsing
# pixel format, so check first.
if "color_range" not in params:
params["color_range"] = nvc.ColorRange.MPEG
# Check actual value.
if "color_range" in stream:
color_range = stream["color_range"]
if color_range == "pc" or color_range == "jpeg":
params["color_range"] = nvc.ColorRange.JPEG
# Color space default option:
params["color_space"] = nvc.ColorSpace.BT_601
# Check actual value.
if "color_space" in stream:
color_space = stream["color_space"]
if color_space == "bt709":
params["color_space"] = nvc.ColorSpace.BT_709
return params
return {}
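# For reference, a hedged example of the dictionary get_stream_params() builds
# for a typical 1080p H.264 stream (the values are illustrative only):
#
#   {
#       "width": 1920,
#       "height": 1080,
#       "framerate": 30.0,
#       "codec": nvc.CudaVideoCodec.H264,
#       "format": nvc.PixelFormat.NV12,
#       "color_range": nvc.ColorRange.MPEG,
#       "color_space": nvc.ColorSpace.BT_601,
#   }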
def rtsp_client(url: str, name: str, gpu_id: int, length_seconds: int) -> None:
# Get stream parameters
params = get_stream_params(url)
if not len(params):
raise ValueError("Can not get " + url + " streams params")
w = params["width"]
h = params["height"]
f = params["format"]
c = params["codec"]
g = gpu_id
# Prepare ffmpeg arguments
if nvc.CudaVideoCodec.H264 == c:
codec_name = "h264"
elif nvc.CudaVideoCodec.HEVC == c:
codec_name = "hevc"
bsf_name = codec_name + "_mp4toannexb,dump_extra=all"
cmd = [
"ffmpeg",
"-hide_banner",
"-i",
url,
"-c:v",
"copy",
"-bsf:v",
bsf_name,
"-f",
codec_name,
"pipe:1",
]
# Run ffmpeg in subprocess and redirect it's output to pipe
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Create HW decoder class
nvdec = nvc.PyNvDecoder(w, h, f, c, g)
# Amount of bytes we read from pipe first time.
read_size = 4096
    # Total bytes read and total frames decoded to get average data rate
rt = 0
fd = 0
# Main decoding loop, will not flush intentionally because don't know the
# amount of frames available via RTSP.
t0 = time.time()
print("running stream")
while True:
if (time.time() - t0) > length_seconds:
print(f"Listend for {length_seconds}seconds")
break
# Pipe read underflow protection
if not read_size:
read_size = int(rt / fd)
# Counter overflow protection
rt = read_size
fd = 1
# Read data.
# Amount doesn't really matter, will be updated later on during decode.
bits = proc.stdout.read(read_size)
if not len(bits):
print("Can't read data from pipe")
break
else:
rt += len(bits)
# Decode
enc_packet = np.frombuffer(buffer=bits, dtype=np.uint8)
pkt_data = nvc.PacketData()
try:
surf = nvdec.DecodeSurfaceFromPacket(enc_packet, pkt_data)
if not surf.Empty():
fd += 1
# Shifts towards underflow to avoid increasing vRAM consumption.
if pkt_data.bsl < read_size:
read_size = pkt_data.bsl
                # Print the client name (unique per process) roughly every second.
fps = int(params["framerate"])
if not fd % fps:
print(name)
# Handle HW exceptions in simplest possible way by decoder respawn
except nvc.HwResetException:
nvdec = nvc.PyNvDecoder(w, h, f, c, g)
continue
if __name__ == "__main__":
print("This sample decodes multiple videos in parallel on given GPU.")
print("It doesn't do anything beside decoding, output isn't saved.")
print("Usage: SampleDecodeRTSP.py $gpu_id $url1 ... $urlN .")
if len(sys.argv) < 3:
print("Provide gpu ID and input URL(s).")
exit(1)
gpuID = int(sys.argv[1])
listen_length_seconds = int(sys.argv[2])
urls = []
for i in range(3, len(sys.argv)):
urls.append(sys.argv[i])
pool = []
for url in urls:
client = Process(
target=rtsp_client,
args=(url, str(uuid.uuid4()), gpuID, listen_length_seconds),
)
client.start()
pool.append(client)
for client in pool:
client.join()
| VideoProcessingFramework-master | samples/SampleDecodeRTSP.py |
#
# Copyright 2021 Kognia Sports Intelligence
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import torch
import torchvision.transforms as T
import PyNvCodec as nvc
try:
import PytorchNvCodec as pnvc
except ImportError as err:
raise (f"""Could not import `PytorchNvCodec`: {err}.
Please make sure it is installed! Run
`pip install git+https://github.com/NVIDIA/VideoProcessingFramework#subdirectory=src/PytorchNvCodec` or
`pip install src/PytorchNvCodec` if using a local copy of the VideoProcessingFramework repository""") # noqa
import numpy as np
class cconverter:
"""
Colorspace conversion chain.
"""
def __init__(self, width: int, height: int, gpu_id: int):
self.gpu_id = gpu_id
self.w = width
self.h = height
self.chain = []
def add(self, src_fmt: nvc.PixelFormat, dst_fmt: nvc.PixelFormat) -> None:
self.chain.append(
nvc.PySurfaceConverter(self.w, self.h, src_fmt, dst_fmt, self.gpu_id)
)
def run(self, src_surface: nvc.Surface) -> nvc.Surface:
surf = src_surface
cc = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_601, nvc.ColorRange.MPEG)
for cvt in self.chain:
surf = cvt.Execute(surf, cc)
if surf.Empty():
raise RuntimeError("Failed to perform color conversion")
return surf.Clone(self.gpu_id)
def surface_to_tensor(surface: nvc.Surface) -> torch.Tensor:
"""
Converts planar rgb surface to cuda float tensor.
"""
if surface.Format() != nvc.PixelFormat.RGB_PLANAR:
raise RuntimeError("Surface shall be of RGB_PLANAR pixel format")
surf_plane = surface.PlanePtr()
img_tensor = pnvc.DptrToTensor(
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
if img_tensor is None:
raise RuntimeError("Can not export to tensor.")
img_tensor.resize_(3, int(surf_plane.Height() / 3), surf_plane.Width())
img_tensor = img_tensor.type(dtype=torch.cuda.FloatTensor)
img_tensor = torch.divide(img_tensor, 255.0)
img_tensor = torch.clamp(img_tensor, 0.0, 1.0)
return img_tensor
def tensor_to_surface(img_tensor: torch.tensor, gpu_id: int) -> nvc.Surface:
"""
Converts cuda float tensor to planar rgb surface.
"""
    if len(img_tensor.shape) != 3 or img_tensor.shape[0] != 3:
raise RuntimeError("Shape of the tensor must be (3, height, width)")
tensor_w, tensor_h = img_tensor.shape[2], img_tensor.shape[1]
img = torch.clamp(img_tensor, 0.0, 1.0)
img = torch.multiply(img, 255.0)
img = img.type(dtype=torch.cuda.ByteTensor)
surface = nvc.Surface.Make(nvc.PixelFormat.RGB_PLANAR, tensor_w, tensor_h, gpu_id)
surf_plane = surface.PlanePtr()
pnvc.TensorToDptr(
img,
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
return surface
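# A hedged round-trip sketch of the two helpers above (assumes "rgb_pln" is an
# RGB_PLANAR nvc.Surface such as the one produced in main() below):
#
#   img = surface_to_tensor(rgb_pln)       # torch.cuda.FloatTensor, (3, H, W), values in [0, 1]
#   surf = tensor_to_surface(img, gpu_id)  # back to an RGB_PLANAR surface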
def main(gpu_id, encFilePath, dstFilePath):
dstFile = open(dstFilePath, "wb")
nvDec = nvc.PyNvDecoder(encFilePath, gpu_id)
w = nvDec.Width()
h = nvDec.Height()
res = str(w) + "x" + str(h)
nvEnc = nvc.PyNvEncoder(
{"preset": "P4", "codec": "h264", "s": res, "bitrate": "10M"}, gpu_id
)
# Surface converters
to_rgb = cconverter(w, h, gpu_id)
to_rgb.add(nvc.PixelFormat.NV12, nvc.PixelFormat.YUV420)
to_rgb.add(nvc.PixelFormat.YUV420, nvc.PixelFormat.RGB)
to_rgb.add(nvc.PixelFormat.RGB, nvc.PixelFormat.RGB_PLANAR)
to_nv12 = cconverter(w, h, gpu_id)
to_nv12.add(nvc.PixelFormat.RGB_PLANAR, nvc.PixelFormat.RGB)
to_nv12.add(nvc.PixelFormat.RGB, nvc.PixelFormat.YUV420)
to_nv12.add(nvc.PixelFormat.YUV420, nvc.PixelFormat.NV12)
# Encoded video frame
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
while True:
# Decode NV12 surface
src_surface = nvDec.DecodeSingleSurface()
if src_surface.Empty():
break
# Convert to planar RGB
rgb_pln = to_rgb.run(src_surface)
if rgb_pln.Empty():
break
# PROCESS YOUR TENSOR HERE.
# THIS DUMMY PROCESSING JUST ADDS RANDOM ROTATION.
src_tensor = surface_to_tensor(rgb_pln)
dst_tensor = T.RandomRotation(degrees=(-1, 1))(src_tensor)
surface_rgb = tensor_to_surface(dst_tensor, gpu_id)
# Convert back to NV12
dst_surface = to_nv12.run(surface_rgb)
        if dst_surface.Empty():
break
# Encode
success = nvEnc.EncodeSingleSurface(dst_surface, encFrame)
if success:
byteArray = bytearray(encFrame)
dstFile.write(byteArray)
# Encoder is asynchronous, so we need to flush it
while True:
success = nvEnc.FlushSinglePacket(encFrame)
if success:
byteArray = bytearray(encFrame)
dstFile.write(byteArray)
else:
break
if __name__ == "__main__":
print("This sample transcode and process with pytorch an input video on given GPU.")
print("Usage: SamplePyTorch.py $gpu_id $input_file $output_file.")
if len(sys.argv) < 4:
print("Provide gpu ID, path to input and output files")
exit(1)
gpu_id = int(sys.argv[1])
encFilePath = sys.argv[2]
decFilePath = sys.argv[3]
main(gpu_id, encFilePath, decFilePath)
| VideoProcessingFramework-master | samples/SamplePyTorch.py |
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import pycuda.driver as cuda
import PyNvCodec as nvc
import numpy as np
from threading import Thread
class Worker(Thread):
def __init__(self, gpuID, encFile):
Thread.__init__(self)
# Retain primary CUDA device context and create separate stream per thread.
self.ctx = cuda.Device(gpuID).retain_primary_context()
self.ctx.push()
self.str = cuda.Stream()
self.ctx.pop()
# Create Decoder with given CUDA context & stream.
self.nvDec = nvc.PyNvDecoder(encFile, self.ctx.handle, self.str.handle)
width, height = self.nvDec.Width(), self.nvDec.Height()
hwidth, hheight = int(width / 2), int(height / 2)
# Determine colorspace conversion parameters.
# Some video streams don't specify these parameters so default values
# are most widespread bt601 and mpeg.
cspace, crange = self.nvDec.ColorSpace(), self.nvDec.ColorRange()
if nvc.ColorSpace.UNSPEC == cspace:
cspace = nvc.ColorSpace.BT_601
if nvc.ColorRange.UDEF == crange:
crange = nvc.ColorRange.MPEG
self.cc_ctx = nvc.ColorspaceConversionContext(cspace, crange)
print("Color space: ", str(cspace))
print("Color range: ", str(crange))
# Initialize colorspace conversion chain
if self.nvDec.ColorSpace() != nvc.ColorSpace.BT_709:
self.nvYuv = nvc.PySurfaceConverter(
width,
height,
self.nvDec.Format(),
nvc.PixelFormat.YUV420,
self.ctx.handle,
self.str.handle,
)
else:
self.nvYuv = None
if self.nvYuv:
self.nvCvt = nvc.PySurfaceConverter(
width,
height,
self.nvYuv.Format(),
nvc.PixelFormat.RGB,
self.ctx.handle,
self.str.handle,
)
else:
self.nvCvt = nvc.PySurfaceConverter(
width,
height,
self.nvDec.Format(),
nvc.PixelFormat.RGB,
self.ctx.handle,
self.str.handle,
)
self.nvRes = nvc.PySurfaceResizer(
hwidth, hheight, self.nvCvt.Format(), self.ctx.handle, self.str.handle
)
self.nvDwn = nvc.PySurfaceDownloader(
hwidth, hheight, self.nvRes.Format(), self.ctx.handle, self.str.handle
)
self.num_frame = 0
def run(self):
try:
while True:
try:
self.rawSurface = self.nvDec.DecodeSingleSurface()
if self.rawSurface.Empty():
print("No more video frames")
break
except nvc.HwResetException:
print("Continue after HW decoder was reset")
continue
if self.nvYuv:
self.yuvSurface = self.nvYuv.Execute(self.rawSurface, self.cc_ctx)
self.cvtSurface = self.nvCvt.Execute(self.yuvSurface, self.cc_ctx)
else:
self.cvtSurface = self.nvCvt.Execute(self.rawSurface, self.cc_ctx)
if self.cvtSurface.Empty():
print("Failed to do color conversion")
break
self.resSurface = self.nvRes.Execute(self.cvtSurface)
if self.resSurface.Empty():
print("Failed to resize surface")
break
self.rawFrame = np.ndarray(
shape=(self.resSurface.HostSize()), dtype=np.uint8
)
success = self.nvDwn.DownloadSingleSurface(
self.resSurface, self.rawFrame
)
if not (success):
print("Failed to download surface")
break
self.num_frame += 1
if 0 == self.num_frame % self.nvDec.Framerate():
print(f"Thread {self.ident} at frame {self.num_frame}")
        except Exception as e:
            print(getattr(e, "message", str(e)))
def create_threads(gpu_id, input_file, num_threads):
cuda.init()
thread_pool = []
for i in range(0, num_threads):
thread = Worker(gpu_id, input_file)
thread.start()
thread_pool.append(thread)
for thread in thread_pool:
thread.join()
if __name__ == "__main__":
print(
"This sample decodes video streams in parallel threads. It does not save output."
)
print("GPU-accelerated color conversion and resize are also applied.")
print("This sample may serve as a stability test.")
print("Usage: python SampleDecodeMultiThread.py $gpu_id $input $num_threads")
if len(sys.argv) < 4:
print("Provide input CLI arguments as shown above")
exit(1)
gpu_id = int(sys.argv[1])
input_file = sys.argv[2]
num_threads = int(sys.argv[3])
create_threads(gpu_id, input_file, num_threads)
| VideoProcessingFramework-master | samples/SampleDecodeMultiThread.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import numpy as np
from enum import Enum
import PyNvCodec as nvc
import sys
import os
import logging
import argparse
import pathlib
logger = logging.getLogger(__file__)
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
logger.error("CUDA_PATH environment variable is not set.")
logger.error("Can't set CUDA DLLs search path.")
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
logger.error("PATH environment variable is not set.")
exit(1)
total_num_frames = 444
def encode(gpuID, decFilePath, encFilePath, width, height, codec, format):
decFile = open(decFilePath, "rb")
encFile = open(encFilePath, "wb")
res = str(width) + "x" + str(height)
pixel_format = nvc.PixelFormat.NV12
profile = "high"
if format == 'yuv444':
pixel_format = nvc.PixelFormat.YUV444
profile = "high_444"
elif format == 'yuv444_10bit':
pixel_format = nvc.PixelFormat.YUV444_10bit
profile = "high_444_10bit"
elif format == 'yuv420_10bit':
pixel_format = nvc.PixelFormat.YUV420_10bit
profile = "high_420_10bit"
nvEnc = nvc.PyNvEncoder(
{
"preset": "P5",
"tuning_info": "high_quality",
"codec": codec,
"profile": profile,
"s": res,
"bitrate": "10M",
},
gpuID,
pixel_format
)
frameSize = nvEnc.GetFrameSizeInBytes()
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
# Number of frames we've sent to encoder
framesSent = 0
# Number of frames we've received from encoder
framesReceived = 0
# Number of frames we've got from encoder during flush.
# This number is included in number of received frames.
# We use separate counter to check if encoder receives packets one by one
# during flush.
framesFlushed = 0
while framesSent < total_num_frames:
rawFrame = np.fromfile(decFile, np.uint8, count=frameSize)
if not (rawFrame.size):
print("No more input frames")
break
success = nvEnc.EncodeSingleFrame(rawFrame, encFrame, sync=False)
framesSent += 1
if success:
encByteArray = bytearray(encFrame)
encFile.write(encByteArray)
framesReceived += 1
# Encoder is asynchronous, so we need to flush it
while True:
success = nvEnc.FlushSinglePacket(encFrame)
if (success) and (framesReceived < total_num_frames):
encByteArray = bytearray(encFrame)
encFile.write(encByteArray)
framesReceived += 1
framesFlushed += 1
else:
break
print(
framesReceived,
"/",
total_num_frames,
" frames encoded and written to output file.",
)
print(framesFlushed, " frame(s) received during encoder flush.")
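# A hedged sketch of producing a compatible raw NV12 input file with ffmpeg
# (the file names, and ffmpeg being available, are assumptions of this note):
#
#   ffmpeg -i input.mp4 -pix_fmt nv12 -f rawvideo raw_video.nv12
#
# For the other formats accepted above, -pix_fmt would change accordingly
# (e.g. yuv444p for the "yuv444" option).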
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="This sample encodes the first "
        + str(total_num_frames)
        + " frames of an input raw video file to H.264 or HEVC on the given GPU.\n"
        + "Supported raw input formats are NV12, YUV420 10 bit, YUV444 and YUV444 10 bit.\n"
    )
parser.add_argument(
"gpu_id",
type=int,
help="GPU id, check nvidia-smi",
)
parser.add_argument(
"raw_file_path",
type=pathlib.Path,
help="raw video file (read from)",
)
parser.add_argument(
"encoded_file_path",
type=pathlib.Path,
help="encoded video file (write to)",
)
parser.add_argument(
"width",
type=int,
help="width",
)
parser.add_argument(
"height",
type=int,
help="height",
)
parser.add_argument(
"codec",
type=str,
nargs='?',
default="h264",
help="supported codec",
)
parser.add_argument(
"surfaceformat",
type=str,
nargs='?',
default="nv12",
help="supported format",
)
args = parser.parse_args()
gpuID = args.gpu_id
decFilePath = args.raw_file_path
encFilePath = args.encoded_file_path
width = args.width
height = args.height
codec = args.codec
surfaceformat = args.surfaceformat
encode(gpuID, decFilePath, encFilePath, width, height, codec, surfaceformat)
exit(0)
| VideoProcessingFramework-master | samples/SampleEncode.py |
#
# Copyright 2023 @royinx
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
from typing import Any
import PyNvCodec as nvc
import numpy as np
import cupy as cp
class cconverter:
"""
Colorspace conversion chain.
"""
def __init__(self, width: int, height: int, gpu_id: int):
self.gpu_id = gpu_id
self.w = width
self.h = height
self.chain = []
def add(self, src_fmt: nvc.PixelFormat, dst_fmt: nvc.PixelFormat) -> None:
self.chain.append(
nvc.PySurfaceConverter(self.w, self.h, src_fmt, dst_fmt, self.gpu_id)
)
def run(self, src_surface: nvc.Surface) -> nvc.Surface:
surf = src_surface
cc = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_601, nvc.ColorRange.MPEG)
for cvt in self.chain:
surf = cvt.Execute(surf, cc)
if surf.Empty():
raise RuntimeError("Failed to perform color conversion")
return surf.Clone(self.gpu_id)
class CupyNVC:
def get_memptr(self, surface: nvc.Surface) -> int:
return surface.PlanePtr().GpuMem()
def SurfaceToArray(self, surface: nvc.Surface) -> cp.array:
"""
Converts surface to cupy unit8 tensor.
- surface: nvc.Surface
- return: cp.array (height, width, 3)
"""
if surface.Format() != nvc.PixelFormat.RGB:
raise RuntimeError("Surface shall be of RGB PLANAR format , got {}".format(surface.Format()))
plane = surface.PlanePtr()
# cuPy array zero copy non ownned
height, width, pitch = (plane.Height(), plane.Width(), plane.Pitch())
cupy_mem = cp.cuda.UnownedMemory(self.get_memptr(surface), height * width * 1, surface)
cupy_memptr = cp.cuda.MemoryPointer(cupy_mem, 0)
cupy_frame = cp.ndarray((height, width // 3, 3), cp.uint8, cupy_memptr, strides=(pitch, 3, 1)) # RGB
return cupy_frame
def _memcpy(self, surface: nvc.Surface, img_array: cp.array) -> None:
cp.cuda.runtime.memcpy2DAsync(self.get_memptr(surface),
surface.Pitch(),
img_array.data.ptr,
surface.Width(),
surface.Width(),
surface.Height()*3,
cp.cuda.runtime.memcpyDeviceToDevice,
0) # null_stream.ptr: 0
return
def ArrayToSurface(self, img_array: cp.array, gpu_id: int) -> nvc.Surface:
"""
Converts cupy ndarray to rgb surface.
- surface: cp.array
- return: nvc.Surface
"""
img_array = img_array.astype(cp.uint8)
img_array = cp.transpose(img_array, (2,0,1)) # HWC to CHW
img_array = cp.ascontiguousarray(img_array)
_ ,tensor_h , tensor_w= img_array.shape
surface = nvc.Surface.Make(nvc.PixelFormat.RGB_PLANAR, tensor_w, tensor_h, gpu_id)
self._memcpy(surface, img_array)
return surface
def grayscale(img_array: cp.array) -> cp.array:
img_array = cp.matmul(img_array, cp.array([0.299, 0.587, 0.114]).T)
img_array = cp.expand_dims(img_array, axis=-1)
img_array = cp.tile(img_array, (1,1,3)) # view as 3 channel image (packed RGB: HWC)
return img_array
def contrast_boost(img_array: cp.array) -> cp.array:
    """
    Percentile-based contrast stretch: clip every channel to its
    5th-95th percentile range and rescale the result to 0-255.
    """
    channel_min = cp.quantile(img_array, 0.05, axis=(0, 1))
    channel_max = cp.quantile(img_array, 0.95, axis=(0, 1))
    img_array = img_array.astype(cp.float32)
    for c, (cmin, cmax) in enumerate(zip(channel_min, channel_max)):
        img_array[..., c] = cp.clip(img_array[..., c], cmin, cmax)
    img_array = img_array - channel_min.reshape(1, 1, -1)
    img_array /= (channel_max - channel_min).reshape(1, 1, -1)
    img_array = cp.multiply(img_array, 255.0)
    return img_array
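# A quick, self-contained sanity check of the two filters above on random data
# (hedged sketch; it only assumes CuPy, which this sample already imports):
#
#   img = (cp.random.rand(720, 1280, 3) * 255).astype(cp.uint8)
#   out = grayscale(contrast_boost(img))  # float array, same packed HWC shape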
def main(gpu_id: int, encFilePath: str, dstFilePath: str):
dstFile = open(dstFilePath, "wb")
nvDec = nvc.PyNvDecoder(encFilePath, gpu_id)
cpnvc = CupyNVC()
w = nvDec.Width()
h = nvDec.Height()
res = str(w) + "x" + str(h)
nvEnc = nvc.PyNvEncoder(
{"preset": "P4", "codec": "h264", "s": res, "bitrate": "10M"}, gpu_id
)
# Surface converters
to_rgb = cconverter(w, h, gpu_id)
to_rgb.add(nvc.PixelFormat.NV12, nvc.PixelFormat.YUV420)
to_rgb.add(nvc.PixelFormat.YUV420, nvc.PixelFormat.RGB)
to_nv12 = cconverter(w, h, gpu_id)
to_nv12.add(nvc.PixelFormat.RGB_PLANAR, nvc.PixelFormat.RGB)
to_nv12.add(nvc.PixelFormat.RGB, nvc.PixelFormat.YUV420)
to_nv12.add(nvc.PixelFormat.YUV420, nvc.PixelFormat.NV12)
# Encoded video frame
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
while True:
# Decode NV12 surface
src_surface = nvDec.DecodeSingleSurface()
if src_surface.Empty():
break
# Convert to packed RGB: HWC , planar CHW
rgb_sur = to_rgb.run(src_surface)
if rgb_sur.Empty():
break
# PROCESS YOUR TENSOR HERE.
# THIS DUMMY PROCESSING JUST ADDS RANDOM ROTATION.
src_array = cpnvc.SurfaceToArray(rgb_sur)
dst_array = contrast_boost(src_array)
dst_array = grayscale(dst_array)
surface_rgb = cpnvc.ArrayToSurface(dst_array, gpu_id)
# Convert back to NV12
dst_surface = to_nv12.run(surface_rgb)
        if dst_surface.Empty():
break
# Encode
success = nvEnc.EncodeSingleSurface(dst_surface, encFrame)
if success:
byteArray = bytearray(encFrame)
dstFile.write(byteArray)
# Encoder is asynchronous, so we need to flush it
while True:
success = nvEnc.FlushSinglePacket(encFrame)
if success:
byteArray = bytearray(encFrame)
dstFile.write(byteArray)
else:
break
if __name__ == "__main__":
if len(sys.argv) < 4:
print("This sample transcode and process with pytorch an input video on given GPU.")
print("Provide gpu ID, path to input and output files")
print("Usage: SamplePyTorch.py $gpu_id $input_file $output_file.")
print("Example: \npython3 samples/SampleCupy.py 0 tests/test.mp4 tests/dec_test.mp4")
exit(1)
gpu_id = int(sys.argv[1])
encFilePath = sys.argv[2]
decFilePath = sys.argv[3]
main(gpu_id, encFilePath, decFilePath)
| VideoProcessingFramework-master | samples/SampleCupy.py |
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import cv2
def load_remap(remap_file):
remap_x, remap_y = np.load(remap_file, allow_pickle=True).values()
if remap_x.shape != remap_y.shape:
raise ValueError(
"remap_x.shape != remap_y.shape: ", remap_x.shape, " != ", remap_y.shape
)
if not remap_x.flags["C_CONTIGUOUS"]:
remap_x = np.ascontiguousarray(remap_x, dtype=remap_x.dtype)
if not remap_y.flags["C_CONTIGUOUS"]:
remap_y = np.ascontiguousarray(remap_y, dtype=remap_y.dtype)
print("----> load remap_x: ", remap_x.shape, remap_x.dtype, remap_x.strides)
print("----> load remap_y: ", remap_y.shape, remap_y.dtype, remap_y.strides)
return remap_x, remap_y
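# A hedged sketch of how a compatible remap .npz could be produced with OpenCV
# (camera_matrix, dist_coeffs and the frame size are placeholders; note that
# load_remap() above reads the two arrays back in x-then-y order):
#
#   map_x, map_y = cv2.initUndistortRectifyMap(
#       camera_matrix, dist_coeffs, None, camera_matrix, (width, height), cv2.CV_32FC1
#   )
#   np.savez("remap.npz", remap_x=map_x, remap_y=map_y)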
total_num_frames = 4
def decode(gpuID, encFilePath, remapFilePath):
nvDec = nvc.PyNvDecoder(encFilePath, gpuID)
w = nvDec.Width()
h = nvDec.Height()
to_rgb = nvc.PySurfaceConverter(
w, h, nvc.PixelFormat.NV12, nvc.PixelFormat.RGB, gpuID
)
cc1 = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_709, nvc.ColorRange.JPEG)
# init remaper
remap_x, remap_y = load_remap(remapFilePath)
remap_h, remap_w = remap_x.shape
nv_remap = nvc.PySurfaceRemaper(remap_x, remap_y, nvc.PixelFormat.RGB, gpuID)
nv_dwn = nvc.PySurfaceDownloader(remap_w, remap_h, nvc.PixelFormat.RGB, gpuID)
dec_frame = 0
while dec_frame < total_num_frames:
rawSurface = nvDec.DecodeSingleSurface()
if rawSurface.Empty():
print("DecodeSingleSurface Failed.")
break
rgb24_origin = to_rgb.Execute(rawSurface, cc1)
if rgb24_origin.Empty():
print("Convert to rgb Failed.")
break
rgb24_remap = nv_remap.Execute(rgb24_origin)
if rgb24_remap.Empty():
print("Remap Failed.")
break
rawFrameRGB = np.ndarray(shape=(remap_h, remap_w, 3), dtype=np.uint8)
if not nv_dwn.DownloadSingleSurface(rgb24_remap, rawFrameRGB):
print("DownloadSingleSurface Failed.")
break
undistort_img = cv2.cvtColor(rawFrameRGB, cv2.COLOR_RGB2BGR)
print("dump image shape: ", undistort_img.shape)
cv2.imwrite("%s.jpg" % dec_frame, undistort_img)
dec_frame += 1
if __name__ == "__main__":
print(
"This sample decodes first ",
total_num_frames,
" frames from input video and undistort them.",
)
print("Usage: SampleRemap.py $gpu_id $input_file $remap_npz_file")
if len(sys.argv) < 4:
print("Provide gpu_id, path to input, path to remap file")
exit(1)
gpu_id = int(sys.argv[1])
encFilePath = sys.argv[2]
remapFilePath = sys.argv[3]
decode(gpu_id, encFilePath, remapFilePath)
| VideoProcessingFramework-master | samples/SampleRemap.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import PyNvCodec as nvc
import os
import tqdm
import argparse
from pathlib import Path
from enum import Enum
import numpy as np
import logging
logger = logging.getLogger(__file__)
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
logger.error("CUDA_PATH environment variable is not set.")
logger.error("Can't set CUDA DLLs search path.")
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
logger.error("PATH environment variable is not set.")
exit(1)
class InitMode(Enum):
# Decoder will be created with built-in demuxer.
    BUILTIN = 0
# Decoder will be created with standalone FFmpeg VPF demuxer.
STANDALONE = 1
class DecodeStatus(Enum):
# Decoding error.
    DEC_ERR = 0
# Frame was submitted to decoder.
# No frames are ready for display yet.
    DEC_SUBM = 1
# Frame was submitted to decoder.
# There's a frame ready for display.
DEC_READY = 2
class NvDecoder:
def __init__(
self,
gpu_id: int,
enc_file: str,
dec_file: str,
dmx_mode=InitMode.STANDALONE,
):
# Save mode, we will need this later
self.init_mode = dmx_mode
if self.init_mode == InitMode.STANDALONE:
# Initialize standalone demuxer.
self.nv_dmx = nvc.PyFFmpegDemuxer(enc_file)
# Initialize decoder.
self.nv_dec = nvc.PyNvDecoder(
self.nv_dmx.Width(),
self.nv_dmx.Height(),
self.nv_dmx.Format(),
self.nv_dmx.Codec(),
gpu_id,
)
else:
# Initialize decoder with built-in demuxer.
self.nv_dmx = None
self.nv_dec = nvc.PyNvDecoder(enc_file, gpu_id)
# Frame to seek to next time decoding function is called.
        # Negative values mean 'don't use seek'. Non-negative values mean
        # the frame number to seek to.
self.sk_frm = int(-1)
# Total amount of decoded frames
self.num_frames_decoded = int(0)
# Numpy array to store decoded frames pixels
self.frame_nv12 = np.ndarray(shape=(0), dtype=np.uint8)
# Output file
self.out_file = open(dec_file, "wb")
# Encoded video packet
self.packet = np.ndarray(shape=(0), dtype=np.uint8)
# Encoded packet data
self.packet_data = nvc.PacketData()
# Seek mode
self.seek_mode = nvc.SeekMode.PREV_KEY_FRAME
# Returns decoder creation mode
def mode(self) -> InitMode:
return self.init_mode
# Returns video width in pixels
def width(self) -> int:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.Width()
else:
return self.nv_dec.Width()
# Returns video height in pixels
def height(self) -> int:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.Height()
else:
return self.nv_dec.Height()
# Returns number of decoded frames.
def dec_frames(self) -> int:
return self.num_frames_decoded
# Returns frame rate
def framerate(self) -> float:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.Framerate()
else:
return self.nv_dec.Framerate()
# Returns average frame rate
def avg_framerate(self) -> float:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.AvgFramerate()
else:
return self.nv_dec.AvgFramerate()
    # Returns True if video has variable frame rate, False otherwise
def is_vfr(self) -> bool:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.IsVFR()
else:
return self.nv_dec.IsVFR()
# Returns number of frames in video.
def stream_num_frames(self) -> int:
if self.mode() == InitMode.STANDALONE:
return self.nv_dmx.Numframes()
else:
return self.nv_dec.Numframes()
# Seek for particular frame number.
def seek(
self,
seek_frame: int,
seek_mode: nvc.SeekMode,
seek_criteria: str = "ts",
) -> None:
# Next time we decode frame decoder will seek for this frame first.
self.sk_frm = seek_frame
self.seek_mode = seek_mode
self.seek_criteria = seek_criteria
self.num_frames_decoded = 0
def decode_frame_standalone(self, verbose=False) -> DecodeStatus:
status = DecodeStatus.DEC_ERR
try:
# Check if we need to seek first.
if self.sk_frm >= 0:
                # Convert whole-number float seek targets to int; plain ints stay as-is.
                if isinstance(self.sk_frm, float) and self.sk_frm.is_integer():
                    self.sk_frm = int(self.sk_frm)
logger.info(f"Seeking for the {self.seek_criteria} {self.sk_frm}")
seek_ctx = nvc.SeekContext(
**{"seek_" + self.seek_criteria: self.sk_frm},
mode=self.seek_mode
)
self.sk_frm = -1
if not self.nv_dmx.Seek(seek_ctx, self.packet):
return status
logger.info("We are at frame with pts {str(seek_ctx.out_frame_pts)}")
# Otherwise we just demux next packet.
elif not self.nv_dmx.DemuxSinglePacket(self.packet):
return status
# Send encoded packet to Nvdec.
# Nvdec is async so it may not return decoded frame immediately.
frame_ready = self.nv_dec.DecodeFrameFromPacket(
self.frame_nv12, self.packet
)
if frame_ready:
self.num_frames_decoded += 1
status = DecodeStatus.DEC_READY
else:
status = DecodeStatus.DEC_SUBM
# Get last demuxed packet data.
# It stores info such as pts, duration etc.
self.nv_dmx.LastPacketData(self.packet_data)
if verbose:
logger.info(f"frame pts (decode order) :{self.packet_data.pts}")
logger.info(f"frame dts (decode order) :{self.packet_data.dts}")
logger.info(f"frame pos (decode order) :{self.packet_data.pos}")
logger.info(
f"frame duration (decode order) :{self.packet_data.duration}"
)
except Exception as e:
logger.info(f"{getattr(e, 'message', str(e))}")
return status
def decode_frame_builtin(self, verbose=False) -> DecodeStatus:
status = DecodeStatus.DEC_ERR
try:
frame_ready = False
frame_cnt_inc = 0
if self.sk_frm >= 0:
logger.info("Seeking for the frame ", str(self.sk_frm))
seek_ctx = nvc.SeekContext(
int(self.sk_frm), self.seek_mode, self.seek_criteria
)
self.sk_frm = -1
frame_ready = self.nv_dec.DecodeSingleFrame(
self.frame_nv12, seek_ctx, self.packet_data
)
frame_cnt_inc = seek_ctx.num_frames_decoded
else:
frame_ready = self.nv_dec.DecodeSingleFrame(
self.frame_nv12, self.packet_data
)
frame_cnt_inc = 1
# Nvdec is sync in this mode so if frame isn't returned it means
# EOF or error.
if frame_ready:
self.num_frames_decoded += 1
status = DecodeStatus.DEC_READY
if verbose:
logger.info(f"Decoded {frame_cnt_inc} frames internally")
else:
return status
if verbose:
logger.info(f"frame pts (display order) :{self.packet_data.pts}")
logger.info(f"frame dts (display order) :{self.packet_data.dts}")
logger.info(f"frame pos (display order) :{self.packet_data.pos}")
logger.info(
f"frame duration (display order) :{self.packet_data.duration}"
)
except Exception as e:
logger.info(f"{getattr(e, 'message', str(e))}")
return status
# Decode single video frame
def decode_frame(self, verbose=False) -> DecodeStatus:
if self.mode() == InitMode.STANDALONE:
return self.decode_frame_standalone(verbose)
else:
return self.decode_frame_builtin(verbose)
# Send empty packet to decoder to flush decoded frames queue.
    def flush_frame(self, verbose=False) -> bool:
ret = self.nv_dec.FlushSingleFrame(self.frame_nv12)
if ret:
self.num_frames_decoded += 1
return ret
# Write current video frame to output file.
def dump_frame(self) -> None:
bits = bytearray(self.frame_nv12)
self.out_file.write(bits)
# Decode all available video frames and write them to output file.
def decode(self, frames_to_decode=-1, verbose=False, dump_frames=True) -> None:
# Main decoding cycle
pbar = tqdm.tqdm(total=frames_to_decode, ascii=False, unit=" frames")
pbar.set_description("Decoding ")
while (
(self.dec_frames() < frames_to_decode) if (frames_to_decode > 0) else True
):
status = self.decode_frame(verbose)
if status == DecodeStatus.DEC_ERR:
break
elif dump_frames and status == DecodeStatus.DEC_READY:
self.dump_frame()
pbar.update()
        # Check if we need to flush the decoder
need_flush = (
(self.dec_frames() < frames_to_decode) if (frames_to_decode > 0) else True
)
# Flush decoded frames queue.
# This is needed only if decoder is initialized without built-in
        # demuxer and we're not limited in the number of frames to decode.
while need_flush and (self.mode() == InitMode.STANDALONE):
if not self.flush_frame(verbose):
break
elif dump_frames:
self.dump_frame()
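# Illustrative usage sketch, not part of the original sample: seeking before decoding. The file
# paths and frame number are placeholders; seek_criteria "frame" mirrors the SeekContext keyword
# construction in decode_frame_standalone() above.
def seek_and_decode_example(gpu_id=0, enc_file="input.mp4", dec_file="out.nv12"):
    dec = NvDecoder(gpu_id, enc_file, dec_file, dmx_mode=InitMode.STANDALONE)
    # Jump to the key frame preceding frame 100, then decode ten frames from there.
    dec.seek(100, nvc.SeekMode.PREV_KEY_FRAME, seek_criteria="frame")
    dec.decode(frames_to_decode=10, dump_frames=True)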
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"This sample decodes input video to raw NV12 file on given GPU."
)
parser.add_argument(
"-g",
"--gpu-id",
type=int,
required=True,
help="GPU id, check nvidia-smi",
)
parser.add_argument(
"-e",
"--encoded-file-path",
type=Path,
required=True,
help="Encoded video file (read from)",
)
parser.add_argument(
"-r",
"--raw-file-path",
type=Path,
required=True,
help="Raw NV12 video file (write to)",
)
parser.add_argument(
"-v", "--verbose", default=False, action="store_true", help="Verbose"
)
args = parser.parse_args()
dec = NvDecoder(
args.gpu_id,
args.encoded_file_path.as_posix(),
args.raw_file_path.as_posix(),
)
dec.decode(verbose=args.verbose)
exit(0)
| VideoProcessingFramework-master | samples/SampleDecode.py |
import sys
import logging
SERVICE_LOGGING_FORMAT = (
"[{filename:s}][{funcName:s}:{lineno:d}]" + "[{levelname:s}] {message:s}"
)
SERVICE_LOGGING_STREAM = sys.stdout
def get_logger(logger_name, log_level="info"):
    # Fall back to INFO if an unknown level name is passed.
    SERVICE_LOGGING_LEVEL = getattr(logging, log_level.upper(), logging.INFO)
logger = logging.getLogger(logger_name)
logger.setLevel(SERVICE_LOGGING_LEVEL)
ch = logging.StreamHandler(SERVICE_LOGGING_STREAM)
formatter = logging.Formatter(SERVICE_LOGGING_FORMAT, style="{")
ch.setFormatter(formatter)
ch.setLevel(SERVICE_LOGGING_LEVEL)
logger.addHandler(ch)
logger.propagate = False
return logger
logger = get_logger(__file__)
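# Illustrative usage sketch, not part of the original module: samples typically create one
# named logger per file; the logger name and level below are placeholders.
def logger_usage_example():
    sample_logger = get_logger("SampleApp", log_level="debug")
    sample_logger.debug("demuxer opened")
    sample_logger.info("decoder initialized")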
| VideoProcessingFramework-master | samples/utils.py |
#
# Copyright 2022 @Yves33, @sandhawalia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import os
import sys
import time
import argparse
import numpy as np
import pycuda
from pathlib import Path
import ctypes
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import PyNvCodec as nvc
from utils import get_logger
logger = get_logger(__file__)
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
logger.error("CUDA_PATH environment variable is not set.")
logger.error("Can't set CUDA DLLs search path.")
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
logger.error("PATH environment variable is not set.")
exit(1)
class FPSLogger:
def __init__(self, interval):
self.interval = interval
self.framecount = 0
self.seconds = time.time_ns() / 1e9
def log(self, titlebar=True, fmt="fps : {0}"):
self.framecount += 1
if self.seconds + self.interval < time.time_ns() / 1e9:
self.fps = self.framecount / self.interval
self.framecount = 0
self.seconds = time.time_ns() / 1e9
if titlebar:
glutSetWindowTitle(fmt.format(self.fps))
else:
logger.info(fmt.format(self.fps))
class OpenGLApplication:
def __init__(
self,
encoded_video_file: str,
gpu_id: int = 0,
width: int = 500,
height: int = 500,
):
self.cpu = False
# Setup up display window
self.setup_display(width, height)
        # Load drivers and compile shaders (done once at startup)
self.setup_opengl()
# Setup decoder and downsampler
self.setup_vpf(encoded_video_file, gpu_id)
#
self.create_textures()
#
self.cuda_gl_handshake()
def setup_display(self, width, height):
logger.info(f"Setting up display {width}x{height}")
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
glutCreateWindow(b"Simple PyOpenGL example")
logger.info(f"Done setting up display {width}x{height}")
def setup_opengl(self):
self.program = self.compile_shaders()
import pycuda.autoinit
import pycuda.gl.autoinit
self.vao = GLuint()
glCreateVertexArrays(1, self.vao)
def setup_vpf(self, encoded_video_file, gpu_id):
self.nv_dec = nvc.PyNvDecoder(encoded_video_file, gpu_id)
self.width, self.height = self.nv_dec.Width(), self.nv_dec.Height()
self.cc_ctx = nvc.ColorspaceConversionContext(
            self.nv_dec.ColorSpace(), self.nv_dec.ColorRange()
)
if self.nv_dec.ColorSpace() != nvc.ColorSpace.BT_709:
self.nv_yuv = nvc.PySurfaceConverter(
self.width, self.height, self.nv_dec.Format(), nvc.PixelFormat.YUV420, 0
)
self.nv_cvt = nvc.PySurfaceConverter(
                self.width, self.height, self.nv_yuv.Format(), nvc.PixelFormat.RGB, 0
)
else:
self.nv_yuv = None
self.nv_cvt = nvc.PySurfaceConverter(
self.width, self.height, self.nv_dec.Format(), nvc.PixelFormat.RGB, 0
)
self.nv_down = nvc.PySurfaceDownloader(
self.width, self.height, self.nv_cvt.Format(), 0
)
self.data = np.zeros((self.width * self.height, 3), np.uint8)
def create_textures(self):
## create texture for GL display
self.texture = glGenTextures(1)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self.texture)
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RGB,
self.width,
self.height,
0,
GL_RGB,
GL_UNSIGNED_BYTE,
ctypes.c_void_p(0),
)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
glBindTexture(GL_TEXTURE_2D, 0)
self.cuda_img = pycuda.gl.RegisteredImage(
int(self.texture), GL_TEXTURE_2D, pycuda.gl.graphics_map_flags.NONE
) # WRITE_DISCARD)
def cuda_gl_handshake(self):
self.pbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, self.pbo)
glBufferData(GL_ARRAY_BUFFER, self.data, GL_DYNAMIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
import pycuda.autoinit
import pycuda.gl.autoinit
self.cuda_pbo = pycuda.gl.RegisteredBuffer(int(self.pbo))
        # Re-use the vertex array object created in setup_opengl().
        glBindVertexArray(self.vao)
def compile_shaders(self):
vertex_shader_source = """
#version 450 core
out vec2 uv;
void main( void)
{
// Declare a hard-coded array of positions
const vec2 vertices[4] = vec2[4](vec2(-0.5, 0.5),
vec2( 0.5, 0.5),
vec2( 0.5, -0.5),
vec2(-0.5, -0.5));
// Index into our array using gl_VertexID
uv=vertices[gl_VertexID]+vec2(0.5,0.5);
gl_Position = vec4(2*vertices[gl_VertexID],1.0,1.0);
}
"""
vertex_shader = glCreateShader(GL_VERTEX_SHADER)
glShaderSource(vertex_shader, vertex_shader_source)
glCompileShader(vertex_shader)
fragment_shader_source = """
#version 450 core
uniform sampler2D s;
in vec2 uv;
out vec4 color;
void main(void)
{
color = vec4(texture(s, uv));
}
"""
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)
glShaderSource(fragment_shader, fragment_shader_source)
glCompileShader(fragment_shader)
program = glCreateProgram()
glAttachShader(program, vertex_shader)
glAttachShader(program, fragment_shader)
glLinkProgram(program)
# --- Clean up now that we don't need these shaders anymore.
glDeleteShader(vertex_shader)
glDeleteShader(fragment_shader)
return program
def render(self, method: str = "GPUTEX"):
glClearBufferfv(GL_COLOR, 0, (0, 0, 0))
## bind program
glUseProgram(self.program)
## get one frame
rawSurface = self.nv_dec.DecodeSingleSurface()
if self.nv_yuv != None:
yuvSurface = self.nv_yuv.Execute(rawSurface, self.cc_ctx)
cvtSurface = self.nv_cvt.Execute(yuvSurface, self.cc_ctx)
else:
cvtSurface = self.nv_cvt.Execute(rawSurface, self.cc_ctx)
## texture update through cpu and system memory
if self.cpu:
## Download surface data to CPU, then update GL texture with these data
success = self.nv_down.DownloadSingleSurface(cvtSurface, self.data)
if not success:
                logger.warning("Could not download Cuda Surface to CPU")
return
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self.texture)
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
self.width,
self.height,
GL_RGB,
GL_UNSIGNED_BYTE,
self.data,
)
else:
## cuda copy from surface.Plane_Ptr() to pbo, then update texture from PBO
src_plane = cvtSurface.PlanePtr()
buffer_mapping = self.cuda_pbo.map()
buffptr, buffsize = buffer_mapping.device_ptr_and_size()
cpy = pycuda.driver.Memcpy2D()
cpy.set_src_device(src_plane.GpuMem())
cpy.set_dst_device(buffptr)
cpy.width_in_bytes = src_plane.Width()
cpy.src_pitch = src_plane.Pitch()
cpy.dst_pitch = self.width * 3
cpy.height = src_plane.Height()
cpy(aligned=True)
# pycuda.driver.Context.synchronize() ## not required?
buffer_mapping.unmap()
## opengl update texture from pbo
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self.texture)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, int(self.pbo))
glTexSubImage2D(
GL_TEXTURE_2D,
0,
0,
0,
self.width,
self.height,
GL_RGB,
GL_UNSIGNED_BYTE,
ctypes.c_void_p(0),
)
glActiveTexture(GL_TEXTURE0)
glBindTexture(GL_TEXTURE_2D, self.texture)
## send uniforms to program and draw quad
        glUniform1i(glGetUniformLocation(self.program, b"s"), 0)
glDrawArrays(GL_QUADS, 0, 4)
## Display
glutSwapBuffers()
def keypressed(self, key, x, y):
if key.decode("utf-8") == "q":
glutLeaveMainLoop()
elif key.decode("utf-8") == "c":
self.cpu = True
elif key.decode("utf-8") == "g":
self.cpu = False
def animate(self):
glutPostRedisplay()
def run(self, verbose: bool = False):
glutIdleFunc(self.animate)
glutDisplayFunc(self.render)
glutMainLoop()
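# Illustrative sketch, not part of the original sample: the pitched-copy pattern used in
# OpenGLApplication.render() above, factored out. src_plane is assumed to expose the VPF
# plane interface (GpuMem/Pitch), and dst_ptr a linear device allocation of at least
# width_bytes * height bytes.
def copy_pitched_plane_to_linear(src_plane, dst_ptr, width_bytes, height):
    import pycuda.driver as cuda_drv  # the CUDA context is set up by pycuda.gl.autoinit above
    cpy = cuda_drv.Memcpy2D()
    cpy.set_src_device(src_plane.GpuMem())
    cpy.set_dst_device(dst_ptr)
    cpy.width_in_bytes = width_bytes  # payload bytes per row, without padding
    cpy.src_pitch = src_plane.Pitch()  # padded row size of the CUDA surface
    cpy.dst_pitch = width_bytes  # destination rows are tightly packed
    cpy.height = height
    cpy(aligned=True)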
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"This sample decodes input video to OpenGL Texture.\n"
+ "Requires the GL Utility Toolkit (GLUT) and pyCUDA compiled with GL support\n"
+ "Controls: c -> toggles cpu path (CUDA->cpu->OpenGL)\n"
+ " g -> toggles gpu path (CUDA->OpenGL)\n"
+ " q -> exit demo."
)
parser.add_argument(
"-g",
"--gpu-id",
type=int,
required=True,
help="GPU id, check nvidia-smi",
)
parser.add_argument(
"-e",
"--encoded-file-path",
type=Path,
required=True,
help="Encoded video file (read from)",
)
parser.add_argument(
"-v", "--verbose", default=False, action="store_true", help="Verbose"
)
args = parser.parse_args()
app = OpenGLApplication(
        args.encoded_file_path.as_posix(),
        args.gpu_id,
)
app.run(verbose=args.verbose)
exit(0)
| VideoProcessingFramework-master | samples/SampleOpenGL.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == 'nt':
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file = sys.stderr)
print("Can't set CUDA DLLs search path.", file = sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(';')
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file = sys.stderr)
exit(1)
import pycuda.driver as cuda
import PyNvCodec as nvc
import numpy as np
import torch
import nvcv
def decode(gpuID, encFilePath, decFilePath):
cuda.init()
cuda_ctx = cuda.Device(gpuID).retain_primary_context()
cuda_ctx.push()
cuda_str = cuda.Stream()
cuda_ctx.pop()
decFile = open(decFilePath, "wb")
nvDmx = nvc.PyFFmpegDemuxer(encFilePath)
nvDec = nvc.PyNvDecoder(nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), nvDmx.Codec(), cuda_ctx.handle, cuda_str.handle)
nvDwn = nvc.PySurfaceDownloader(nvDmx.Width(), nvDmx.Height(), nvDmx.Format(), cuda_ctx.handle, cuda_str.handle)
res = str(nvDmx.Width()) + 'x' + str(nvDmx.Height())
nvEnc = nvc.PyNvEncoder({'preset': 'P5', 'tuning_info': 'high_quality', 'codec': 'h264',
'profile': 'high', 's': res, 'bitrate': '10M'}, cuda_ctx.handle, cuda_str.handle)
packet = np.ndarray(shape=(0), dtype=np.uint8)
frameSize = int(nvDmx.Width() * nvDmx.Height() * 3 / 2)
rawFrame = np.ndarray(shape=(frameSize), dtype=np.uint8)
pdata_in, pdata_out = nvc.PacketData(), nvc.PacketData()
encFrame = np.ndarray(shape=(0), dtype=np.uint8)
# Determine colorspace conversion parameters.
# Some video streams don't specify these parameters so default values
# are most widespread bt601 and mpeg.
cspace, crange = nvDmx.ColorSpace(), nvDmx.ColorRange()
if nvc.ColorSpace.UNSPEC == cspace:
cspace = nvc.ColorSpace.BT_601
if nvc.ColorRange.UDEF == crange:
crange = nvc.ColorRange.MPEG
cc_ctx = nvc.ColorspaceConversionContext(cspace, crange)
print('Color space: ', str(cspace))
print('Color range: ', str(crange))
while True:
# Demuxer has sync design, it returns packet every time it's called.
# If demuxer can't return packet it usually means EOF.
torch.cuda.nvtx.range_push("DemuxSinglePacket")
success = nvDmx.DemuxSinglePacket(packet)
torch.cuda.nvtx.range_pop()
if not success:
break
# Get last packet data to obtain frame timestamp
torch.cuda.nvtx.range_push("LastPacketData")
nvDmx.LastPacketData(pdata_in)
torch.cuda.nvtx.range_pop()
# Decoder is async by design.
# As it consumes packets from demuxer one at a time it may not return
# decoded surface every time the decoding function is called.
torch.cuda.nvtx.range_push("nvDec.DecodeSurfaceFromPacket")
        surface_nv12 = nvDec.DecodeSurfaceFromPacket(pdata_in, packet, pdata_out, True)
torch.cuda.nvtx.range_pop()
if surface_nv12.width == 0 and surface_nv12.height == 0:
continue
torch.cuda.nvtx.range_push("nvEnc.EncodeSingleSurface")
success = nvEnc.EncodeFromNVCVImage(surface_nv12, encFrame)
torch.cuda.nvtx.range_pop()
if (success):
bits = bytearray(encFrame)
decFile.write(bits)
print("Interop test successfull")
break;
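# Illustrative sketch, not part of the original sample: the range_push/range_pop pairs above
# can be wrapped in a context manager so profiling ranges cannot be left unbalanced on errors.
from contextlib import contextmanager

@contextmanager
def nvtx_range(name):
    torch.cuda.nvtx.range_push(name)
    try:
        yield
    finally:
        torch.cuda.nvtx.range_pop()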
if __name__ == "__main__":
print("This sample decodes input video to raw YUV420 file on given GPU.")
print("Usage: SampleDecode.py $gpu_id $input_file $output_file.")
if(len(sys.argv) < 4):
print("Provide gpu ID, path to input and output files")
exit(1)
gpuID = int(sys.argv[1])
encFilePath = sys.argv[2]
decFilePath = sys.argv[3]
decode(gpuID, encFilePath, decFilePath)
| VideoProcessingFramework-master | samples/SampleTypeConversionTest.py |
#
# Copyright 2020 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
import numpy as np
import argparse
from pathlib import Path
def decode(encFilePath, decFilePath):
decFile = open(decFilePath, "wb")
nvDec = nvc.PyFfmpegDecoder(encFilePath, {})
rawFrameYUV = np.ndarray(shape=(0), dtype=np.uint8)
while True:
success = nvDec.DecodeSingleFrame(rawFrameYUV)
if success:
bits = bytearray(rawFrameYUV)
decFile.write(bits)
else:
break
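# Illustrative sketch, not part of the original sample: when the raw YUV output is consumed
# later, the frame geometry can be probed up front with the FFmpeg demuxer used in the other
# samples of this repository. The file path below is a placeholder.
def probe_yuv420_frame_size(enc_file="input.mp4"):
    dmx = nvc.PyFFmpegDemuxer(enc_file)
    # One YUV420 frame occupies width * height * 3 / 2 bytes.
    return dmx.Width(), dmx.Height(), int(dmx.Width() * dmx.Height() * 3 / 2)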
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"This sample decodes input video to raw YUV file using libavcodec SW decoder."
)
parser.add_argument(
"-e",
"--encoded-file-path",
type=Path,
required=True,
help="Encoded video file (read from)",
)
parser.add_argument(
"-r",
"--raw-file-path",
type=Path,
required=True,
help="Raw YUV video file (write to)",
)
parser.add_argument(
"-v", "--verbose", default=False, action="store_true", help="Verbose"
)
args = parser.parse_args()
decode(args.encoded_file_path.as_posix(), args.raw_file_path.as_posix())
| VideoProcessingFramework-master | samples/SampleDecodeSw.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import queue
import sys
import os
import argparse
from pathlib import Path
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
from math import log10, sqrt
import PyNvCodec as nvc
import numpy as np
from inspect import signature
def measure_psnr(gt: np.ndarray, dist: np.ndarray) -> float:
"""
Measures the distance between frames using PSNR metric.
Parameters
----------
gt: Ground Truth picture
dist: Distorted picture
"""
    # Cast to float first to avoid wraparound when frames are stored as np.uint8.
    mse = np.mean((gt.astype(np.float64) - dist.astype(np.float64)) ** 2)
if mse == 0:
return 100.0
max_pixel = 255.0
psnr = 20 * log10(max_pixel / sqrt(mse))
return psnr
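# Illustrative self-test sketch, not part of the original sample: PSNR against an identical
# frame hits the 100.0 cap, while a lightly perturbed copy lands at a high but finite score.
# Frame size and noise level are arbitrary placeholders.
def psnr_self_test() -> None:
    gt = np.random.rand(1080, 1920) * 255.0
    noisy = np.clip(gt + np.random.normal(0.0, 2.0, gt.shape), 0.0, 255.0)
    assert measure_psnr(gt, gt) == 100.0
    print("PSNR vs noisy copy: %.2f dB" % measure_psnr(gt, noisy))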
def single_frame_encode_measure(
raw_frame: np.ndarray,
nvEnc: nvc.PyNvEncoder,
nvDec: nvc.PyNvDecoder,
vq_assess_func,
frame_queue,
fout,
) -> float:
"""
Encodes single input frame and does visual quality estimation with given
function.
Parameters
----------
raw_frame: input raw frame in NV12 format
nvEnc: PyNvEncoder class to be used for encoding
nvDec: PyNvDecoder class to be used for getting recon frame
vq_assess_func: Function to access visual quality
frame_queue: Queue which is used to store input raw frames temporarily
    fout: Handle to file used to store recon frames
"""
    # Video quality assessment function shall have a certain signature.
# In this sample PSNR is used for the sake of simplicity.
sig = signature(vq_assess_func)
assert str(sig) == "(gt: numpy.ndarray, dist: numpy.ndarray) -> float"
recon_frame = np.ndarray(shape=(0), dtype=np.uint8)
enc_packet = np.ndarray(shape=(0), dtype=np.uint8)
enc_done = False
if raw_frame.size:
# Put frame into queue. This is required because Nvenc has some latency
# to it and it doesn't return encoded frame immediately.
frame_queue.put(raw_frame)
# Encode it. Nvenc doesn't return recon frame (that's a HW limitation).
        # To overcome it, we encode and then decode by hand.
nvEnc.EncodeSingleFrame(raw_frame, enc_packet)
if not enc_packet.size:
            # This isn't an error. In the beginning raw frames are sent to HW
            # but no encoded output is ready yet.
return None
else:
        # No more input frames. However, due to the Nvenc latency mentioned above, there
# are some frames left in the Nvenc queue. Hence we flush it.
nvEnc.FlushSinglePacket(enc_packet)
if not enc_packet.size:
# All frames were sent to Nvenc and received.
enc_done = True
if not enc_done:
# Encoder isn't done yet. Continue sending frames to HW.
success = nvDec.DecodeFrameFromPacket(recon_frame, enc_packet)
else:
# All the frames are received from Nvenc. However, Nvdec is async by
# design as well. Hence now we need to flush it as well to receive all
# the frames we sent earlier.
success = nvDec.FlushSingleFrame(recon_frame)
if success:
        # Nvenc accepts frames in display order and Nvdec returns frames in the same
# order as well. Hence no reordering here, usual in-order frame queue
# is used to compare recon frame we got from Nvdec against ground truth
# frame stored in queue.
gt_frame = frame_queue.get()
if gt_frame.size:
# Store recon frames to disk if necessary.
if fout:
byte_array = bytearray(recon_frame)
fout.write(byte_array)
# Measure the distance between ground truth and recon frames.
return vq_assess_func(gt_frame, recon_frame)
else:
            # Something went wrong if we're here. We've got a frame from Nvdec
# but raw frame queue is empty which shall not happen.
raise RuntimeError("unexpected empty queue.")
else:
return None
def main(gpu_id: int, input: str, output: str, width: int, height: int, verbose: bool):
res = str(width) + "x" + str(height)
decFile = open(input, "rb")
frameSize = int(width * height * 3 / 2)
frameQueue = queue.Queue()
fout = open(output, "wb") if output else None
nvDec = nvc.PyNvDecoder(
width, height, nvc.PixelFormat.NV12, nvc.CudaVideoCodec.H264, gpu_id
)
nvEnc = nvc.PyNvEncoder(
{
"preset": "P4",
"tuning_info": "high_quality",
"codec": "h264",
"profile": "high",
"s": res,
"bitrate": "10M",
},
gpu_id,
)
while True:
rawFrame = np.fromfile(decFile, np.uint8, count=frameSize)
score = single_frame_encode_measure(
rawFrame, nvEnc, nvDec, measure_psnr, frameQueue, fout
)
if score:
print("VQ score: ", "%.2f" % score)
if verbose:
print("Frame queue size: ", frameQueue.qsize())
if not frameQueue.qsize():
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"""This samples assesses Nvenc compression quality using PSNR metric.
Input file must be NV12 raw.""",
add_help=False,
)
parser.add_argument(
"-g",
type=int,
required=True,
help="GPU id, check nvidia-smi",
)
parser.add_argument(
"-i",
type=Path,
required=True,
help="Path to input raw file",
)
parser.add_argument(
"-o",
type=Path,
required=False,
help="Path to reconstructed raw file",
)
parser.add_argument(
"-w",
type=int,
required=True,
help="Raw file width",
)
parser.add_argument(
"-h",
type=int,
required=True,
help="Raw file height",
)
parser.add_argument("-v", default=False, action="store_true", help="Verbose mode")
args = parser.parse_args()
main(
args.g,
args.i.as_posix(),
args.o.as_posix() if args.o else None,
args.w,
args.h,
args.v,
)
| VideoProcessingFramework-master | samples/SampleMeasureVideoQuality.py |
#
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
import cv2
import numpy as np
import torch
import torchvision
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
try:
import PytorchNvCodec as pnvc
except ImportError as err:
raise (f"""Could not import `PytorchNvCodec`: {err}.
Please make sure it is installed! Run
`pip install git+https://github.com/NVIDIA/VideoProcessingFramework#subdirectory=src/PytorchNvCodec` or
`pip install src/PytorchNvCodec` if using a local copy of the VideoProcessingFramework repository""") # noqa
coco_names = [
"__background__",
"person",
"bicycle",
"car",
"motorcycle",
"airplane",
"bus",
"train",
"truck",
"boat",
"traffic light",
"fire hydrant",
"N/A",
"stop sign",
"parking meter",
"bench",
"bird",
"cat",
"dog",
"horse",
"sheep",
"cow",
"elephant",
"bear",
"zebra",
"giraffe",
"N/A",
"backpack",
"umbrella",
"N/A",
"N/A",
"handbag",
"tie",
"suitcase",
"frisbee",
"skis",
"snowboard",
"sports ball",
"kite",
"baseball bat",
"baseball glove",
"skateboard",
"surfboard",
"tennis racket",
"bottle",
"N/A",
"wine glass",
"cup",
"fork",
"knife",
"spoon",
"bowl",
"banana",
"apple",
"sandwich",
"orange",
"broccoli",
"carrot",
"hot dog",
"pizza",
"donut",
"cake",
"chair",
"couch",
"potted plant",
"bed",
"N/A",
"dining table",
"N/A",
"N/A",
"toilet",
"N/A",
"tv",
"laptop",
"mouse",
"remote",
"keyboard",
"cell phone",
"microwave",
"oven",
"toaster",
"sink",
"refrigerator",
"N/A",
"book",
"clock",
"vase",
"scissors",
"teddy bear",
"hair drier",
"toothbrush",
]
def tensor_to_mat(img_tensor: torch.tensor):
"""Convert planar RGB cuda float tensor to OpenCV uint8 rgb Mat"""
img_r = img_tensor[0].cpu().numpy()
img_g = img_tensor[1].cpu().numpy()
img_b = img_tensor[2].cpu().numpy()
img_rgb = np.empty((img_r.shape[0], img_r.shape[1], 3), "uint8")
img_rgb[..., 0] = img_r * 255
img_rgb[..., 1] = img_g * 255
img_rgb[..., 2] = img_b * 255
return img_rgb
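# Illustrative self-test sketch, not part of the original sample: tensor_to_mat() turns a
# planar float RGB tensor on the GPU into the HWC uint8 array OpenCV expects. The resolution
# below is an arbitrary placeholder; a CUDA device must be available when this is called.
def tensor_to_mat_self_test(height=480, width=640):
    dummy = torch.rand(3, height, width, device="cuda")  # values in [0, 1]
    mat = tensor_to_mat(dummy)
    assert mat.shape == (height, width, 3) and mat.dtype == np.uint8
    return mat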
COLORS = np.random.uniform(0, 255, size=(len(coco_names), 3))
def draw_boxes(boxes, classes, labels, image):
"""
Draws the bounding box around a detected object.
"""
out_image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
for i, box in enumerate(boxes):
color = COLORS[labels[i]]
cv2.rectangle(
out_image, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2
)
cv2.putText(
out_image,
classes[i],
(int(box[0]), int(box[1] + 15)),
cv2.FONT_HERSHEY_SIMPLEX,
0.8,
color,
2,
lineType=cv2.LINE_AA,
)
return out_image
def run_inference_on_video(gpu_id: int, input_video: str):
# Init resnet
model = torchvision.models.detection.ssd300_vgg16(pretrained=True)
model.eval()
model.to("cuda")
# Init HW decoder
nvDec = nvc.PyNvDecoder(input_video, gpu_id)
# NN expects images to be 3 channel planar RGB.
# No requirements for input image resolution, it will be rescaled internally.
target_w, target_h = nvDec.Width(), nvDec.Height()
    # Converter from NV12 which is Nvdec's native pixel format.
to_rgb = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.NV12, nvc.PixelFormat.RGB, gpu_id
)
# Converter from RGB to planar RGB because that's the way
    # pytorch likes to store the data in its tensors.
to_pln = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.RGB, nvc.PixelFormat.RGB_PLANAR, gpu_id
)
# Use bt709 and jpeg just for illustration purposes.
cc_ctx = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_709, nvc.ColorRange.JPEG)
# Decoding cycle + inference on video frames.
while True:
# Decode 1 compressed video frame to CUDA memory.
nv12_surface = nvDec.DecodeSingleSurface()
if nv12_surface.Empty():
print("Can not decode frame")
break
# Convert NV12 > RGB.
rgb24_small = to_rgb.Execute(nv12_surface, cc_ctx)
if rgb24_small.Empty():
print("Can not convert nv12 -> rgb")
break
# Convert RGB > planar RGB.
rgb24_planar = to_pln.Execute(rgb24_small, cc_ctx)
if rgb24_planar.Empty():
print("Can not convert rgb -> rgb planar")
break
# Export to PyTorch tensor.
surf_plane = rgb24_planar.PlanePtr()
img_tensor = pnvc.makefromDevicePtrUint8(
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
# This step is essential because rgb24_planar.PlanePtr() returns a simple
# 2D CUDA pitched memory allocation. Here we convert it the way
        # pytorch expects its tensor data to be arranged.
img_tensor.resize_(3, target_h, target_w)
img_tensor = img_tensor.type(dtype=torch.cuda.FloatTensor)
img_tensor = torch.divide(img_tensor, 255.0)
data_transforms = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
surface_tensor = data_transforms(img_tensor)
input_batch = surface_tensor.unsqueeze(0).to("cuda")
# Run inference.
with torch.no_grad():
outputs = model(input_batch)
# Collect segmentation results.
pred_classes = [coco_names[i] for i in outputs[0]["labels"].cpu().numpy()]
pred_scores = outputs[0]["scores"].detach().cpu().numpy()
pred_bboxes = outputs[0]["boxes"].detach().cpu().numpy()
boxes = pred_bboxes[pred_scores >= 0.5].astype(np.int32)
# Convert tensor to OpenCV Mat, draw labels and boxes.
img_rgb = tensor_to_mat(img_tensor)
image = draw_boxes(boxes, pred_classes, outputs[0]["labels"], img_rgb)
# Show in GUI.
cv2.imshow("Decode image", image)
k = cv2.waitKey(1000 // 30)
if k == 27:
print("ESC")
cv2.destroyAllWindows()
break
if cv2.getWindowProperty("Decode image", cv2.WND_PROP_VISIBLE) == -1:
break
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Provide gpu ID, paths to input video file.")
exit
gpu_id = int(sys.argv[1])
input_video = sys.argv[2]
run_inference_on_video(gpu_id, input_video)
| VideoProcessingFramework-master | samples/SampleTorchSegmentation.py |
#
# Copyright 2019 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import pycuda.driver as cuda
import PyNvCodec as nvc
import numpy as np
def decode(gpuID, encFilePath, decFilePath):
cuda.init()
cuda_ctx = cuda.Device(gpuID).retain_primary_context()
cuda_ctx.push()
cuda_str = cuda.Stream()
cuda_ctx.pop()
decFile = open(decFilePath, "wb")
nvDmx = nvc.PyFFmpegDemuxer(encFilePath)
nvDec = nvc.PyNvDecoder(
nvDmx.Width(),
nvDmx.Height(),
nvDmx.Format(),
nvDmx.Codec(),
cuda_ctx.handle,
cuda_str.handle,
)
nvCvt = nvc.PySurfaceConverter(
nvDmx.Width(),
nvDmx.Height(),
nvDmx.Format(),
nvc.PixelFormat.YUV420,
cuda_ctx.handle,
cuda_str.handle,
)
nvDwn = nvc.PySurfaceDownloader(
nvDmx.Width(), nvDmx.Height(), nvCvt.Format(), cuda_ctx.handle, cuda_str.handle
)
packet = np.ndarray(shape=(0), dtype=np.uint8)
frameSize = int(nvDmx.Width() * nvDmx.Height() * 3 / 2)
rawFrame = np.ndarray(shape=(frameSize), dtype=np.uint8)
pdata_in, pdata_out = nvc.PacketData(), nvc.PacketData()
# Determine colorspace conversion parameters.
# Some video streams don't specify these parameters so default values
# are most widespread bt601 and mpeg.
cspace, crange = nvDmx.ColorSpace(), nvDmx.ColorRange()
if nvc.ColorSpace.UNSPEC == cspace:
cspace = nvc.ColorSpace.BT_601
if nvc.ColorRange.UDEF == crange:
crange = nvc.ColorRange.MPEG
cc_ctx = nvc.ColorspaceConversionContext(cspace, crange)
print("Color space: ", str(cspace))
print("Color range: ", str(crange))
while True:
# Demuxer has sync design, it returns packet every time it's called.
# If demuxer can't return packet it usually means EOF.
if not nvDmx.DemuxSinglePacket(packet):
break
# Get last packet data to obtain frame timestamp
nvDmx.LastPacketData(pdata_in)
# Decoder is async by design.
# As it consumes packets from demuxer one at a time it may not return
# decoded surface every time the decoding function is called.
surface_nv12 = nvDec.DecodeSurfaceFromPacket(pdata_in, packet, pdata_out)
if not surface_nv12.Empty():
surface_yuv420 = nvCvt.Execute(surface_nv12, cc_ctx)
if surface_yuv420.Empty():
break
if not nvDwn.DownloadSingleSurface(surface_yuv420, rawFrame):
break
bits = bytearray(rawFrame)
decFile.write(bits)
    # Now we flush the decoder to empty the decoded frames queue.
while True:
surface_nv12 = nvDec.FlushSingleSurface()
if surface_nv12.Empty():
break
surface_yuv420 = nvCvt.Execute(surface_nv12, cc_ctx)
if surface_yuv420.Empty():
break
if not nvDwn.DownloadSingleSurface(surface_yuv420, rawFrame):
break
bits = bytearray(rawFrame)
decFile.write(bits)
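# Illustrative sketch, not part of the original sample: the colorspace-defaulting logic from
# decode() above, factored into a reusable helper.
def make_cc_ctx(demuxer: nvc.PyFFmpegDemuxer) -> nvc.ColorspaceConversionContext:
    cspace, crange = demuxer.ColorSpace(), demuxer.ColorRange()
    if nvc.ColorSpace.UNSPEC == cspace:
        # Most widespread default when the stream doesn't say otherwise.
        cspace = nvc.ColorSpace.BT_601
    if nvc.ColorRange.UDEF == crange:
        crange = nvc.ColorRange.MPEG
    return nvc.ColorspaceConversionContext(cspace, crange)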
if __name__ == "__main__":
print("This sample decodes input video to raw YUV420 file on given GPU.")
print("Usage: SampleDecode.py $gpu_id $input_file $output_file.")
if len(sys.argv) < 4:
print("Provide gpu ID, path to input and output files")
exit(1)
gpuID = int(sys.argv[1])
encFilePath = sys.argv[2]
decFilePath = sys.argv[3]
decode(gpuID, encFilePath, decFilePath)
| VideoProcessingFramework-master | samples/SampleDemuxDecode.py |
#
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Starting from Python 3.8 DLL search policy has changed.
# We need to add path to CUDA DLLs explicitly.
import sys
import os
import torch
import torchvision
if os.name == "nt":
# Add CUDA_PATH env variable
cuda_path = os.environ["CUDA_PATH"]
if cuda_path:
os.add_dll_directory(cuda_path)
else:
print("CUDA_PATH environment variable is not set.", file=sys.stderr)
print("Can't set CUDA DLLs search path.", file=sys.stderr)
exit(1)
# Add PATH as well for minor CUDA releases
sys_path = os.environ["PATH"]
if sys_path:
paths = sys_path.split(";")
for path in paths:
if os.path.isdir(path):
os.add_dll_directory(path)
else:
print("PATH environment variable is not set.", file=sys.stderr)
exit(1)
import PyNvCodec as nvc
try:
import PytorchNvCodec as pnvc
except ImportError as err:
raise (f"""Could not import `PytorchNvCodec`: {err}.
Please make sure it is installed! Run
`pip install git+https://github.com/NVIDIA/VideoProcessingFramework#subdirectory=src/PytorchNvCodec` or
`pip install src/PytorchNvCodec` if using a local copy of the VideoProcessingFramework repository""") # noqa
categories = [
"tench",
"goldfish",
"great white shark",
"tiger shark",
"hammerhead",
"electric ray",
"stingray",
"cock",
"hen",
"ostrich",
"brambling",
"goldfinch",
"house finch",
"junco",
"indigo bunting",
"robin",
"bulbul",
"jay",
"magpie",
"chickadee",
"water ouzel",
"kite",
"bald eagle",
"vulture",
"great grey owl",
"European fire salamander",
"common newt",
"eft",
"spotted salamander",
"axolotl",
"bullfrog",
"tree frog",
"tailed frog",
"loggerhead",
"leatherback turtle",
"mud turtle",
"terrapin",
"box turtle",
"banded gecko",
"common iguana",
"American chameleon",
"whiptail",
"agama",
"frilled lizard",
"alligator lizard",
"Gila monster",
"green lizard",
"African chameleon",
"Komodo dragon",
"African crocodile",
"American alligator",
"triceratops",
"thunder snake",
"ringneck snake",
"hognose snake",
"green snake",
"king snake",
"garter snake",
"water snake",
"vine snake",
"night snake",
"boa constrictor",
"rock python",
"Indian cobra",
"green mamba",
"sea snake",
"horned viper",
"diamondback",
"sidewinder",
"trilobite",
"harvestman",
"scorpion",
"black and gold garden spider",
"barn spider",
"garden spider",
"black widow",
"tarantula",
"wolf spider",
"tick",
"centipede",
"black grouse",
"ptarmigan",
"ruffed grouse",
"prairie chicken",
"peacock",
"quail",
"partridge",
"African grey",
"macaw",
"sulphur-crested cockatoo",
"lorikeet",
"coucal",
"bee eater",
"hornbill",
"hummingbird",
"jacamar",
"toucan",
"drake",
"red-breasted merganser",
"goose",
"black swan",
"tusker",
"echidna",
"platypus",
"wallaby",
"koala",
"wombat",
"jellyfish",
"sea anemone",
"brain coral",
"flatworm",
"nematode",
"conch",
"snail",
"slug",
"sea slug",
"chiton",
"chambered nautilus",
"Dungeness crab",
"rock crab",
"fiddler crab",
"king crab",
"American lobster",
"spiny lobster",
"crayfish",
"hermit crab",
"isopod",
"white stork",
"black stork",
"spoonbill",
"flamingo",
"little blue heron",
"American egret",
"bittern",
"crane",
"limpkin",
"European gallinule",
"American coot",
"bustard",
"ruddy turnstone",
"red-backed sandpiper",
"redshank",
"dowitcher",
"oystercatcher",
"pelican",
"king penguin",
"albatross",
"grey whale",
"killer whale",
"dugong",
"sea lion",
"Chihuahua",
"Japanese spaniel",
"Maltese dog",
"Pekinese",
"Shih-Tzu",
"Blenheim spaniel",
"papillon",
"toy terrier",
"Rhodesian ridgeback",
"Afghan hound",
"basset",
"beagle",
"bloodhound",
"bluetick",
"black-and-tan coonhound",
"Walker hound",
"English foxhound",
"redbone",
"borzoi",
"Irish wolfhound",
"Italian greyhound",
"whippet",
"Ibizan hound",
"Norwegian elkhound",
"otterhound",
"Saluki",
"Scottish deerhound",
"Weimaraner",
"Staffordshire bullterrier",
"American Staffordshire terrier",
"Bedlington terrier",
"Border terrier",
"Kerry blue terrier",
"Irish terrier",
"Norfolk terrier",
"Norwich terrier",
"Yorkshire terrier",
"wire-haired fox terrier",
"Lakeland terrier",
"Sealyham terrier",
"Airedale",
"cairn",
"Australian terrier",
"Dandie Dinmont",
"Boston bull",
"miniature schnauzer",
"giant schnauzer",
"standard schnauzer",
"Scotch terrier",
"Tibetan terrier",
"silky terrier",
"soft-coated wheaten terrier",
"West Highland white terrier",
"Lhasa",
"flat-coated retriever",
"curly-coated retriever",
"golden retriever",
"Labrador retriever",
"Chesapeake Bay retriever",
"German short-haired pointer",
"vizsla",
"English setter",
"Irish setter",
"Gordon setter",
"Brittany spaniel",
"clumber",
"English springer",
"Welsh springer spaniel",
"cocker spaniel",
"Sussex spaniel",
"Irish water spaniel",
"kuvasz",
"schipperke",
"groenendael",
"malinois",
"briard",
"kelpie",
"komondor",
"Old English sheepdog",
"Shetland sheepdog",
"collie",
"Border collie",
"Bouvier des Flandres",
"Rottweiler",
"German shepherd",
"Doberman",
"miniature pinscher",
"Greater Swiss Mountain dog",
"Bernese mountain dog",
"Appenzeller",
"EntleBucher",
"boxer",
"bull mastiff",
"Tibetan mastiff",
"French bulldog",
"Great Dane",
"Saint Bernard",
"Eskimo dog",
"malamute",
"Siberian husky",
"dalmatian",
"affenpinscher",
"basenji",
"pug",
"Leonberg",
"Newfoundland",
"Great Pyrenees",
"Samoyed",
"Pomeranian",
"chow",
"keeshond",
"Brabancon griffon",
"Pembroke",
"Cardigan",
"toy poodle",
"miniature poodle",
"standard poodle",
"Mexican hairless",
"timber wolf",
"white wolf",
"red wolf",
"coyote",
"dingo",
"dhole",
"African hunting dog",
"hyena",
"red fox",
"kit fox",
"Arctic fox",
"grey fox",
"tabby",
"tiger cat",
"Persian cat",
"Siamese cat",
"Egyptian cat",
"cougar",
"lynx",
"leopard",
"snow leopard",
"jaguar",
"lion",
"tiger",
"cheetah",
"brown bear",
"American black bear",
"ice bear",
"sloth bear",
"mongoose",
"meerkat",
"tiger beetle",
"ladybug",
"ground beetle",
"long-horned beetle",
"leaf beetle",
"dung beetle",
"rhinoceros beetle",
"weevil",
"fly",
"bee",
"ant",
"grasshopper",
"cricket",
"walking stick",
"cockroach",
"mantis",
"cicada",
"leafhopper",
"lacewing",
"dragonfly",
"damselfly",
"admiral",
"ringlet",
"monarch",
"cabbage butterfly",
"sulphur butterfly",
"lycaenid",
"starfish",
"sea urchin",
"sea cucumber",
"wood rabbit",
"hare",
"Angora",
"hamster",
"porcupine",
"fox squirrel",
"marmot",
"beaver",
"guinea pig",
"sorrel",
"zebra",
"hog",
"wild boar",
"warthog",
"hippopotamus",
"ox",
"water buffalo",
"bison",
"ram",
"bighorn",
"ibex",
"hartebeest",
"impala",
"gazelle",
"Arabian camel",
"llama",
"weasel",
"mink",
"polecat",
"black-footed ferret",
"otter",
"skunk",
"badger",
"armadillo",
"three-toed sloth",
"orangutan",
"gorilla",
"chimpanzee",
"gibbon",
"siamang",
"guenon",
"patas",
"baboon",
"macaque",
"langur",
"colobus",
"proboscis monkey",
"marmoset",
"capuchin",
"howler monkey",
"titi",
"spider monkey",
"squirrel monkey",
"Madagascar cat",
"indri",
"Indian elephant",
"African elephant",
"lesser panda",
"giant panda",
"barracouta",
"eel",
"coho",
"rock beauty",
"anemone fish",
"sturgeon",
"gar",
"lionfish",
"puffer",
"abacus",
"abaya",
"academic gown",
"accordion",
"acoustic guitar",
"aircraft carrier",
"airliner",
"airship",
"altar",
"ambulance",
"amphibian",
"analog clock",
"apiary",
"apron",
"ashcan",
"assault rifle",
"backpack",
"bakery",
"balance beam",
"balloon",
"ballpoint",
"Band Aid",
"banjo",
"bannister",
"barbell",
"barber chair",
"barbershop",
"barn",
"barometer",
"barrel",
"barrow",
"baseball",
"basketball",
"bassinet",
"bassoon",
"bathing cap",
"bath towel",
"bathtub",
"beach wagon",
"beacon",
"beaker",
"bearskin",
"beer bottle",
"beer glass",
"bell cote",
"bib",
"bicycle-built-for-two",
"bikini",
"binder",
"binoculars",
"birdhouse",
"boathouse",
"bobsled",
"bolo tie",
"bonnet",
"bookcase",
"bookshop",
"bottlecap",
"bow",
"bow tie",
"brass",
"brassiere",
"breakwater",
"breastplate",
"broom",
"bucket",
"buckle",
"bulletproof vest",
"bullet train",
"butcher shop",
"cab",
"caldron",
"candle",
"cannon",
"canoe",
"can opener",
"cardigan",
"car mirror",
"carousel",
"carpenter's kit",
"carton",
"car wheel",
"cash machine",
"cassette",
"cassette player",
"castle",
"catamaran",
"CD player",
"cello",
"cellular telephone",
"chain",
"chainlink fence",
"chain mail",
"chain saw",
"chest",
"chiffonier",
"chime",
"china cabinet",
"Christmas stocking",
"church",
"cinema",
"cleaver",
"cliff dwelling",
"cloak",
"clog",
"cocktail shaker",
"coffee mug",
"coffeepot",
"coil",
"combination lock",
"computer keyboard",
"confectionery",
"container ship",
"convertible",
"corkscrew",
"cornet",
"cowboy boot",
"cowboy hat",
"cradle",
"crane",
"crash helmet",
"crate",
"crib",
"Crock Pot",
"croquet ball",
"crutch",
"cuirass",
"dam",
"desk",
"desktop computer",
"dial telephone",
"diaper",
"digital clock",
"digital watch",
"dining table",
"dishrag",
"dishwasher",
"disk brake",
"dock",
"dogsled",
"dome",
"doormat",
"drilling platform",
"drum",
"drumstick",
"dumbbell",
"Dutch oven",
"electric fan",
"electric guitar",
"electric locomotive",
"entertainment center",
"envelope",
"espresso maker",
"face powder",
"feather boa",
"file",
"fireboat",
"fire engine",
"fire screen",
"flagpole",
"flute",
"folding chair",
"football helmet",
"forklift",
"fountain",
"fountain pen",
"four-poster",
"freight car",
"French horn",
"frying pan",
"fur coat",
"garbage truck",
"gasmask",
"gas pump",
"goblet",
"go-kart",
"golf ball",
"golfcart",
"gondola",
"gong",
"gown",
"grand piano",
"greenhouse",
"grille",
"grocery store",
"guillotine",
"hair slide",
"hair spray",
"half track",
"hammer",
"hamper",
"hand blower",
"hand-held computer",
"handkerchief",
"hard disc",
"harmonica",
"harp",
"harvester",
"hatchet",
"holster",
"home theater",
"honeycomb",
"hook",
"hoopskirt",
"horizontal bar",
"horse cart",
"hourglass",
"iPod",
"iron",
"jack-o'-lantern",
"jean",
"jeep",
"jersey",
"jigsaw puzzle",
"jinrikisha",
"joystick",
"kimono",
"knee pad",
"knot",
"lab coat",
"ladle",
"lampshade",
"laptop",
"lawn mower",
"lens cap",
"letter opener",
"library",
"lifeboat",
"lighter",
"limousine",
"liner",
"lipstick",
"Loafer",
"lotion",
"loudspeaker",
"loupe",
"lumbermill",
"magnetic compass",
"mailbag",
"mailbox",
"maillot",
"maillot",
"manhole cover",
"maraca",
"marimba",
"mask",
"matchstick",
"maypole",
"maze",
"measuring cup",
"medicine chest",
"megalith",
"microphone",
"microwave",
"military uniform",
"milk can",
"minibus",
"miniskirt",
"minivan",
"missile",
"mitten",
"mixing bowl",
"mobile home",
"Model T",
"modem",
"monastery",
"monitor",
"moped",
"mortar",
"mortarboard",
"mosque",
"mosquito net",
"motor scooter",
"mountain bike",
"mountain tent",
"mouse",
"mousetrap",
"moving van",
"muzzle",
"nail",
"neck brace",
"necklace",
"nipple",
"notebook",
"obelisk",
"oboe",
"ocarina",
"odometer",
"oil filter",
"organ",
"oscilloscope",
"overskirt",
"oxcart",
"oxygen mask",
"packet",
"paddle",
"paddlewheel",
"padlock",
"paintbrush",
"pajama",
"palace",
"panpipe",
"paper towel",
"parachute",
"parallel bars",
"park bench",
"parking meter",
"passenger car",
"patio",
"pay-phone",
"pedestal",
"pencil box",
"pencil sharpener",
"perfume",
"Petri dish",
"photocopier",
"pick",
"pickelhaube",
"picket fence",
"pickup",
"pier",
"piggy bank",
"pill bottle",
"pillow",
"ping-pong ball",
"pinwheel",
"pirate",
"pitcher",
"plane",
"planetarium",
"plastic bag",
"plate rack",
"plow",
"plunger",
"Polaroid camera",
"pole",
"police van",
"poncho",
"pool table",
"pop bottle",
"pot",
"potter's wheel",
"power drill",
"prayer rug",
"printer",
"prison",
"projectile",
"projector",
"puck",
"punching bag",
"purse",
"quill",
"quilt",
"racer",
"racket",
"radiator",
"radio",
"radio telescope",
"rain barrel",
"recreational vehicle",
"reel",
"reflex camera",
"refrigerator",
"remote control",
"restaurant",
"revolver",
"rifle",
"rocking chair",
"rotisserie",
"rubber eraser",
"rugby ball",
"rule",
"running shoe",
"safe",
"safety pin",
"saltshaker",
"sandal",
"sarong",
"sax",
"scabbard",
"scale",
"school bus",
"schooner",
"scoreboard",
"screen",
"screw",
"screwdriver",
"seat belt",
"sewing machine",
"shield",
"shoe shop",
"shoji",
"shopping basket",
"shopping cart",
"shovel",
"shower cap",
"shower curtain",
"ski",
"ski mask",
"sleeping bag",
"slide rule",
"sliding door",
"slot",
"snorkel",
"snowmobile",
"snowplow",
"soap dispenser",
"soccer ball",
"sock",
"solar dish",
"sombrero",
"soup bowl",
"space bar",
"space heater",
"space shuttle",
"spatula",
"speedboat",
"spider web",
"spindle",
"sports car",
"spotlight",
"stage",
"steam locomotive",
"steel arch bridge",
"steel drum",
"stethoscope",
"stole",
"stone wall",
"stopwatch",
"stove",
"strainer",
"streetcar",
"stretcher",
"studio couch",
"stupa",
"submarine",
"suit",
"sundial",
"sunglass",
"sunglasses",
"sunscreen",
"suspension bridge",
"swab",
"sweatshirt",
"swimming trunks",
"swing",
"switch",
"syringe",
"table lamp",
"tank",
"tape player",
"teapot",
"teddy",
"television",
"tennis ball",
"thatch",
"theater curtain",
"thimble",
"thresher",
"throne",
"tile roof",
"toaster",
"tobacco shop",
"toilet seat",
"torch",
"totem pole",
"tow truck",
"toyshop",
"tractor",
"trailer truck",
"tray",
"trench coat",
"tricycle",
"trimaran",
"tripod",
"triumphal arch",
"trolleybus",
"trombone",
"tub",
"turnstile",
"typewriter keyboard",
"umbrella",
"unicycle",
"upright",
"vacuum",
"vase",
"vault",
"velvet",
"vending machine",
"vestment",
"viaduct",
"violin",
"volleyball",
"waffle iron",
"wall clock",
"wallet",
"wardrobe",
"warplane",
"washbasin",
"washer",
"water bottle",
"water jug",
"water tower",
"whiskey jug",
"whistle",
"wig",
"window screen",
"window shade",
"Windsor tie",
"wine bottle",
"wing",
"wok",
"wooden spoon",
"wool",
"worm fence",
"wreck",
"yawl",
"yurt",
"web site",
"comic book",
"crossword puzzle",
"street sign",
"traffic light",
"book jacket",
"menu",
"plate",
"guacamole",
"consomme",
"hot pot",
"trifle",
"ice cream",
"ice lolly",
"French loaf",
"bagel",
"pretzel",
"cheeseburger",
"hotdog",
"mashed potato",
"head cabbage",
"broccoli",
"cauliflower",
"zucchini",
"spaghetti squash",
"acorn squash",
"butternut squash",
"cucumber",
"artichoke",
"bell pepper",
"cardoon",
"mushroom",
"Granny Smith",
"strawberry",
"orange",
"lemon",
"fig",
"pineapple",
"banana",
"jackfruit",
"custard apple",
"pomegranate",
"hay",
"carbonara",
"chocolate sauce",
"dough",
"meat loaf",
"pizza",
"potpie",
"burrito",
"red wine",
"espresso",
"cup",
"eggnog",
"alp",
"bubble",
"cliff",
"coral reef",
"geyser",
"lakeside",
"promontory",
"sandbar",
"seashore",
"valley",
"volcano",
"ballplayer",
"groom",
"scuba diver",
"rapeseed",
"daisy",
"yellow lady's slipper",
"corn",
"acorn",
"hip",
"buckeye",
"coral fungus",
"agaric",
"gyromitra",
"stinkhorn",
"earthstar",
"hen-of-the-woods",
"bolete",
"ear",
"toilet tissue",
]
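# The list above appears to be the standard ImageNet-1k label set in class-index order;
# run_inference_on_video below uses it to map the model's top-5 class indices to readable names.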
def run_inference_on_video(gpu_id: int, input_video: str):
# Init resnet
model = torchvision.models.resnet50(pretrained=True)
model.eval()
model.to("cuda")
    # ResNet-50 expects 3-channel planar RGB input of at least 224x224 pixels.
target_w, target_h = 224, 224
    # Init HW decoder, converter, resizer + tensor that video frames will be
    # exported to
nvDec = nvc.PyNvDecoder(input_video, gpu_id)
to_yuv = nvc.PySurfaceConverter(
nvDec.Width(),
nvDec.Height(),
nvc.PixelFormat.NV12,
nvc.PixelFormat.YUV420,
gpu_id,
)
to_dim = nvc.PySurfaceResizer(target_w, target_h, nvc.PixelFormat.YUV420, gpu_id)
to_rgb = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.YUV420, nvc.PixelFormat.RGB, gpu_id
)
to_pln = nvc.PySurfaceConverter(
target_w, target_h, nvc.PixelFormat.RGB, nvc.PixelFormat.RGB_PLANAR, gpu_id
)
    # Use the widely used BT.601 color space and MPEG (limited) color range just for illustration purposes.
cc_ctx = nvc.ColorspaceConversionContext(nvc.ColorSpace.BT_601, nvc.ColorRange.MPEG)
# Decoding cycle + inference on video frames.
while True:
# Decode 1 compressed video frame to CUDA memory.
nv12_surface = nvDec.DecodeSingleSurface()
if nv12_surface.Empty():
print("Can not decode frame")
break
# Convert from NV12 to YUV420.
# This extra step is required because not all NV12 -> RGB conversions
# implemented in NPP support all color spaces and ranges.
yuv420 = to_yuv.Execute(nv12_surface, cc_ctx)
if yuv420.Empty():
print("Can not convert nv12 -> yuv420")
break
# Downscale YUV420.
yuv_small = to_dim.Execute(yuv420)
if yuv_small.Empty():
print("Can not downscale yuv420 surface")
break
# Convert from YUV420 to interleaved RGB.
rgb24_small = to_rgb.Execute(yuv_small, cc_ctx)
if rgb24_small.Empty():
print("Can not convert yuv420 -> rgb")
break
# Convert to planar RGB.
rgb24_planar = to_pln.Execute(rgb24_small, cc_ctx)
if rgb24_planar.Empty():
print("Can not convert rgb -> rgb planar")
break
# Export to PyTorch tensor
surf_plane = rgb24_planar.PlanePtr()
img_tensor = pnvc.makefromDevicePtrUint8(
surf_plane.GpuMem(),
surf_plane.Width(),
surf_plane.Height(),
surf_plane.Pitch(),
surf_plane.ElemSize(),
)
img_tensor.resize_(3, target_h, target_w)
img_tensor = img_tensor.type(dtype=torch.cuda.FloatTensor)
img_tensor = torch.divide(img_tensor, 255.0)
data_transforms = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
surface_tensor = data_transforms(img_tensor)
input_batch = surface_tensor.unsqueeze(0).to("cuda")
# Run inference.
with torch.no_grad():
output = model(input_batch)
probabilities = torch.nn.functional.softmax(output[0], dim=0)
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
print()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Provide gpu ID, paths to input video file.")
exit
gpu_id = int(sys.argv[1])
input_video = sys.argv[2]
run_inference_on_video(gpu_id, input_video)
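    # A hedged usage sketch (not part of the original sample): the script expects a GPU ordinal and
    # a video path on the command line; the file name below is hypothetical.
    #
    #   python SampleTorchResnet.py 0 ./input.mp4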
| VideoProcessingFramework-master | samples/SampleTorchResnet.py |
"""
"""
__author__ = "NVIDIA"
__copyright__ = "Copyright 2022, NVIDIA"
__credits__ = []
__license__ = "Apache 2.0"
__version__ = "0.1.0"
__maintainer__ = "NVIDIA"
__email__ = "TODO"
__status__ = "Production"
try:
import torch # has to be imported to have libc10 available
# Import native module
from _PytorchNvCodec import *
except ImportError:
import distutils.sysconfig
from os.path import join, dirname
raise RuntimeError("Failed to import native module _PytorchNvCodec! "
f"Please check whether \"{join(dirname(__file__), '_PytorchNvCodec' + distutils.sysconfig.get_config_var('EXT_SUFFIX'))}\"" # noqa
" exists and can find all library dependencies (CUDA, ffmpeg).\n"
"On Unix systems, you can use `ldd` on the file to see whether it can find all dependencies.\n"
"On Windows, you can use \"dumpbin /dependents\" in a Visual Studio command prompt or\n"
"https://github.com/lucasg/Dependencies/releases.")
| VideoProcessingFramework-master | src/PytorchNvCodec/__init__.py |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
if __name__ == "__main__":
setup(
name="PytorchNvCodec",
install_requires=["torch"],
ext_modules=[CUDAExtension("_PytorchNvCodec", ["src/PytorchNvCodec.cpp"])],
packages=["PytorchNvCodec"],
cmdclass={"build_ext": BuildExtension},
package_dir={"": "../"},
cmake_install_dir="../",
)
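    # A hedged build example (assumes a CUDA toolkit and a CUDA-enabled torch install are present;
    # the project's own build system may drive this setup.py differently):
    #
    #   python setup.py build_ext --inplace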
| VideoProcessingFramework-master | src/PytorchNvCodec/setup.py |
"""
"""
__author__ = "NVIDIA"
__copyright__ = "Copyright 2022, NVIDIA"
__credits__ = []
__license__ = "Apache 2.0"
__version__ = "0.1.0"
__maintainer__ = "NVIDIA"
__email__ = "TODO"
__status__ = "Production"
try:
# Import native module
from ._PyNvCodec import * # noqa
except ImportError:
import distutils.sysconfig
from os.path import join, dirname
raise RuntimeError("Failed to import native module _PyNvCodec! "
f"Please check whether \"{join(dirname(__file__), '_PyNvCodec' + distutils.sysconfig.get_config_var('EXT_SUFFIX'))}\"" # noqa
" exists and can find all library dependencies (CUDA, ffmpeg).\n"
"On Unix systems, you can use `ldd` on the file to see whether it can find all dependencies.\n"
"On Windows, you can use \"dumpbin /dependents\" in a Visual Studio command prompt or\n"
"https://github.com/lucasg/Dependencies/releases.")
| VideoProcessingFramework-master | src/PyNvCodec/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point, main file to run to launch jobs with the HP tool."""
import math
import hydra
import omegaconf
from autoconfig.search_config import search_config
from autoconfig.utils import convert_to_cli
from omegaconf import OmegaConf
OmegaConf.register_new_resolver("multiply", lambda x, y: x * y, replace=True)
OmegaConf.register_new_resolver("divide_ceil", lambda x, y: int(math.ceil(x / y)), replace=True)
OmegaConf.register_new_resolver("divide_floor", lambda x, y: int(x // y), replace=True)
@hydra.main(config_path="conf", config_name="config")
def main(cfg: omegaconf.dictconfig.DictConfig) -> None:
"""
    Main function of the entire pipeline; it reads the config using hydra and calls search_config.
:param omegaconf.dictconfig.DictConfig cfg: OmegaConf object, read using the @hydra.main decorator.
:return: None
"""
hydra_args = convert_to_cli(cfg)
search_config(cfg=cfg, hydra_args=hydra_args)
if __name__ == "__main__":
main()
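    # A minimal sketch (not part of the original file) of what the resolvers registered above do once
    # a config is resolved; the keys are made up for illustration:
    #
    #   cfg = OmegaConf.create({"a": 3, "b": 2,
    #                           "prod": "${multiply:${a},${b}}",        # -> 6
    #                           "ceil": "${divide_ceil:${a},${b}}",     # -> 2
    #                           "floor": "${divide_floor:${a},${b}}"})  # -> 1
    #   assert cfg.prod == 6 and cfg.ceil == 2 and cfg.floor == 1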
| NeMo-Megatron-Launcher-master | auto_configurator/main.py |
NeMo-Megatron-Launcher-master | auto_configurator/tests/__init__.py |
|
NeMo-Megatron-Launcher-master | auto_configurator/tests/base_configs_tests/__init__.py |
|
from omegaconf import OmegaConf
class TestBaseConfigs:
def test_gpt3_base_config(self):
conf = OmegaConf.load("base_configs/gpt3.yaml")
s = """
run:
name: gpt3_126m
results_dir: ${base_results_dir}/${.name}
time_limit: "1-00:00:00"
dependency: "singleton"
trainer:
num_nodes: 8
devices: 8
accelerator: gpu
precision: bf16
logger: False
enable_checkpointing: False
replace_sampler_ddp: False
max_epochs: null
max_steps: 600000
max_time: "00:23:30:00"
log_every_n_steps: 1
val_check_interval: 50
limit_val_batches: 1
limit_test_batches: 1
accumulate_grad_batches: 1
gradient_clip_val: 1.0
exp_manager:
explicit_log_dir: ${training.run.results_dir}/results
exp_dir: null
name: megatron_gpt
create_wandb_logger: False
wandb_logger_kwargs:
project: nemo_gpt3
name: ${training.run.name}
resume_if_exists: True
resume_ignore_no_checkpoint: True
create_checkpoint_callback: True
checkpoint_callback_params:
monitor: val_loss
save_top_k: 10
mode: min
always_save_nemo: False
save_nemo_on_train_end: False
filename: 'megatron_gpt--{val_loss:.2f}-{step}-{consumed_samples}'
model_parallel_size: ${multiply:${training.model.tensor_model_parallel_size}, ${training.model.pipeline_model_parallel_size}}
log_step_timing: True
step_timing_kwargs:
sync_cuda: True
buffer_size: 5
model:
# model parallelism
micro_batch_size: 4
global_batch_size: 256
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
virtual_pipeline_model_parallel_size: null
resume_from_checkpoint: null
# model architecture
encoder_seq_length: 2048
max_position_embeddings: 2048
num_layers: 12
hidden_size: 768
ffn_hidden_size: ${multiply:4, ${.hidden_size}}
num_attention_heads: 12
init_method_std: 0.023
hidden_dropout: 0.1
kv_channels: null
apply_query_key_layer_scaling: True
layernorm_epsilon: 1e-5
make_vocab_size_divisible_by: 128
pre_process: True
post_process: True
persist_layer_norm: True
gradient_as_bucket_view: True
sync_batch_comm: False
# Fusion
grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce
gradient_accumulation_fusion: True # Fuse weight gradient accumulation to GEMMs
bias_activation_fusion: True # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
bias_dropout_add_fusion: True # Use a kernel that fuses the bias addition, dropout and residual connection addition.
masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with it's mask.
activations_checkpoint_granularity: selective
activations_checkpoint_method: block
activations_checkpoint_num_layers: 0
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: True
tokenizer:
library: 'megatron'
type: 'GPT2BPETokenizer'
model: null
delimiter: null
vocab_file: ${data_dir}/bpe/vocab.json
merge_file: ${data_dir}/bpe/merges.txt
# precision
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: False
fp16_lm_cross_entropy: False
# Megatron O2-style half-precision
megatron_amp_O2: True
grad_allreduce_chunk_size_mb: 125
## Transformer Engine
transformer_engine: False
fp8: False # enables fp8 in TransformerLayer forward
fp8_e4m3: False # sets fp8_format = recipe.Format.E4M3
fp8_hybrid: False # sets fp8_format = recipe.Format.HYBRID
fp8_margin: 0 # scaling margin
fp8_interval: 1 # scaling update interval
fp8_amax_history_len: 1 # Number of steps for which amax history is recorded per tensor
fp8_amax_compute_algo: most_recent # 'most_recent' or 'max'. Algorithm for computing amax from history
use_emha: False
# miscellaneous
seed: 1234
use_cpu_initialization: False
onnx_safe: False
apex_transformer_log_level: 30
# Nsys profiling options
nsys_profile:
enabled: False
trace: [nvtx,cuda]
start_step: 10 # Global batch to start profiling
end_step: 10 # Global batch to end profiling
ranks: [0] # Global rank IDs to profile
gen_shape: False # Generate model and kernel details including input shapes
optim:
name: distributed_fused_adam
overlap_grad_sync: False
bucket_cap_mb: ${training.model.grad_allreduce_chunk_size_mb}
lr: 6e-4
weight_decay: 0.1
betas:
- 0.9
- 0.95
sched:
name: CosineAnnealing
warmup_steps: 636
constant_steps: 100000
min_lr: 6e-5
data:
data_impl: mmap
splits_string: "99990,8,2"
seq_length: 2048
skip_warmup: True
num_workers: 2
dataloader_type: single
reset_position_ids: False
reset_attention_mask: False
eod_mask_loss: False
index_mapping_dir: null
data_prefix:
- 1.0
- ${data_dir}/my-gpt3_00_text_document
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"base_configs/gpt3.yaml must be set to {expected} but it currently is {conf}."
def test_t5_base_config(self):
conf = OmegaConf.load("base_configs/t5.yaml")
s = """
run:
name: t5_220m
results_dir: ${base_results_dir}/${.name}
time_limit: "7-00:00:00"
dependency: "singleton"
name: megatron_t5
restore_from_path: null # used when starting from a .nemo file
trainer:
num_nodes: 4
devices: 8
accelerator: gpu
precision: bf16
logger: False # logger provided by exp_manager
enable_checkpointing: False
replace_sampler_ddp: False
max_epochs: null
max_steps: 1000000 # consumed_samples = global_step * global_batch_size
max_time: "06:23:30:00"
log_every_n_steps: 1
val_check_interval: 50
limit_val_batches: 1
limit_test_batches: 1
accumulate_grad_batches: 1
gradient_clip_val: 1.0
exp_manager:
explicit_log_dir: ${training.run.results_dir}/results
exp_dir: null
name: megatron_t5
create_wandb_logger: False
wandb_logger_kwargs:
project: nemo_t5
name: ${training.run.name}
resume_if_exists: True
resume_ignore_no_checkpoint: True
create_checkpoint_callback: True
checkpoint_callback_params:
monitor: val_loss
save_top_k: 10
mode: min
always_save_nemo: False # saves nemo file during validation, not implemented for model parallel
save_nemo_on_train_end: False # not recommended when training large models on clusters with short time limits
filename: 'megatron_t5--{val_loss:.2f}-{step}-{consumed_samples}'
model_parallel_size: ${multiply:${training.model.tensor_model_parallel_size}, ${training.model.pipeline_model_parallel_size}}
log_step_timing: True
step_timing_kwargs:
sync_cuda: True
buffer_size: 5
model:
# model parallelism
micro_batch_size: 64
global_batch_size: 2048 # will use more micro batches to reach global batch size
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
resume_from_checkpoint: null # manually set the checkpoint file to load from
pipeline_model_parallel_split_rank: ${divide_floor:${.pipeline_model_parallel_size}, 2}
# model architecture
make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency.
pre_process: True # add embedding
post_process: True # add pooler
megatron_amp_O2: True # use AMP with O2 style mixed precision instead of native amp on-the-fly weight autocasting.
grad_allreduce_chunk_size_mb: 125
gradient_as_bucket_view: True # Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)
sync_batch_comm: False
seq_length: 512
max_position_embeddings: ${.seq_length}
encoder:
num_layers: 12
hidden_size: 768
ffn_hidden_size: 2048 # Transformer FFN hidden size. 4 * hidden_size.
num_attention_heads: 12
kv_channels: 64 # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
init_method_std: 0.015 # Standard deviation of the zero mean normal distribution used for weight initialization.')
hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
attention_dropout: 0.1 # Dropout probability in the attention layer.
position_embedding_type: 'learned_absolute' # Position embedding type. Options ['learned_absolute', 'relative']
relative_attention_num_buckets: 32 # Relative position number of buckets for computing the bias
relative_attention_max_distance: 128 # max_distance to keep relative distance in the attention_num_buckets.
relative_position_bias_self_attention_only: True # Whether to only use relative position bias for self attention only.
apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
layernorm_epsilon: 1e-5
persist_layer_norm: True # Use of persistent fused layer norm kernel.
bias_activation_fusion: True # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce
masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with it's mask.
bias_dropout_add_fusion: True # Use a kernel that fuses the bias addition, dropout and residual connection addition.
bias: True # Whether to use bias terms in all weight matrices.
normalization: 'layernorm' # Normalization layer to use. Options are 'layernorm', 'rmsnorm'
arch: 'transformer' # Options: ['transformer', 'perceiver']
activation: 'geglu' # Options ['gelu', 'geglu', 'swiglu', 'reglu']
headscale: False # Whether to learn extra parameters that scale the output of the each self-attention head.
transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
openai_gelu: False # Use OpenAI's GELU instead of the default GeLU
# miscellaneous
onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
fp32_residual_connection: False # Use FP32 for residual connections.
# activations checkpointing
activations_checkpoint_granularity: full
activations_checkpoint_method: block # 'uniform', 'block'
activations_checkpoint_num_layers: 0
decoder:
num_layers: 12
hidden_size: 768
ffn_hidden_size: 2048 # Transformer FFN hidden size. 4 * hidden_size.
num_attention_heads: 12
kv_channels: 64 # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
init_method_std: 0.015 # Standard deviation of the zero mean normal distribution used for weight initialization.')
hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
attention_dropout: 0.1 # Dropout probability in the attention layer.
position_embedding_type: 'learned_absolute' # Position embedding type. Options ['learned_absolute', 'relative']
relative_attention_num_buckets: 32 # Relative position number of buckets for computing the bias
relative_attention_max_distance: 128 # max_distance to keep relative distance in the attention_num_buckets.
relative_position_bias_self_attention_only: True # Whether to only use relative position bias for self attention only.
apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
layernorm_epsilon: 1e-5
persist_layer_norm: True # Use of persistent fused layer norm kernel.
bias_activation_fusion: True # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce
masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with it's mask.
bias_dropout_add_fusion: True # Use a kernel that fuses the bias addition, dropout and residual connection addition.
bias: True # Whether to use bias terms in all weight matrices.
normalization: 'layernorm' # Normalization layer to use. Options are 'layernorm', 'rmsnorm'
arch: 'transformer' # Options: ['transformer', 'perceiver']
activation: 'geglu' # Options ['gelu', 'geglu', 'swiglu', 'reglu']
headscale: False # Whether to learn extra parameters that scale the output of the each self-attention head.
transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
openai_gelu: False # Use OpenAI's GELU instead of the default GeLU
# miscellaneous
onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
fp32_residual_connection: False # Use FP32 for residual connections.
# activations checkpointing
activations_checkpoint_granularity: full
activations_checkpoint_method: block # 'uniform', 'block'
activations_checkpoint_num_layers: 0
tokenizer:
library: 'megatron'
type: 'BertWordPieceCase'
model: null
vocab_file: ${data_dir}/bpe/vocab.txt
merge_file: null
num_sentinel_tokens: 100
# precision
native_amp_init_scale: 4294967296 # 2 ** 32
native_amp_growth_interval: 1000
fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16
# miscellaneous
seed: 1234
use_cpu_initialization: False # Init weights on the CPU (slow for large models)
apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this
# embedding sharing
share_token_embeddings: True # If True share encoder/decoder embeddings
share_decoder_tokens_head_embeddings: True # If True share decoder embeddings and decoder projection to logits
nsys_profile:
enabled: False
trace: [nvtx,cuda]
start_step: 10 # Global batch to start profiling
end_step: 10 # Global batch to end profiling
ranks: [0] # Global rank IDs to profile
gen_shape: False # Generate model and kernel details including input shapes
optim:
name: distributed_fused_adam
overlap_grad_sync: False
bucket_cap_mb: ${training.model.grad_allreduce_chunk_size_mb}
lr: 0.0001
betas:
- 0.9
- 0.999
eps: 1e-8
weight_decay: 0.01
sched:
name: WarmupAnnealing
min_lr: 0.00001
last_epoch: -1
warmup_ratio: 0.01
data:
data_impl: mmap
splits_string: "90,5,5"
seq_length: 512
seq_length_dec: 128
skip_warmup: True
num_workers: 4
dataloader_type: single # cyclic
masked_lm_prob: 0.15
dataset_type: 't5'
short_seq_prob: 0.0
max_ngram_size: 10
mean_ngram_size: null
geometric_dist: True
permutation: False
whole_word_masking: True
favor_longer_ngrams: False
respect_document_boundaries: True # If true, a single training exampl cannot cross document boundaries, increasing the fraction of <pad> tokens within a batch.
index_mapping_dir: null # path to save index mapping .npy files, by default will save in the same location as data_prefix
data_prefix: # Should be weight path weight path... for a blended dataset
- 1.0
- ${data_dir}/my-t5_00_text_document
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"base_configs/t5.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_base_config(self):
conf = OmegaConf.load("base_configs/mt5.yaml")
s = """
run:
name: mt5_170m
results_dir: ${base_results_dir}/${.name}
time_limit: "7-00:00:00"
dependency: "singleton"
preprocessed_dir: ${data_dir}/mc4/preprocessed # used for auto data blending
blending_alpha: 0.7 # blending ratio across different languages; language sampling ratio ~L^alpha
name: megatron_mt5
restore_from_path: null # used when starting from a .nemo file
trainer:
num_nodes: 4
devices: 8
accelerator: gpu
precision: bf16
logger: False # logger provided by exp_manager
enable_checkpointing: False
replace_sampler_ddp: False
max_epochs: null
max_steps: 1000000 # consumed_samples = global_step * global_batch_size
max_time: "06:23:30:00"
log_every_n_steps: 1
val_check_interval: 50
limit_val_batches: 1
limit_test_batches: 1
accumulate_grad_batches: 1
gradient_clip_val: 1.0
exp_manager:
explicit_log_dir: ${training.run.results_dir}/results
exp_dir: null
name: megatron_mt5
create_wandb_logger: False
wandb_logger_kwargs:
project: nemo_mt5
name: ${training.run.name}
resume_if_exists: True
resume_ignore_no_checkpoint: True
create_checkpoint_callback: True
checkpoint_callback_params:
monitor: val_loss
save_top_k: 10
mode: min
always_save_nemo: False # saves nemo file during validation, not implemented for model parallel
save_nemo_on_train_end: False # not recommended when training large models on clusters with short time limits
filename: 'megatron_mt5--{val_loss:.2f}-{step}-{consumed_samples}'
model_parallel_size: ${multiply:${training.model.tensor_model_parallel_size}, ${training.model.pipeline_model_parallel_size}}
log_step_timing: True
step_timing_kwargs:
sync_cuda: True
buffer_size: 5
model:
# model parallelism
micro_batch_size: 64
global_batch_size: 2048 # will use more micro batches to reach global batch size
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
resume_from_checkpoint: null # manually set the checkpoint file to load from
pipeline_model_parallel_split_rank: ${divide_floor:${.pipeline_model_parallel_size}, 2}
# model architecture
make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency.
pre_process: True # add embedding
post_process: True # add pooler
megatron_amp_O2: True # use AMP with O2 style mixed precision instead of native amp on-the-fly weight autocasting.
grad_allreduce_chunk_size_mb: 125
gradient_as_bucket_view: True # Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)
sync_batch_comm: False
seq_length: 512
max_position_embeddings: ${.seq_length}
encoder:
num_layers: 8
hidden_size: 512
ffn_hidden_size: 1024 # Transformer FFN hidden size. 4 * hidden_size.
num_attention_heads: 6
kv_channels: 64 # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
init_method_std: 0.015 # Standard deviation of the zero mean normal distribution used for weight initialization.')
hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
attention_dropout: 0.1 # Dropout probability in the attention layer.
position_embedding_type: 'learned_absolute' # Position embedding type. Options ['learned_absolute', 'relative']
relative_attention_num_buckets: 32 # Relative position number of buckets for computing the bias
relative_attention_max_distance: 128 # max_distance to keep relative distance in the attention_num_buckets.
relative_position_bias_self_attention_only: True # Whether to only use relative position bias for self attention only.
apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
layernorm_epsilon: 1e-5
persist_layer_norm: True # Use of persistent fused layer norm kernel.
bias_activation_fusion: True # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce
masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with it's mask.
bias_dropout_add_fusion: True # Use a kernel that fuses the bias addition, dropout and residual connection addition.
bias: True # Whether to use bias terms in all weight matrices.
normalization: 'layernorm' # Normalization layer to use. Options are 'layernorm', 'rmsnorm'
arch: 'transformer' # Options: ['transformer', 'perceiver']
activation: 'geglu' # Options ['gelu', 'geglu', 'swiglu', 'reglu']
headscale: False # Whether to learn extra parameters that scale the output of the each self-attention head.
transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
openai_gelu: False # Use OpenAI's GELU instead of the default GeLU
# miscellaneous
onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
fp32_residual_connection: False # Use FP32 for residual connections.
# activations checkpointing
activations_checkpoint_granularity: full
activations_checkpoint_method: block # 'uniform', 'block'
activations_checkpoint_num_layers: 0
decoder:
num_layers: 8
hidden_size: 512
ffn_hidden_size: 1024 # Transformer FFN hidden size. 4 * hidden_size.
num_attention_heads: 6
kv_channels: 64 # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
init_method_std: 0.015 # Standard deviation of the zero mean normal distribution used for weight initialization.')
hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
attention_dropout: 0.1 # Dropout probability in the attention layer.
position_embedding_type: 'learned_absolute' # Position embedding type. Options ['learned_absolute', 'relative']
relative_attention_num_buckets: 32 # Relative position number of buckets for computing the bias
relative_attention_max_distance: 128 # max_distance to keep relative distance in the attention_num_buckets.
relative_position_bias_self_attention_only: True # Whether to only use relative position bias for self attention only.
apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
layernorm_epsilon: 1e-5
persist_layer_norm: True # Use of persistent fused layer norm kernel.
bias_activation_fusion: True # Use a kernel that fuses the bias addition from weight matrices with the subsequent activation function.
grad_div_ar_fusion: True # Fuse grad division into torch.distributed.all_reduce
masked_softmax_fusion: True # Use a kernel that fuses the attention softmax with it's mask.
bias_dropout_add_fusion: True # Use a kernel that fuses the bias addition, dropout and residual connection addition.
bias: True # Whether to use bias terms in all weight matrices.
normalization: 'layernorm' # Normalization layer to use. Options are 'layernorm', 'rmsnorm'
arch: 'transformer' # Options: ['transformer', 'perceiver']
activation: 'geglu' # Options ['gelu', 'geglu', 'swiglu', 'reglu']
headscale: False # Whether to learn extra parameters that scale the output of the each self-attention head.
transformer_block_type: 'pre_ln' # Options ['pre_ln', 'post_ln', 'normformer']
openai_gelu: False # Use OpenAI's GELU instead of the default GeLU
# miscellaneous
onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
fp32_residual_connection: False # Use FP32 for residual connections.
# activations checkpointing
activations_checkpoint_granularity: full
activations_checkpoint_method: block # 'uniform', 'block'
activations_checkpoint_num_layers: 0
tokenizer:
library: 'sentencepiece'
type: null
model: ${data_dir}/mc4/bpe/mt5_tokenizer.model
vocab_file: null
merge_file: null
num_sentinel_tokens: 100
# precision
native_amp_init_scale: 4294967296 # 2 ** 32
native_amp_growth_interval: 1000
fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16
# miscellaneous
seed: 1234
use_cpu_initialization: False # Init weights on the CPU (slow for large models)
apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this
# embedding sharing
share_token_embeddings: True # If True share encoder/decoder embeddings
share_decoder_tokens_head_embeddings: True # If True share decoder embeddings and decoder projection to logits
nsys_profile:
enabled: False
trace: [nvtx,cuda]
start_step: 10 # Global batch to start profiling
end_step: 10 # Global batch to end profiling
ranks: [0] # Global rank IDs to profile
gen_shape: False # Generate model and kernel details including input shapes
optim:
name: distributed_fused_adam
overlap_grad_sync: False
bucket_cap_mb: ${training.model.grad_allreduce_chunk_size_mb}
lr: 0.0001
betas:
- 0.9
- 0.999
eps: 1e-8
weight_decay: 0.01
sched:
name: WarmupAnnealing
min_lr: 0.00001
last_epoch: -1
warmup_ratio: 0.01
data:
data_impl: mmap
splits_string: "90,5,5"
seq_length: 512
seq_length_dec: 128
skip_warmup: True
num_workers: 8
dataloader_type: single # cyclic
masked_lm_prob: 0.15
dataset_type: 't5'
short_seq_prob: 0.0
max_ngram_size: 10
mean_ngram_size: null
geometric_dist: True
permutation: False
whole_word_masking: False
favor_longer_ngrams: False
respect_document_boundaries: True # If true, a single training exampl cannot cross document boundaries, increasing the fraction of <pad> tokens within a batch.
index_mapping_dir: null # path to save index mapping .npy files, by default will save in the same location as data_prefix
data_prefix: null # Should be weight path weight path... for a blended dataset. If null will automatically blend all language files in mC4_dir.
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"base_configs/mt5.yaml must be set to {expected} but it currently is {conf}."
def test_bert_base_config(self):
conf = OmegaConf.load("base_configs/bert.yaml")
s = """
run:
name: bert_110m
results_dir: ${base_results_dir}/${.name}
time_limit: "1-00:00:00"
dependency: "singleton"
name: megatron_bert
restore_from_path: null # used when starting from a .nemo file
trainer:
devices: 8
num_nodes: 8
accelerator: gpu
precision: bf16
logger: False # logger provided by exp_manager
enable_checkpointing: False
replace_sampler_ddp: False
max_epochs: -1 # PTL default. In practice we don't usually train for more than 1 epoch.
max_steps: 100000 # consumed_samples = global_step * micro_batch_size * data_parallel_size * accumulate_grad_batches
max_time: "00:23:30:00"
log_every_n_steps: 1
val_check_interval: 50
limit_val_batches: 1
limit_test_batches: 1
accumulate_grad_batches: 1
gradient_clip_val: 1.0
exp_manager:
explicit_log_dir: ${training.run.results_dir}/results
exp_dir: null
name: megatron_bert
create_wandb_logger: False
wandb_logger_kwargs:
project: nemo_bert
name: ${training.run.name}
resume_if_exists: True
resume_ignore_no_checkpoint: True
create_checkpoint_callback: True
checkpoint_callback_params:
monitor: val_loss
save_top_k: 10
mode: min
always_save_nemo: False # saves nemo file during validation, not implemented for model parallel
filename: 'megatron_bert--{val_loss:.2f}-{step}-{consumed_samples}'
model_parallel_size: ${multiply:${training.model.tensor_model_parallel_size}, ${training.model.pipeline_model_parallel_size}}
log_step_timing: True
step_timing_kwargs:
sync_cuda: True
buffer_size: 5
model:
# model parallelism
global_batch_size: 256
micro_batch_size: 4
tensor_model_parallel_size: 1
pipeline_model_parallel_size: 1
virtual_pipeline_model_parallel_size: null # interleaved pipeline
# model architecture
encoder_seq_length: 512
max_position_embeddings: ${.encoder_seq_length}
num_layers: 12
hidden_size: 768
ffn_hidden_size: ${multiply:4, ${.hidden_size}} #3072 # Transformer FFN hidden size. Usually 4 * hidden_size.
num_attention_heads: 12
init_method_std: 0.02 # Standard deviation of the zero mean normal distribution used for weight initialization.')
hidden_dropout: 0.1 # Dropout probability for hidden state transformer.
kv_channels: null # Projection weights dimension in multi-head attention. Set to hidden_size // num_attention_heads if null
apply_query_key_layer_scaling: True # scale Q * K^T by 1 / layer-number.
layernorm_epsilon: 1e-5
make_vocab_size_divisible_by: 128 # Pad the vocab size to be divisible by this value for computation efficiency.
pre_process: True # add embedding
post_process: True # add pooler
bert_binary_head: False # BERT binary head
tokenizer:
library: 'megatron'
type: 'BertWordPieceLowerCase'
model: null
vocab_file: ${data_dir}/vocab.txt
merge_file: null
# precision
native_amp_init_scale: 4294967296 # 2 ** 32
native_amp_growth_interval: 1000
fp32_residual_connection: False # Move residual connections to fp32
fp16_lm_cross_entropy: False # Move the cross entropy unreduced loss calculation for lm head to fp16
# Megatron O2-style half-precision
megatron_amp_O2: True # Enable O2-level automatic mixed precision using main parameters
grad_allreduce_chunk_size_mb: 125
grad_div_ar_fusion: False
# miscellaneous
seed: 1234
use_cpu_initialization: False # Init weights on the CPU (slow for large models)
onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter.
gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory)
# Activations checkpointing
activations_checkpoint_granularity: selective
activations_checkpoint_method: block # 'uniform', 'block'
activations_checkpoint_num_layers: 1
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: True
data:
data_prefix:
- 1.0
- ${data_dir}/my-t5_00_bert_tokenizer_text_document
index_mapping_dir: null # path to save index mapping .npy files, by default will save in the same location as data_prefix
data_impl: mmap
splits_string: 900,50,50
seq_length: 512 #${model.encoder_seq_length}
skip_warmup: True
num_workers: 2
dataloader_type: single # cyclic
reset_position_ids: False # Reset position ids after end-of-document token
reset_attention_mask: False # Reset attention mask after end-of-document token
eod_mask_loss: False # Mask loss for the end of document tokens
masked_lm_prob: 0.15 # Probability of replacing a token with mask.
short_seq_prob: 0.1 # Probability of producing a short sequence.
optim:
name: distributed_fused_adam
overlap_grad_sync: False
bucket_cap_mb: ${training.model.grad_allreduce_chunk_size_mb}
lr: 2e-4
weight_decay: 0.01
betas:
- 0.9
- 0.98
sched:
name: CosineAnnealing
warmup_steps: 500
constant_steps: 50000
min_lr: 2e-5
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"base_configs/bert.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/base_configs_tests/test_base_configs.py |
from omegaconf import OmegaConf
class TestConfig:
def test_config(self):
conf = OmegaConf.load("conf/config.yaml")
s = """
defaults:
- _self_
- cluster: bcm
- search_config: gpt3/5b
- override hydra/job_logging: stdout
hydra:
run:
dir: .
output_subdir: null
run_training_hp_search: True
run_inference_hp_search: True
cluster_type: bcm # bcm or bcp
auto_configurator_path: ??? # Path to the location of auto_configurator codebase.
launcher_scripts_path: ${auto_configurator_path}/../launcher_scripts
fastertransformer_path: ${auto_configurator_path}/../FasterTransformer
base_results_dir: ${auto_configurator_path}/results
data_dir: ${launcher_scripts_path}/data
training_container: nvcr.io/ea-bignlp/nemofw-training:23.07-py3
container_mounts:
- null
wandb:
enable: False
api_key_file: null
project: nemo-megatron-autoconfig
search_config_value: ${hydra:runtime.choices.search_config}
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/config.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_main_config.py |
NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/__init__.py |
|
from omegaconf import OmegaConf
class TestGPT3Config:
def test_gpt3_config_0_126b(self):
conf = OmegaConf.load("conf/search_config/gpt3/0.126b.yaml")
s = """
train_settings:
model_size_in_b: 0.126 # unit in billion parameters
num_nodes: 8
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 2 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_0.126b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [1,2]
pipeline_parallel_sizes: [1]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256,512]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/0.126b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_0_843b(self):
conf = OmegaConf.load("conf/search_config/gpt3/0.843b.yaml")
s = """
train_settings:
model_size_in_b: 0.843 # unit in billion parameters
num_nodes: 8
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 2 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_0.843b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [1,2]
pipeline_parallel_sizes: [1]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256,512]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/0.843b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_2b(self):
conf = OmegaConf.load("conf/search_config/gpt3/2b.yaml")
s = """
train_settings:
model_size_in_b: 2 # unit in billion parameters
num_nodes: 8
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 2 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_2b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [1,2]
pipeline_parallel_sizes: [1]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256,512]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/2b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_5b(self):
conf = OmegaConf.load("conf/search_config/gpt3/5b.yaml")
s = """
train_settings:
model_size_in_b: 5 # unit in billion parameters
num_nodes: 16
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 5 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_5b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [1,2,4]
pipeline_parallel_sizes: [1,2]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/5b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_8b(self):
conf = OmegaConf.load("conf/search_config/gpt3/8b.yaml")
s = """
train_settings:
model_size_in_b: 8 # unit in billion parameters
num_nodes: 16
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 5 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_8b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [1,2,4]
pipeline_parallel_sizes: [1,2]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/8b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_20b(self):
conf = OmegaConf.load("conf/search_config/gpt3/20b.yaml")
s = """
train_settings:
model_size_in_b: 20.0
num_nodes: 64
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 7
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 20
tflops_per_gpu: 140
num_tokens_in_b: 300
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_20b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [2,4,8]
pipeline_parallel_sizes: [1,2,4]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/20b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_43b(self):
conf = OmegaConf.load("conf/search_config/gpt3/43b.yaml")
s = """
train_settings:
model_size_in_b: 43 # unit in billion parameters
num_nodes: 128
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 13 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_43b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [4,8,16]
pipeline_parallel_sizes: [1,2,4,8]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128,256]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/43b.yaml must be set to {expected} but it currently is {conf}."
def test_gpt3_config_175b(self):
conf = OmegaConf.load("conf/search_config/gpt3/175b.yaml")
s = """
train_settings:
model_size_in_b: 175 # unit in billion parameters
num_nodes: 128
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 10 # unit in days
limit_search_runs: 50 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 30 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 300 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 51200
seq_length: 2048 # available seq_length list for GPT-3 models: [2048, 4096, 8192]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
inference_settings:
run:
model_type: gpt3
model_train_name: gpt3_175b
gpus_per_node: 8
data_type: "fp16" # fp32|fp16|bf16
time_limit: 0:30:00
results_dir: ${base_results_dir}/${search_config_value}_${search_config.train_settings.gpu_memory_gb}gb
tensor_parallel_sizes: [4,8,16]
pipeline_parallel_sizes: [1,2,4,8]
benchmark:
input_len: 60
output_len: 20
batch_sizes: [4,8,16,32,64,128]
beam_width: 1
topk: 4
topp: 0.0
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/gpt3/175b.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_gpt3_config.py |
from omegaconf import OmegaConf
class TestClusterConfig:
def test_cluster_bcm_config(self):
conf = OmegaConf.load("conf/cluster/bcm.yaml")
s = """
partition: null
account: null
exclusive: True
gpus_per_task: null
gpus_per_node: 8
mem: 0
job_name_prefix: "nemo_megatron_autoconfig:"
srun_args:
- "--no-container-mount-home"
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/cluster/bcm.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_cluster_config.py |
from omegaconf import OmegaConf
class TestmT5Config:
def test_mt5_config_0_17b(self):
conf = OmegaConf.load("conf/search_config/mt5/0.17b.yaml")
s = """
train_settings:
model_size_in_b: 0.17
num_nodes: 4
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 4
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 12
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/0.17b.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_config_0_39b(self):
conf = OmegaConf.load("conf/search_config/mt5/0.39b.yaml")
s = """
train_settings:
model_size_in_b: 0.39
num_nodes: 8
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 5
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 15
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/0.39b.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_config_3_2b(self):
conf = OmegaConf.load("conf/search_config/mt5/3.2b.yaml")
s = """
train_settings:
model_size_in_b: 3.2
num_nodes: 20
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 14
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 35
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/3.2b.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_config_11_9b(self):
conf = OmegaConf.load("conf/search_config/mt5/11.9b.yaml")
s = """
train_settings:
model_size_in_b: 11.9
num_nodes: 20
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 50
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 50
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/11.9b.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_config_24_65b(self):
conf = OmegaConf.load("conf/search_config/mt5/24.65b.yaml")
s = """
train_settings:
model_size_in_b: 24.65
num_nodes: 40
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 55
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 60
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/24.65b.yaml must be set to {expected} but it currently is {conf}."
def test_mt5_config_42_54b(self):
conf = OmegaConf.load("conf/search_config/mt5/42.54b.yaml")
s = """
train_settings:
model_size_in_b: 42.54
num_nodes: 40
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 90
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 80
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 250000
seq_length: 512 # available seq_length list for MT5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/mt5/42.54b.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_mt5_config.py |
from omegaconf import OmegaConf
class TestT5Config:
def test_t5_config_0_22b(self):
conf = OmegaConf.load("conf/search_config/t5/0.22b.yaml")
s = """
train_settings:
model_size_in_b: 0.22
num_nodes: 4
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 4
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 10
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 29000
seq_length: 512 # available seq_length list for T5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/t5/0.22b.yaml must be set to {expected} but it currently is {conf}."
def test_t5_config_2_8b(self):
conf = OmegaConf.load("conf/search_config/t5/2.8b.yaml")
s = """
train_settings:
model_size_in_b: 2.8
num_nodes: 20
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 16
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 35
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 29000
seq_length: 512 # available seq_length list for T5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/t5/2.8b.yaml must be set to {expected} but it currently is {conf}."
def test_t5_config_11b(self):
conf = OmegaConf.load("conf/search_config/t5/11b.yaml")
s = """
train_settings:
model_size_in_b: 11
num_nodes: 20
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 45
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 50
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 29000
seq_length: 512 # available seq_length list for T5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/t5/11b.yaml must be set to {expected} but it currently is {conf}."
def test_t5_config_23_5b(self):
conf = OmegaConf.load("conf/search_config/t5/23.5b.yaml")
s = """
train_settings:
model_size_in_b: 23.5
num_nodes: 40
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 48
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 80
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 29000
seq_length: 512 # available seq_length list for T5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/t5/23.5b.yaml must be set to {expected} but it currently is {conf}."
def test_t5_config_41_2b(self):
conf = OmegaConf.load("conf/search_config/t5/41.2b.yaml")
s = """
train_settings:
model_size_in_b: 41.2
num_nodes: 40
gpus_per_node: 8
gpu_memory_gb: 80
max_training_days: 85
limit_search_runs: 100
output_top_n: 10
max_steps_per_run: 50
max_minutes_per_run: 90
tflops_per_gpu: 140
num_tokens_in_b: 1000
vocab_size: 29000
seq_length: 512 # available seq_length list for T5 models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb
tensor_parallel_sizes: auto
pipeline_parallel_sizes: auto
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto
act_ckpt_layers: auto
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/t5/41.2b.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_t5_config.py |
from omegaconf import OmegaConf
class TestBERTConfig:
def test_bert_config_0_11b(self):
conf = OmegaConf.load("conf/search_config/bert/0.11b.yaml")
s = """
train_settings:
model_size_in_b: 0.11 # unit in billion parameters
num_nodes: 8
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 2 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 1800 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 30522
seq_length: 512 # available seq_length list for BERT models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/bert/0.11b.yaml must be set to {expected} but it currently is {conf}."
def test_bert_config_4b(self):
conf = OmegaConf.load("conf/search_config/bert/4b.yaml")
s = """
train_settings:
model_size_in_b: 4 # unit in billion parameters
num_nodes: 16
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 7 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 20 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 1800 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 30522
seq_length: 512 # available seq_length list for BERT models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/bert/4b.yaml must be set to {expected} but it currently is {conf}."
def test_bert_config_20b(self):
conf = OmegaConf.load("conf/search_config/bert/20b.yaml")
s = """
train_settings:
model_size_in_b: 20 # unit in billion parameters
num_nodes: 64
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 12 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 30 # minutes per run for the grid search.
tflops_per_gpu: 140 # Estimated tflops per GPU.
num_tokens_in_b: 1800 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 30522
seq_length: 512 # available seq_length list for BERT models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/bert/20b.yaml must be set to {expected} but it currently is {conf}."
def test_bert_config_100b(self):
conf = OmegaConf.load("conf/search_config/bert/100b.yaml")
s = """
train_settings:
model_size_in_b: 100 # unit in billion parameters
num_nodes: 128
gpus_per_node: 8
gpu_memory_gb: 80 # Memory per GPU, in GB. Currently 40GB and 80GB A100s supported.
max_training_days: 50 # unit in days
limit_search_runs: 100 # Max number of runs to be launched in parallel for grid search.
output_top_n: 10 # The result will print the top N fastest training configs.
max_steps_per_run: 50 # Max steps per run for the grid search.
max_minutes_per_run: 40 # minutes per run for the grid search.
tflops_per_gpu: 150 # Estimated tflops per GPU.
num_tokens_in_b: 1800 # Unit in billions, typically 300B for GPT3 models.
vocab_size: 30522
seq_length: 512 # available seq_length list for BERT models: [512]
custom_config: null # path to custom .yaml model config instead of using auto-generated
logs: ${base_results_dir}/${search_config_value}_${.gpu_memory_gb}gb # Example base_results_dir/gpt3/126m
tensor_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8]
pipeline_parallel_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 10]
min_model_parallel_size: auto # auto to use our recommendation, or a value for the minimum desired parallelism
max_model_parallel_size: auto # auto to use our recommendation, or a value for the maximum desired parallelism
micro_batch_sizes: auto # auto to use our recommendation, or a list, such as [1, 2, 4, 8, 16]
act_ckpt_layers: auto # auto to use our recommendation, or a list, such as [0, 1, 2, 3]
"""
expected = OmegaConf.create(s)
assert (
expected == conf
), f"conf/search_config/bert/20b.yaml must be set to {expected} but it currently is {conf}."
| NeMo-Megatron-Launcher-master | auto_configurator/tests/config_tests/test_bert_config.py |
import os
import pytest
from omegaconf import OmegaConf
import autoconfig.training_config as tc
class TestCalculateTpPpMbsGrid:
margin = 0.05
@pytest.mark.parametrize(
"model_size,layers,seq_length,model_name,train_cfg,expected",
[
# GPT-3 tests
(0.126, 12, 2048, "gpt3", {"tensor_parallel_sizes": [1,2,4,5], "pipeline_parallel_sizes": [2,4,8], "micro_batch_sizes": [4,8,32], "gpu_memory_gb": 80, "min_model_parallel_size": 1, "max_model_parallel_size": 32}, {"tp": [1,2,4,5], "pp": [2,4,8], "mbs": [4,8,32], "min_par": 1, "max_par": 32}),
(0.126, 12, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(0.126, 12, 8192, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1,2], "mbs": [1,2,4], "min_par": 1, "max_par": 8}),
(0.843, 12, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(0.843, 12, 16384, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4], "pp": [1], "mbs": [1,2], "min_par": 1, "max_par": 8}),
(0.843, 12, 32768, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4], "pp": [1,2], "mbs": [1], "min_par": 1, "max_par": 8}),
(2, 12, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(2, 12, 8192, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1,2], "mbs": [1,2,4], "min_par": 1, "max_par": 8}),
(2.5, 24, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(5.0, 24, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(5.0, 24, 16384, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4], "pp": [1,2], "mbs": [1], "min_par": 1, "max_par": 8}),
(8.0, 24, 4096, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1,2], "mbs": [1,2,4], "min_par": 1, "max_par": 8}),
(10.0, 24, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4,8], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(10.0, 24, 32768, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2], "mbs": [1], "min_par": 4, "max_par": 16}),
(20.0, 44, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1,2,4], "mbs": [1,2,4], "min_par": 4, "max_par": 8}),
(20.0, 44, 8192, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2,4], "mbs": [1], "min_par": 8, "max_par": 32}),
(43.0, 52, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4,8], "pp": [1,2,4], "mbs": [1,2,4], "min_par": 8, "max_par": 32}),
(43.0, 52, 4096, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [2,4], "mbs": [1,2], "min_par": 8, "max_par": 32}),
(175.0, 96, 2048, "gpt3", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [8], "pp": [4,6,8,12,16], "mbs": [1,2,4], "min_par": 32, "max_par": 256}),
# T5 tests
(0.22, 12, 512, "t5", {"tensor_parallel_sizes": [1,2,4,5], "pipeline_parallel_sizes": [2,4,8], "micro_batch_sizes": [4,8,32], "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4,5], "pp": [2,4,8], "mbs": [4,8,32], "min_par": 1, "max_par": 8}),
(0.22, 12, 512, "t5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [16,32,64,128], "min_par": 1, "max_par": 8}),
(3.0, 24, 512, "t5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [4,6,8,12,16,24,32,48], "min_par": 1, "max_par": 8}),
(11.0, 24, 512, "t5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1], "mbs": [2,4,6,8,12,16,24], "min_par": 1, "max_par": 8}),
(23.0, 36, 512, "t5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2], "mbs": [1,2,4,6,8], "min_par": 4, "max_par": 16}),
(41.0, 48, 512, "t5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2,4], "mbs": [1,2,4,6,8], "min_par": 8, "max_par": 32}),
# mT5 tests
(0.17, 6, 512, "mt5", {"tensor_parallel_sizes": [1,2,4,5], "pipeline_parallel_sizes": [2,4,8], "micro_batch_sizes": [4,8,32], "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4,5], "pp": [2,4,8], "mbs": [4,8,32], "min_par": 1, "max_par": 8}),
(0.17, 6, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [16,32,64,128], "min_par": 1, "max_par": 8}),
(0.39, 12, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [16,32,64,128], "min_par": 1, "max_par": 8}),
(3.2, 24, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [4,6,8,12,16,24,32,48], "min_par": 1, "max_par": 8}),
(11.9, 24, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1], "mbs": [2,4,6,8,12,16,24], "min_par": 1, "max_par": 8}),
(24.65, 36, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2], "mbs": [1,2,4,6,8], "min_par": 4, "max_par": 16}),
(42.54, 48, 512, "mt5", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1,2,4], "mbs": [1,2,4,6,8], "min_par": 8, "max_par": 32}),
# BERT tests
(0.11, 12, 512, "bert", {"tensor_parallel_sizes": [1,2,4,5], "pipeline_parallel_sizes": [2,4,8], "micro_batch_sizes": [4,8,32], "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4,5], "pp": [2,4,8], "mbs": [4,8,32], "min_par": 1, "max_par": 8}),
(0.11, 12, 512, "bert", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(2.5, 24, 512, "bert", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [1,2,4], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 1, "max_par": 8}),
(5.0, 24, 512, "bert", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4,8], "pp": [1], "mbs": [1,2,3,4,6,8], "min_par": 2, "max_par": 8}),
(10.0, 24, 512, "bert", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [2,4,8], "pp": [1], "mbs": [1,2,3,4,6], "min_par": 2, "max_par": 8}),
(20.0, 48, 512, "bert", {"tensor_parallel_sizes": "auto", "pipeline_parallel_sizes": "auto", "micro_batch_sizes": "auto", "gpu_memory_gb": 80, "min_model_parallel_size": "auto", "max_model_parallel_size": "auto"}, {"tp": [4,8], "pp": [1], "mbs": [1,2,3,4], "min_par": 4, "max_par": 8}),
],
)
def test_calculate_tp_pp_mbs_grid(self, model_size, layers, seq_length, model_name, train_cfg, expected):
params = {
"model_size_in_b": model_size,
"num_layers": layers,
"seq_length": seq_length,
"model_name": model_name,
"train_cfg": train_cfg,
}
tp, pp, mbs, min_par, max_par = tc._calculate_tp_pp_mbs_grid(**params)
assert tp == expected["tp"], f"TP should be {expected['tp']} but it is {tp}."
assert pp == expected["pp"], f"PP should be {expected['pp']} but it is {pp}."
assert mbs == expected["mbs"], f"MBS should be {expected['mbs']} but it is {mbs}."
assert min_par == expected["min_par"], f"Minimum paralellism should be {expected['min_par']} but it is {min_par}."
assert max_par == expected["max_par"], f"Minimum paralellism should be {expected['max_par']} but it is {max_par}." | NeMo-Megatron-Launcher-master | auto_configurator/tests/code_tests/test_training_config.py |
import os
import pytest
from omegaconf import OmegaConf
import autoconfig.utils as ut
class TestCalculateModelSize:
margin = 0.05
@pytest.mark.parametrize(
"vocab,seq_len,hs,layers,ffn,kv,att,model_name,expected",
[
# GPT-3 tests
(51200, 2048, 768, 12, 768*4, None, 12, "gpt3", 0.126),
(51200, 4096, 1536, 26, 1536*4, None, 16, "gpt3", 0.843),
(51200, 4096, 2560, 24, 2560*4, None, 32, "gpt3", 2.0),
(51200, 2048, 4096, 24, 4096*4, None, 32, "gpt3", 5.0),
(51200, 2048, 5120, 24, 5120*4, None, 40, "gpt3", 8.0),
(51200, 2048, 6144, 44, 6144*4, None, 48, "gpt3", 20.0),
(51200, 8192, 8192, 52, 8192*4, None, 64, "gpt3", 43.0),
(51200, 2048, 12288, 96, 12288*4, None, 96, "gpt3", 175.0),
# T5 tests
(29000, 512, 768, 12, 2048, 64, 12, "t5", 0.22),
(29000, 512, 2048, 24, 5120, 64, 32, "t5", 2.8),
(29000, 512, 4096, 24, 10240, 64, 64, "t5", 11.0),
(29000, 512, 5120, 36, 10880, 64, 80, "t5", 23.5),
(29000, 512, 6144, 48, 10880, 64, 96, "t5", 41.2),
# mT5 tests
(250000, 512, 512, 8, 1024, 64, 6, "mt5", 0.17),
(250000, 512, 768, 12, 2048, 64, 12, "mt5", 0.39),
(250000, 512, 2048, 24, 5120, 64, 32, "mt5", 3.2),
(250000, 512, 4096, 24, 10240, 64, 64, "mt5", 11.9),
(250000, 512, 5120, 36, 10880, 64, 80, "mt5", 24.65),
(250000, 512, 6144, 48, 10880, 64, 96, "mt5", 42.65),
# BERT tests
(30522, 512, 768, 12, 768*4, None, 12, "bert", 0.11),
(30522, 512, 2560, 48, 2560*4, None, 40, "bert", 4.0),
(30522, 512, 6144, 44, 6144*4, None, 96, "bert", 20.0),
(30522, 512, 9216, 96, 9216*4, None, 96, "bert", 100.0),
],
)
def test_calculate_model_size(self, vocab, seq_len, hs, layers, ffn, kv, att, model_name, expected):
params = {
"vocab_size": vocab,
"seq_length": seq_len,
"hidden_size": hs,
"num_layers": layers,
"ffn_size": ffn,
"kv_channels": kv,
"att_heads": att,
"model_name": model_name,
}
output_size = ut._calculate_model_size(**params)
        assert output_size == pytest.approx(
            expected=expected, rel=self.margin
        ), (
            f"Output of _calculate_model_size should be approximately {expected}, "
            f"but it is {output_size}. Inputs: vocab_size={vocab}, seq_length={seq_len}, "
            f"hidden_size={hs}, num_layers={layers}, ffn_size={ffn}, kv_channels={kv}, "
            f"att_heads={att}, model_name={model_name}."
        )
def test_calculate_model_size_not_implemented_error(self):
params = {
"vocab_size": 100,
"seq_length": 100,
"hidden_size": 100,
"num_layers": 10,
"ffn_size": 100,
"kv_channels": 10,
"att_heads": 10,
"model_name": "incorrect_model",
}
with pytest.raises(NotImplementedError):
output_size = ut._calculate_model_size(**params)
class TestCalculateModelSizeParams:
margin = 0.05
@pytest.mark.parametrize(
"model_size,vocab,seq_len,model_name,expected",
[
# GPT-3 tests
(0.126, 51200, 2048, "gpt3", {"layers": 12, "hs": 768, "att": 12, "ffn": None, "kv": None, "lr": 6e-4}),
(0.843, 51200, 4096, "gpt3", {"layers": 26, "hs": 1536, "att": 16, "ffn": None, "kv": None, "lr": 2.5e-4}),
(2.0, 51200, 4096, "gpt3", {"layers": 24, "hs": 2560, "att": 32, "ffn": None, "kv": None, "lr": 1.6e-4}),
(5.0, 51200, 2048, "gpt3", {"layers": 24, "hs": 4096, "att": 32, "ffn": None, "kv": None, "lr": 1.2e-4}),
(8.0, 51200, 2048, "gpt3", {"layers": 24, "hs": 5120, "att": 40, "ffn": None, "kv": None, "lr": 1e-4}),
(20.0, 51200, 2048, "gpt3", {"layers": 44, "hs": 6144, "att": 48, "ffn": None, "kv": None, "lr": 1e-4}),
(43.0, 51200, 2048, "gpt3", {"layers": 52, "hs": 8192, "att": 64, "ffn": None, "kv": None, "lr": 0.8e-4}),
(175.0, 51200, 2048, "gpt3", {"layers": 96, "hs": 12288, "att": 96, "ffn": None, "kv": None, "lr": 0.6e-4}),
# T5 tests
(0.22, 29000, 512, "t5", {"layers": 12, "hs": 768, "att": 12, "ffn": 2048, "kv": 64, "lr": 0.0001}),
(2.8, 29000, 512, "t5", {"layers": 24, "hs": 2048, "att": 32, "ffn": 5120, "kv": 64, "lr": 0.0001}),
(11.0, 29000, 512, "t5", {"layers": 24, "hs": 4096, "att": 64, "ffn": 10240, "kv": 64, "lr": 0.0001}),
(23.5, 29000, 512, "t5", {"layers": 36, "hs": 5120, "att": 80, "ffn": 10880, "kv": 64, "lr": 0.0001}),
(41.2, 29000, 512, "t5", {"layers": 48, "hs": 6144, "att": 96, "ffn": 10880, "kv": 64, "lr": 0.0001}),
# mT5 tests
(0.17, 250000, 512, "mt5", {"layers": 8, "hs": 512, "att": 6, "ffn": 1024, "kv": 64, "lr": 0.0001}),
(0.39, 250000, 512, "mt5", {"layers": 12, "hs": 768, "att": 12, "ffn": 2048, "kv": 64, "lr": 0.0001}),
(3.2, 250000, 512, "mt5", {"layers": 24, "hs": 2048, "att": 32, "ffn": 5120, "kv": 64, "lr": 0.0001}),
(11.9, 250000, 512, "mt5", {"layers": 24, "hs": 4096, "att": 64, "ffn": 10240, "kv": 64, "lr": 0.0001}),
(24.65, 250000, 512, "mt5", {"layers": 36, "hs": 5120, "att": 80, "ffn": 10880, "kv": 64, "lr": 0.0001}),
(42.65, 250000, 512, "mt5", {"layers": 48, "hs": 6144, "att": 96, "ffn": 10880, "kv": 64, "lr": 0.0001}),
# BERT tests
(0.11, 30522, 512, "bert", {"layers": 12, "hs": 768, "att": 12, "ffn": 4*768, "kv": None, "lr": 2e-4}),
(4.0, 30522, 512, "bert", {"layers": 48, "hs": 2560, "att": 32, "ffn": 4*2560, "kv": None, "lr": 1e-4}),
(20.0, 30522, 512, "bert", {"layers": 44, "hs": 6144, "att": 48, "ffn": 4*6144, "kv": None, "lr": 1e-4}),
(100.0, 30522, 512, "bert", {"layers": 96, "hs": 9216, "att": 96, "ffn": 4*9216, "kv": None, "lr": 1e-4}),
],
)
def test_calculate_model_size_params(self, model_size, vocab, seq_len, model_name, expected):
params = {
"model_size_in_b": model_size,
"vocab_size": vocab,
"seq_length": seq_len,
"model_name": model_name,
}
layers, hs, att, ffn, kv, lr = ut.calculate_model_size_params(**params)
assert layers == expected["layers"], f"utils.calculate_model_size_params returned layers={layers} but layers={expected['layers']} is expected."
assert hs == expected["hs"], f"utils.calculate_model_size_params returned hidden_size={hs} but hidden_size{expected['hs']} is expected."
assert att == expected["att"], f"utils.calculate_model_size_params returned attention_heads={att} but attention_heads{expected['att']} is expected."
assert ffn == expected["ffn"], f"utils.calculate_model_size_params returned ffn_hidden_size={ffn} but ffn_hidden_size={expected['ffn']} is expected."
assert kv == expected["kv"], f"utils.calculate_model_size_params returned kv_channels={kv} but kv_channels={expected['kv']} is expected."
assert lr == expected["lr"], f"utils.calculate_model_size_params returned lr={lr} but lr={expected['lr']} is expected."
def test_calculate_model_size_params_not_implemented_error(self):
params = {
"model_size_in_b": 2.0,
"vocab_size": 100,
"seq_length": 100,
"model_name": "incorrect",
}
with pytest.raises(NotImplementedError):
out = ut.calculate_model_size_params(**params)
| NeMo-Megatron-Launcher-master | auto_configurator/tests/code_tests/test_utils.py |
NeMo-Megatron-Launcher-master | auto_configurator/tests/code_tests/__init__.py |
|
import os
import pytest
from omegaconf import OmegaConf
import autoconfig.base_config as bc
class TestEstimateModelSize:
margin = 0.05
@pytest.mark.parametrize(
"training_days,gpus,tflops,tokens,model_name,expected",
[
# GPT-3 tests
# T5 tests
(10, 4 * 8, 140, 1000, "t5", 0.48),
(10, 8 * 8, 140, 1000, "t5", 0.97),
(15, 8 * 8, 140, 1000, "t5", 1.45),
(15, 16 * 8, 140, 1000, "t5", 2.9),
(15, 20 * 8, 140, 1000, "t5", 3.6),
(20, 20 * 8, 140, 1000, "t5", 4.8),
(20, 32 * 8, 140, 1000, "t5", 7.7),
(30, 32 * 8, 140, 1000, "t5", 11.6),
(30, 40 * 8, 140, 1000, "t5", 14.5),
(30, 48 * 8, 140, 1000, "t5", 17.4),
(30, 60 * 8, 140, 1000, "t5", 21.8),
(30, 80 * 8, 140, 1000, "t5", 29.0),
(50, 80 * 8, 140, 1000, "t5", 48.4),
# mT5 tests
],
)
def test_estimate_model_size(
self, training_days, gpus, tflops, tokens, model_name, expected
):
params = {
"max_training_days": training_days,
"gpu_count": gpus,
"tflops_per_gpu": tflops,
"num_tokens_in_b": tokens,
"model_name": model_name,
}
output_size = bc._estimate_model_size(**params)
        assert output_size == pytest.approx(
            expected=expected, rel=self.margin
        ), (
            f"Output of _estimate_model_size should be approximately {expected}, "
            f"but it is {output_size}. Inputs: max_training_days={training_days}, gpu_count={gpus}, "
            f"tflops_per_gpu={tflops}, num_tokens_in_b={tokens}, model_name={model_name}."
        )
    def test_estimate_model_size_not_implemented_error(self):
params = {
"max_training_days": 1,
"gpu_count": 8,
"tflops_per_gpu": 140,
"num_tokens_in_b": 300,
"model_name": "invalid_name",
}
output_size = bc._estimate_model_size(**params)
        assert output_size is None
class TestEstimateTrainingTime:
margin = 0.05
@pytest.mark.parametrize(
"model_size,gpus,tflops,tokens,model_name,expected",
[
# GPT-3 tests
(0.126, 8 * 8, 140, 300, "gpt3", 0.4),
(0.843, 8 * 8, 140, 300, "gpt3", 2.6),
(2, 8 * 8, 140, 300, "gpt3", 6),
(5, 20 * 8, 140, 300, "gpt3", 6),
(8, 20 * 8, 140, 300, "gpt3", 9.9),
(20, 80 * 8, 140, 300, "gpt3", 6),
(43, 80 * 8, 140, 300, "gpt3", 13),
(175, 128 * 8, 140, 300, "gpt3", 35),
# T5 tests
(0.22, 4 * 8, 140, 1000, "t5", 4.5),
(2.8, 20 * 8, 140, 1000, "t5", 11.6),
(11, 20 * 8, 140, 1000, "t5", 45.5),
(23.5, 40 * 8, 140, 1000, "t5", 48.6),
(41.2, 40 * 8, 140, 1000, "t5", 85.1),
# mT5 tests
(0.17, 4 * 8, 140, 1000, "mt5", 4.0),
(0.39, 8 * 8, 140, 1000, "mt5", 4.6),
(3.2, 20 * 8, 140, 1000, "mt5", 15.2),
(11.9, 20 * 8, 140, 1000, "mt5", 56.6),
(24.65, 40 * 8, 140, 1000, "mt5", 58.6),
(42.54, 40 * 8, 140, 1000, "mt5", 101.1),
# BERT tests
(0.11, 8 * 8, 140, 300, "bert", 0.34),
(4, 16 * 8, 140, 300, "bert", 6.2),
(20, 64 * 8, 140, 300, "bert", 7.75),
],
)
def test_estimate_training_time(
self, model_size, gpus, tflops, tokens, model_name, expected
):
params = {
"model_size_in_b": model_size,
"gpu_count": gpus,
"tflops_per_gpu": tflops,
"num_tokens_in_b": tokens,
"model_name": model_name,
}
output_days = bc._estimate_training_time(**params)
        assert output_days == pytest.approx(
            expected=expected, rel=self.margin
        ), (
            f"Output of _estimate_training_time should be approximately {expected}, "
            f"but it is {output_days}. Inputs: model_size_in_b={model_size}, gpu_count={gpus}, "
            f"tflops_per_gpu={tflops}, num_tokens_in_b={tokens}, model_name={model_name}."
        )
def test_estimate_training_time_not_implemented_error(self):
params = {
"model_size_in_b": 1,
"gpu_count": 8,
"tflops_per_gpu": 140,
"num_tokens_in_b": 300,
"model_name": "invalid_name",
}
output_days = bc._estimate_training_time(**params)
        assert output_days is None
class TestCalculateGbsTpPp:
@pytest.mark.parametrize(
"model_size,model_name,seq_length,expected",
[
# GPT-3 tests
(0.126, "gpt3", 2048, (256, 1, 1)),
(3.0, "gpt3", 2048, (1024, 1, 1)),
(5.0, "gpt3", 2048, (2048, 2, 1)),
(10.0, "gpt3", 2048, (2048, 4, 1)),
(20.0, "gpt3", 2048, (2048, 8, 1)),
(40.0, "gpt3", 2048, (2048, 8, 2)),
(80.0, "gpt3", 2048, (2048, 8, 4)),
(175.0, "gpt3", 2048, (2048, 8, 8)),
(300.0, "gpt3", 2048, (2048, 8, 16)),
(600.0, "gpt3", 2048, (2048, 8, 32)),
(1000.0, "gpt3", 2048, (2048, 8, 64)),
# T5 tests
(0.5, "t5", 512, (2048, 1, 1)),
(3.0, "t5", 512, (1920, 2, 1)),
(6.0, "t5", 512, (1920, 4, 1)),
(13.0, "t5", 512, (1920, 8, 1)),
(20.0, "t5", 512, (1920, 8, 2)),
(40.0, "t5", 512, (1920, 8, 4)),
# mT5 tests
(0.5, "mt5", 512, (2048, 1, 1)),
(3.0, "mt5", 512, (1920, 2, 1)),
(6.0, "mt5", 512, (1920, 4, 1)),
(13.0, "mt5", 512, (1920, 8, 1)),
(20.0, "mt5", 512, (1920, 8, 2)),
(40.0, "mt5", 512, (1920, 8, 4)),
# BERT tests
(0.11, "bert", 512, (256, 1, 1)),
(3.0, "bert", 512, (1024, 1, 1)),
(6.0, "bert", 512, (2048, 2, 1)),
(13.0, "bert", 512, (2048, 4, 1)),
(20.0, "bert", 512, (2048, 8, 1)),
],
)
def test_calculate_gbs_tp_pp(self, model_size, model_name, seq_length, expected):
params = {"model_size_in_b": model_size, "model_name": model_name, "seq_length": seq_length}
output = bc._calculate_gbs_tp_pp(**params)
assert (
expected == output
), f"Output of _calculate_gbs_tp_pp should be {expected} but it is {output}."
class TestGenerateBaseconfig:
margin = 0.05
@pytest.mark.parametrize(
"model_size,nodes,gpus_per_node,gpu_mem,max_days,tokens,vocab,seq_length,custom_cfg,model_name,cfg,expected",
[
# GPT-3 tests
(0.126, 8, 8, 80, 2, 300, 51200, 2048, None, "gpt3", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "gpt3_0.126b", "time_limit": "2-00:00:00", "max_steps": 572204, "max_time": "1:23:30:00", "num_layers": 12, "gbs": 256, "hs": 768, "att_heads": 12, "ffn": "${multiply:4, ${.hidden_size}}", "kv": "null", "init_std": 0.023, "lr": 6e-4, "min_lr": 6e-5, "warmup_steps": 858, "constant_steps": 95e3, "warmup_ratio": None}),
(5.0, 20, 8, 80, 6, 300, 51200, 2048, None, "gpt3", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "gpt3_5.0b", "time_limit": "6-00:00:00", "max_steps": 71525, "max_time": "5:23:30:00", "num_layers": 24, "gbs": 2048, "hs": 4096, "att_heads": 32, "ffn": "${multiply:4, ${.hidden_size}}", "kv": "null", "init_std": 0.01, "lr": 1.2e-4, "min_lr": 1.2e-5, "warmup_steps": 107, "constant_steps": 11873, "warmup_ratio": None}),
(20.0, 80, 8, 80, 6.5, 300, 51200, 2048, None, "gpt3", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "gpt3_20.0b", "time_limit": "6-12:00:00", "max_steps": 71525, "max_time": "6:11:30:00", "num_layers": 44, "gbs": 2048, "hs": 6144, "att_heads": 48, "ffn": "${multiply:4, ${.hidden_size}}", "kv": "null", "init_std": 0.008165, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": 107, "constant_steps": 11873, "warmup_ratio": None}),
(40.0, 80, 8, 80, 25.75, 300, 51200, 2048, None, "gpt3", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "gpt3_40.0b", "time_limit": "25-18:00:00", "max_steps": 71525, "max_time": "25:17:30:00", "num_layers": 48, "gbs": 2048, "hs": 8192, "att_heads": 64, "ffn": "${multiply:4, ${.hidden_size}}", "kv": "null", "init_std": 0.007, "lr": 0.8e-4, "min_lr": 0.8e-5, "warmup_steps": 107, "constant_steps": 11873, "warmup_ratio": None}),
(175.0, 128, 8, 80, 35, 300, 51200, 2048, None, "gpt3", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "gpt3_175.0b", "time_limit": "35-00:00:00", "max_steps": 71525, "max_time": "34:23:30:00", "num_layers": 96, "gbs": 2048, "hs": 12288, "att_heads": 96, "ffn": "${multiply:4, ${.hidden_size}}", "kv": "null", "init_std": 0.006, "lr": 0.6e-4, "min_lr": 0.6e-5, "warmup_steps": 107, "constant_steps": 11873, "warmup_ratio": None}),
# T5 tests
(0.22, 4, 8, 80, 2, 1000, 29000, 512, None, "t5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "t5_0.22b", "time_limit": "2-00:00:00", "max_steps": 953675, "max_time": "1:23:30:00", "num_layers": 12, "gbs": 2048, "hs": 768, "att_heads": 12, "ffn": 2048, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(2.8, 20, 8, 80, 15, 1000, 29000, 512, None, "t5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "t5_2.8b", "time_limit": "15-00:00:00", "max_steps": 1017250, "max_time": "14:23:30:00", "num_layers": 24, "gbs": 1920, "hs": 2048, "att_heads": 32, "ffn": 5120, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(11.0, 20, 8, 80, 45, 1000, 29000, 512, None, "t5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "t5_11.0b", "time_limit": "45-00:00:00", "max_steps": 1017250, "max_time": "44:23:30:00", "num_layers": 24, "gbs": 1920, "hs": 4096, "att_heads": 64, "ffn": 10240, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(41.2, 40, 8, 80, 85, 1000, 29000, 512, None, "t5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": False}}, {"name": "t5_41.2b", "time_limit": "85-00:00:00", "max_steps": 1017250, "max_time": "84:23:30:00", "num_layers": 48, "gbs": 1920, "hs": 6144, "att_heads": 96, "ffn": 10880, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
# mT5 tests
(0.17, 4, 8, 80, 4, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_0.17b", "time_limit": "4-00:00:00", "max_steps": 953675, "max_time": "3:23:30:00", "num_layers": 8, "gbs": 2048, "hs": 512, "att_heads": 6, "ffn": 1024, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(0.39, 8, 8, 80, 5, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_0.39b", "time_limit": "5-00:00:00", "max_steps": 953675, "max_time": "4:23:30:00", "num_layers": 12, "gbs": 2048, "hs": 768, "att_heads": 12, "ffn": 2048, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(3.2, 20, 8, 80, 14, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_3.2b", "time_limit": "14-00:00:00", "max_steps": 1017250, "max_time": "13:23:30:00", "num_layers": 24, "gbs": 1920, "hs": 2048, "att_heads": 32, "ffn": 5120, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(11.9, 20, 8, 80, 50, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_11.9b", "time_limit": "50-00:00:00", "max_steps": 1017250, "max_time": "49:23:30:00", "num_layers": 24, "gbs": 1920, "hs": 4096, "att_heads": 64, "ffn": 10240, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(24.65, 40, 8, 80, 55, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_24.65b", "time_limit": "55-00:00:00", "max_steps": 1017250, "max_time": "54:23:30:00", "num_layers": 36, "gbs": 1920, "hs": 5120, "att_heads": 80, "ffn": 10880, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
(42.54, 40, 8, 80, 90.25, 1000, 250000, 512, None, "mt5", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "mt5_42.54b", "time_limit": "90-06:00:00", "max_steps": 1017250, "max_time": "90:05:30:00", "num_layers": 48, "gbs": 1920, "hs": 6144, "att_heads": 96, "ffn": 10880, "kv": 64, "init_std": 0.015, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": None, "constant_steps": None, "warmup_ratio": 0.01}),
# BERT tests
(0.11, 8, 8, 80, 2, 1800, 30522, 512, None, "bert", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "bert_0.11b", "time_limit": "2-00:00:00", "max_steps": 13800000, "max_time": "1:23:30:00", "num_layers": 12, "gbs": 256, "hs": 768, "att_heads": 12, "ffn": 768*4, "kv": "null", "init_std": 0.023094, "lr": 2e-4, "min_lr": 2e-5, "warmup_steps": 20000, "constant_steps": 2300000, "warmup_ratio": None}),
(4.0, 16, 8, 80, 7, 1800, 30522, 512, None, "bert", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "bert_4.0b", "time_limit": "7-00:00:00", "max_steps": 1720000, "max_time": "6:23:30:00", "num_layers": 48, "gbs": 2048, "hs": 2560, "att_heads": 32, "ffn": 2560*4, "kv": "null", "init_std": 0.012649, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": 2600, "constant_steps": 285000, "warmup_ratio": None}),
(20.0, 64, 8, 80, 12, 1800, 30522, 512, None, "bert", {"search_config": {"train_settings": {"logs": "."}}, "auto_configurator_path": ".", "wandb": {"enable": True, "project": "test_project"}}, {"name": "bert_20.0b", "time_limit": "12-00:00:00", "max_steps": 1716613, "max_time": "11:23:30:00", "num_layers": 44, "gbs": 2048, "hs": 6144, "att_heads": 48, "ffn": 6144*4, "kv": "null", "init_std": 0.008165, "lr": 1e-4, "min_lr": 1e-5, "warmup_steps": 2500, "constant_steps": 285000, "warmup_ratio": None}),
],
)
def test_generate_base_config(
self,
model_size,
nodes,
gpus_per_node,
gpu_mem,
max_days,
tokens,
vocab,
seq_length,
custom_cfg,
model_name,
cfg,
expected,
):
cfg = OmegaConf.create(cfg)
params = {
"model_size_in_b": model_size,
"nodes": nodes,
"gpus_per_node": gpus_per_node,
"gpu_memory_gb": gpu_mem,
"max_training_days": max_days,
"num_tokens_in_b": tokens,
"vocab_size": vocab,
"seq_length": seq_length,
"custom_cfg": custom_cfg,
"model_name": model_name,
"cfg": cfg,
}
out_cfg = bc.generate_base_config(**params)
# Run parameters
assert out_cfg["run"]["name"] == expected["name"], "run.name doesn't match the expected value."
assert out_cfg["run"]["results_dir"] == "${base_results_dir}/${.name}", "run.results_dir must be set to ${base_results_dir}/${.name}"
assert out_cfg["run"]["time_limit"] == expected["time_limit"], "run.time_limit doesn't match the expected value."
# Trainer parameters
assert out_cfg["trainer"]["num_nodes"] == nodes, "trainer.num_nodes doesn't match the expected value."
assert out_cfg["trainer"]["precision"] == "bf16", "trainer.precision doesn't match the expected value."
assert out_cfg["trainer"]["max_steps"] == pytest.approx(expected=expected["max_steps"], rel=self.margin), f"trainer.max_steps is {out_cfg['trainer']['max_steps']} but it should be {expected['max_steps']}."
assert out_cfg["trainer"]["max_time"] == expected["max_time"], "trainer.max_time doesn't match the expected value."
# Exp_manager parameters
if cfg["wandb"]["enable"]:
assert out_cfg["exp_manager"]["create_wandb_logger"], "exp_manager.create_wandb_logger should be True."
assert out_cfg["exp_manager"]["wandb_logger_kwargs"]["project"] == cfg["wandb"]["project"], "exp_manager.wandb_logger_kwargs.project doesn't match the expected value."
else:
assert not out_cfg["exp_manager"]["create_wandb_logger"], "exp_manager.create_wandb_logger should be False."
# Model parameters
if model_name in ["gpt3", "bert"]:
assert out_cfg["model"]["num_layers"] == expected["num_layers"]
assert out_cfg["model"]["hidden_size"] == expected["hs"]
assert out_cfg["model"]["num_attention_heads"] == expected["att_heads"]
if out_cfg["model"]["ffn_hidden_size"] is not None:
assert out_cfg["model"]["ffn_hidden_size"] == expected["ffn"]
if out_cfg["model"]["kv_channels"] is not None:
assert out_cfg["model"]["kv_channels"] == expected["kv"]
else:
assert out_cfg["model"]["encoder"]["num_layers"] == expected["num_layers"]
assert out_cfg["model"]["encoder"]["hidden_size"] == expected["hs"]
assert out_cfg["model"]["encoder"]["num_attention_heads"] == expected["att_heads"]
if out_cfg["model"]["encoder"]["ffn_hidden_size"] is not None:
assert out_cfg["model"]["encoder"]["ffn_hidden_size"] == expected["ffn"]
if out_cfg["model"]["encoder"]["kv_channels"] is not None:
assert out_cfg["model"]["encoder"]["kv_channels"] == expected["kv"]
assert out_cfg["model"]["global_batch_size"] == expected["gbs"]
assert out_cfg["model"]["init_method_std"] == pytest.approx(expected=expected["init_std"], rel=self.margin)
assert out_cfg["model"]["optim"]["lr"] == expected["lr"]
assert out_cfg["model"]["optim"]["sched"]["min_lr"] == pytest.approx(expected=expected["min_lr"], rel=self.margin)
if out_cfg["model"]["optim"]["sched"].get("warmup_steps") is not None:
assert out_cfg["model"]["optim"]["sched"]["warmup_steps"] == pytest.approx(expected=expected["warmup_steps"], rel=self.margin)
if out_cfg["model"]["optim"]["sched"].get("constant_steps") is not None:
assert out_cfg["model"]["optim"]["sched"]["constant_steps"] == pytest.approx(expected=expected["constant_steps"], rel=self.margin)
if out_cfg["model"]["optim"]["sched"].get("warmup_ratio") is not None:
assert out_cfg["model"]["optim"]["sched"]["warmup_ratio"] == pytest.approx(expected=expected["warmup_ratio"], rel=self.margin)
f = f"{cfg['search_config']['train_settings']['logs']}/base_cfg_{model_size}b.yaml"
assert os.path.exists(f), "Base config file was not created correctly."
os.remove(f)
| NeMo-Megatron-Launcher-master | auto_configurator/tests/code_tests/test_base_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate base YAML configuration for any model type and size."""
import math
import os
from typing import Tuple
import omegaconf
import yaml
from autoconfig import utils
def calculate_model_size(
gpu_count: int,
max_training_days: float,
model_size_in_b: float = None,
tflops_per_gpu: int = 140,
num_tokens_in_b: int = 300,
model_name: str = "gpt3",
) -> float:
"""
Estimates a model size to be trained given the constraints. If the
model_size is provided, it estimates the time to train it with the given
constraints.
Example: output 5B params to train for 7 days with 160 GPUs.
:param int gpu_count: number of gpus to use (num_nodes * gpus_per_node).
:param float max_training_days: number of days to train the model for.
:param float model_size_in_b: number of parameters in the model, if known.
:param int tflops_per_gpu: estimated number of TFLOPS/s per GPU.
    :param int num_tokens_in_b: number of tokens to train the model for.
    :param str model_name: name of the model, such as gpt3, t5, mt5...
    :return: number of parameters to use for training.
:rtype: float
"""
# Model size is not known, must be estimated.
if model_size_in_b is None:
model_size_in_b = _estimate_model_size(
max_training_days=max_training_days,
gpu_count=gpu_count,
tflops_per_gpu=tflops_per_gpu,
num_tokens_in_b=num_tokens_in_b,
model_name=model_name,
)
# Model size is known, so only time to train estimate is needed.
else:
max_training_days = _estimate_training_time(
model_size_in_b=model_size_in_b,
gpu_count=gpu_count,
tflops_per_gpu=tflops_per_gpu,
num_tokens_in_b=num_tokens_in_b,
model_name=model_name,
)
print(
f"You can train a {model_size_in_b}B parameter model in "
f"{max_training_days} days using {gpu_count} GPUs. This result assumes "
f"you are training to {num_tokens_in_b}B tokens, and each GPU achieves "
f"{tflops_per_gpu} TFLOPS."
)
return model_size_in_b
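# Illustrative usage (approximate figures follow the formulas below): with the defaults
# (140 TFLOPS/GPU, 300B tokens, gpt3),
#   calculate_model_size(gpu_count=160, max_training_days=7)
# estimates roughly a 5.6B-parameter model, close to the ~5B docstring example above;
# passing model_size_in_b explicitly makes the call report the training time instead.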
def _estimate_model_size(
max_training_days: float, gpu_count: int, tflops_per_gpu: int, num_tokens_in_b: int, model_name: str
) -> float:
"""
Estimates model size given time and hardware constraints. It's only used if the model size is
not provided by the user.
:param float max_training_days: number of days to train the model for.
:param int gpu_count: number of gpus to use (num_nodes * gpus_per_node).
:param int tflops_per_gpu: estimated number of TFLOPS/s per GPU.
:param int num_tokens_in_b: number of tokens to train the model for.
:param str model_name: name of the model, such as gpt3, t5, mt5...
:return: number of parameters to use for training.
:rtype: float
:raises NotImplementedError: if the model_name is not one of the supported models.
"""
model_penalty = 0.87 if model_name == "mt5" else 1.0
valid_models = ["gpt3", "t5", "mt5", "bert"]
try:
if model_name in valid_models:
return round(
model_penalty
* (max_training_days * 3600 * 24 * gpu_count * tflops_per_gpu * 1e12)
/ (8 * num_tokens_in_b * 1e9)
/ 1e9,
2,
)
else:
raise NotImplementedError
except ValueError as err:
print(f"Input values were not valid: {err}")
except ZeroDivisionError as err:
print(f"Cannot divide by zero. This can happen if num_tokens_in_b is zero: {err}")
except NotImplementedError as err:
print(f"Model size estimation is only available for {valid_models}: {err}")
return None
def _estimate_training_time(
model_size_in_b: float, gpu_count: int, tflops_per_gpu: int, num_tokens_in_b: int, model_name: str,
) -> float:
"""
Estimates training time for a given model size and hardware constraint. To be used when
a model size is provided by the user.
:param float model_size_in_b: number of parameters to use for training.
:param int gpu_count: number of gpus to use (num_nodes * gpus_per_node).
    :param int tflops_per_gpu: estimated number of TFLOPS per GPU.
:param int num_tokens_in_b: number of tokens to train the model for.
:param str model_name: name of the model, such as gpt3, t5, mt5...
:return: number of days it will take to train the model.
:rtype: float
:raises NotImplementedError: if the model_name is not one of the supported models.
"""
model_penalty = 1.15 if model_name == "mt5" else 1.0
valid_models = ["gpt3", "t5", "mt5", "bert"]
try:
if model_name in valid_models:
return round(
model_penalty
* (model_size_in_b * 1e9 * 8 * num_tokens_in_b * 1e9)
/ (3600 * 24 * gpu_count * tflops_per_gpu * 1e12),
2,
)
else:
raise NotImplementedError
except ValueError as err:
print(f"Input values were not valid: {err}")
except ZeroDivisionError as err:
print(f"Cannot divide by zero. This can happen if gpu_count or tflops_per_gpu are zero: {err}")
except NotImplementedError as err:
print(f"Training time estimation is only available for {valid_models}: {err}")
return None
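# Worked example (illustrative sketch): a known 5B-parameter GPT-3 model on 160 GPUs at
# 140 TFLOPS, trained to 300B tokens, needs about
#   (5e9 * 8 * 300e9) / (3600 * 24 * 160 * 140e12) ~= 6.2 days,
# which is what _estimate_training_time(model_size_in_b=5.0, gpu_count=160, tflops_per_gpu=140,
# num_tokens_in_b=300, model_name="gpt3") returns.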
def _calculate_gbs_tp_pp(model_size_in_b: float, seq_length: int, gpu_memory_gb: int = 80, model_name: str = "gpt3") -> Tuple[int, int, int]:
"""
Calculates Global Batch Size (GBS), Tensor Parallelism (TP), and Pipeline
Parallelism (PP) values, given a model size and model name.
    :param float model_size_in_b: the number of parameters in the model.
    :param int seq_length: sequence length to be used during training.
    :param int gpu_memory_gb: memory available per GPU, in GBs.
    :param str model_name: name of the model, such as gpt3, t5, mt5...
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises NotImplementedError: if the model_name is not one of the supported models.
"""
if model_name == "gpt3":
if gpu_memory_gb == 80:
return _gbs_tp_pp_gpt3_80gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
elif gpu_memory_gb == 40:
return _gbs_tp_pp_gpt3_40gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
elif model_name in ["t5", "mt5"]:
if gpu_memory_gb == 80:
return _gbs_tp_pp_t5_80gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
elif gpu_memory_gb == 40:
return _gbs_tp_pp_t5_40gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
elif model_name == "bert":
if gpu_memory_gb == 80:
return _gbs_tp_pp_bert_80gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
elif gpu_memory_gb == 40:
return _gbs_tp_pp_bert_40gb(model_size_in_b=model_size_in_b, seq_length=seq_length)
else:
raise NotImplementedError("Only gpt3, t5, mt5 and bert are supported.")
return None
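# Usage sketch (illustrative): for a 13B GPT-3 model at sequence length 2048 on the default
# 80GB GPUs, the lookup tables below resolve to
#   _calculate_gbs_tp_pp(model_size_in_b=13.0, seq_length=2048) -> (2048, 4, 1)
# i.e. global batch size 2048, tensor parallelism 4 and no pipeline parallelism.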
def _gbs_tp_pp_gpt3_80gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any GPT-3 model size for 80GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 2048:
if model_size_in_b <= 1.0:
gbs, tp, pp = 256, 1, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 1024, 1, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 2048, 2, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 2048, 4, 1
elif model_size_in_b <= 20.6:
gbs, tp, pp = 2048, 8, 1
elif model_size_in_b <= 45.6:
gbs, tp, pp = 2048, 8, 2
elif model_size_in_b <= 123.6:
gbs, tp, pp = 2048, 8, 4
elif model_size_in_b <= 196.6:
gbs, tp, pp = 2048, 8, 8
elif model_size_in_b <= 392.2:
gbs, tp, pp = 2048, 8, 16
elif model_size_in_b <= 735:
gbs, tp, pp = 2048, 8, 32
elif model_size_in_b <= 1100:
gbs, tp, pp = 2048, 8, 64
else:
raise ValueError("No GPT-3 model larger than 1.1T parameters is supported.")
elif seq_length == 4096:
if model_size_in_b <= 1.0:
gbs, tp, pp = 128, 1, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 512, 1, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 1024, 2, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 1024, 4, 1
elif model_size_in_b <= 20.6:
gbs, tp, pp = 1024, 4, 2
elif model_size_in_b <= 45.6:
gbs, tp, pp = 1024, 8, 2
else:
            raise ValueError("No GPT-3 model larger than 45.6B parameters is supported with sequence length 4096.")
elif seq_length == 8192:
if model_size_in_b <= 1.0:
gbs, tp, pp = 64, 1, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 256, 1, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 512, 2, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 512, 4, 1
elif model_size_in_b <= 20.6:
gbs, tp, pp = 512, 4, 4
elif model_size_in_b <= 45.6:
gbs, tp, pp = 512, 8, 2
else:
            raise ValueError("No GPT-3 model larger than 45.6B parameters is supported with sequence length 8192.")
elif seq_length == 16384:
if model_size_in_b <= 1.0:
gbs, tp, pp = 32, 2, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 128, 2, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 256, 2, 2
elif model_size_in_b <= 13.0:
gbs, tp, pp = 256, 4, 1
elif model_size_in_b <= 20.6:
gbs, tp, pp = 256, 8, 2
else:
            raise ValueError("No GPT-3 model larger than 20.6B parameters is supported with sequence length 16384.")
elif seq_length == 32768:
if model_size_in_b <= 1.0:
gbs, tp, pp = 16, 2, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 64, 2, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 128, 4, 2
elif model_size_in_b <= 13.0:
gbs, tp, pp = 128, 4, 2
elif model_size_in_b <= 20.6:
gbs, tp, pp = 128, 8, 2
else:
            raise ValueError("No GPT-3 model larger than 20.6B parameters is supported with sequence length 32768.")
else:
raise ValueError(f"seq_length = {seq_length} is not supported. Available seq_length list for GPT-3 models: [2048, 4096, 8192, 16384, 32768]")
return gbs, tp, pp
def _gbs_tp_pp_gpt3_40gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any GPT-3 model size for 40GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 2048:
if model_size_in_b <= 1.0:
gbs, tp, pp = 256, 1, 1
elif model_size_in_b <= 4.0:
gbs, tp, pp = 1024, 4, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 2048, 8, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 2048, 8, 2
elif model_size_in_b <= 20.6:
gbs, tp, pp = 2048, 8, 4
elif model_size_in_b <= 45.6:
gbs, tp, pp = 2048, 8, 4
elif model_size_in_b <= 123.6:
gbs, tp, pp = 2048, 8, 8
elif model_size_in_b <= 196.6:
gbs, tp, pp = 2048, 8, 16
elif model_size_in_b <= 392.2:
gbs, tp, pp = 2048, 8, 32
elif model_size_in_b <= 735:
gbs, tp, pp = 2048, 8, 64
elif model_size_in_b <= 1100:
gbs, tp, pp = 2048, 8, 128
else:
raise ValueError("No GPT-3 model larger than 1.1T parameters is supported.")
else:
raise ValueError("seq_length != 2048 is not supported on 40GB GPU.")
return gbs, tp, pp
def _gbs_tp_pp_t5_80gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any T5/mT5 model size for 80GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 512:
if model_size_in_b <= 1.0:
gbs, tp, pp = 2048, 1, 1
elif model_size_in_b <= 5.0:
gbs, tp, pp = 1920, 2, 1
elif model_size_in_b <= 11.5:
gbs, tp, pp = 1920, 4, 1
elif model_size_in_b <= 18.5:
gbs, tp, pp = 1920, 8, 1
elif model_size_in_b <= 25.9:
gbs, tp, pp = 1920, 8, 2
elif model_size_in_b <= 43.0:
gbs, tp, pp = 1920, 8, 4
elif model_size_in_b <= 85.5:
gbs, tp, pp = 1920, 8, 8
elif model_size_in_b <= 165.5:
gbs, tp, pp = 1920, 8, 16
elif model_size_in_b <= 250:
gbs, tp, pp = 1920, 8, 32
else:
raise ValueError("No T5/mT5 model larger than 250B parameters is supported.")
else:
raise ValueError(f"seq_length = {seq_length} is not supported. Available seq_length list for T5 models: [512]")
return gbs, tp, pp
def _gbs_tp_pp_t5_40gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any T5/mT5 model size for 40GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 512:
if model_size_in_b <= 0.5:
gbs, tp, pp = 2048, 1, 1
        elif model_size_in_b <= 1.0:
            gbs, tp, pp = 2048, 2, 1
elif model_size_in_b <= 5.0:
gbs, tp, pp = 1920, 4, 1
elif model_size_in_b <= 11.5:
gbs, tp, pp = 1920, 8, 1
elif model_size_in_b <= 18.5:
gbs, tp, pp = 1920, 8, 2
elif model_size_in_b <= 25.9:
gbs, tp, pp = 1920, 8, 4
elif model_size_in_b <= 43.0:
gbs, tp, pp = 1920, 8, 8
elif model_size_in_b <= 85.5:
gbs, tp, pp = 1920, 8, 16
elif model_size_in_b <= 165.5:
gbs, tp, pp = 1920, 8, 32
elif model_size_in_b <= 250:
gbs, tp, pp = 1920, 8, 64
else:
raise ValueError("No T5/mT5 model larger than 250B parameters is supported.")
else:
raise ValueError(f"seq_length = {seq_length} is not supported. Available seq_length list for T5 models: [512]")
return gbs, tp, pp
def _gbs_tp_pp_bert_80gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any BERT model size for 80GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 512:
if model_size_in_b <= 1.0:
gbs, tp, pp = 256, 1, 1
elif model_size_in_b <= 3.2:
gbs, tp, pp = 1024, 1, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 2048, 2, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 2048, 4, 1
elif model_size_in_b <= 25.5:
gbs, tp, pp = 2048, 8, 1
elif model_size_in_b <= 46.5:
gbs, tp, pp = 2048, 8, 2
elif model_size_in_b <= 87.5:
gbs, tp, pp = 2048, 8, 4
elif model_size_in_b <= 165.5:
gbs, tp, pp = 4096, 8, 8
elif model_size_in_b <= 250.5:
gbs, tp, pp = 2048, 8, 16
else:
raise ValueError("No BERT model larger than 250B parameters is supported.")
else:
raise ValueError(f"seq_length = {seq_length} is not supported. Available seq_length list for BERT models: [512]")
return gbs, tp, pp
def _gbs_tp_pp_bert_40gb(model_size_in_b: float, seq_length: int) -> Tuple[int, int, int]:
"""
Outputs GBS, TP and PP values for any BERT model size for 40GB GPUs.
:param float model_size_in_b: the number of parameters in the model.
:returns: tuple (gbs, tp, pp)
WHERE
int gbs is the Global Batch Size to use for training.
int tp is the Tensor Parallelism value to use for training.
int pp is the Pipeline Parallelism value to use for training.
:raises ValueError: if the model_size_in_b is larger than the supported max model size.
"""
if seq_length == 512:
if model_size_in_b <= 1.0:
gbs, tp, pp = 256, 1, 1
elif model_size_in_b <= 3.2:
gbs, tp, pp = 1024, 4, 1
elif model_size_in_b <= 8.0:
gbs, tp, pp = 2048, 8, 1
elif model_size_in_b <= 13.0:
gbs, tp, pp = 2048, 8, 2
elif model_size_in_b <= 25:
gbs, tp, pp = 2048, 8, 4
elif model_size_in_b <= 46.5:
gbs, tp, pp = 2048, 8, 8
elif model_size_in_b <= 87.5:
gbs, tp, pp = 2048, 8, 16
elif model_size_in_b <= 165.5:
gbs, tp, pp = 2048, 8, 32
elif model_size_in_b <= 250.5:
gbs, tp, pp = 2048, 8, 64
else:
raise ValueError("No BERT model larger than 250B parameters is supported.")
else:
raise ValueError(f"seq_length = {seq_length} is not supported. Available seq_length list for BERT models: [512]")
return gbs, tp, pp
def generate_base_config(
model_size_in_b: float,
nodes: int,
gpus_per_node: int,
gpu_memory_gb: int,
max_training_days: float,
num_tokens_in_b: int,
vocab_size: int,
seq_length: int,
custom_cfg: str,
model_name: str,
cfg: omegaconf.dictconfig.DictConfig,
):
"""
Generates base config dictionary for a given model name and size.
:param float model_size_in_b: number of parameters in the model, if known.
:param int nodes: number of nodes to use for training.
:param int gpus_per_node: number of GPUs available in each node.
    :param int gpu_memory_gb: memory available per GPU, in GBs.
    :param float max_training_days: number of days to train the model for.
    :param int num_tokens_in_b: number of tokens to train the model for.
    :param int vocab_size: size of the vocabulary to use for training.
    :param int seq_length: sequence length to be used during training.
    :param str custom_cfg: path to a custom base config file; if None, the default base config is used.
    :param str model_name: name of the model, such as gpt3, t5, mt5...
:param omegaconf.dictconfig.DictConfig cfg: full config object.
:return: base config object for the given model.
:rtype: dict
"""
base_cfg = utils.generic_base_config(cfg=cfg, custom_cfg=custom_cfg, model_name=model_name)
# GBS: global batch size
if custom_cfg is None:
gbs, tp, pp = _calculate_gbs_tp_pp(
model_size_in_b=model_size_in_b, gpu_memory_gb=gpu_memory_gb, model_name=model_name, seq_length=seq_length
)
else:
gbs = base_cfg["model"]["global_batch_size"]
tp = base_cfg["model"]["tensor_model_parallel_size"]
pp = base_cfg["model"]["pipeline_model_parallel_size"]
# RUN
base_cfg["run"]["name"] = f"{model_name}_{model_size_in_b}b"
base_cfg["run"]["results_dir"] = "${base_results_dir}/${.name}"
int_days = int(max_training_days)
int_hours = int(24 * (max_training_days - int(max_training_days)))
base_cfg["run"]["time_limit"] = f"{int_days}-{int_hours:02d}:00:00"
# TRAINER
base_cfg["trainer"]["num_nodes"] = nodes
base_cfg["trainer"]["precision"] = "bf16"
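    # Training steps needed to consume num_tokens_in_b billion tokens at this sequence length and
    # global batch size; e.g. 300B tokens with seq_length 2048 and GBS 2048 gives ~71.5k steps.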
base_cfg["trainer"]["max_steps"] = int((num_tokens_in_b * 1e9) / (seq_length * gbs))
if int_hours == 0:
int_days -= 1
int_hours = 23
else:
int_hours -= 1
base_cfg["trainer"]["max_time"] = f"{int_days}:{int_hours:02d}:30:00"
# EXP_MANAGER
wandb_cfg = cfg.get("wandb")
enable = wandb_cfg.get("enable")
project = wandb_cfg.get("project")
if enable:
base_cfg["exp_manager"]["create_wandb_logger"] = bool(enable)
base_cfg["exp_manager"]["wandb_logger_kwargs"]["project"] = project
# MODEL
if custom_cfg is None:
layers, hs, att_h, ffn, kv, lr = utils.calculate_model_size_params(
model_size_in_b=model_size_in_b, vocab_size=vocab_size, seq_length=seq_length, model_name=model_name,
)
if model_name == "gpt3":
base_cfg["model"]["num_layers"] = int(layers)
base_cfg["model"]["global_batch_size"] = int(gbs)
base_cfg["model"]["hidden_size"] = int(hs)
base_cfg["model"]["num_attention_heads"] = int(att_h)
base_cfg["model"]["encoder_seq_length"] = seq_length
base_cfg["model"]["max_position_embeddings"] = seq_length
base_cfg["model"]["data"]["seq_length"] = seq_length
if ffn is not None:
base_cfg["model"]["ffn_hidden_size"] = int(ffn)
if kv is not None:
base_cfg["model"]["kv_channels"] = int(kv)
base_cfg["model"]["init_method_std"] = round(0.64 / math.sqrt(hs), 6)
base_cfg["model"]["optim"]["sched"]["warmup_steps"] = int(0.0015 * base_cfg["trainer"]["max_steps"])
base_cfg["model"]["optim"]["sched"]["constant_steps"] = int(0.166 * base_cfg["trainer"]["max_steps"])
if model_size_in_b <= 13.0:
base_cfg["model"]["sequence_parallel"] = False
elif model_name == "bert":
base_cfg["model"]["global_batch_size"] = int(gbs)
base_cfg["model"]["num_layers"] = int(layers)
base_cfg["model"]["hidden_size"] = int(hs)
base_cfg["model"]["num_attention_heads"] = int(att_h)
if ffn is not None:
base_cfg["model"]["ffn_hidden_size"] = int(ffn)
if kv is not None:
base_cfg["model"]["kv_channels"] = int(kv)
base_cfg["model"]["init_method_std"] = round(0.64 / math.sqrt(hs), 6)
base_cfg["model"]["optim"]["sched"]["warmup_steps"] = int(0.0015 * base_cfg["trainer"]["max_steps"])
base_cfg["model"]["optim"]["sched"]["constant_steps"] = int(0.166 * base_cfg["trainer"]["max_steps"])
if model_size_in_b <= 13.0:
base_cfg["model"]["sequence_parallel"] = False
else:
base_cfg["model"]["global_batch_size"] = int(gbs)
base_cfg["model"]["encoder"]["num_layers"] = int(layers)
base_cfg["model"]["decoder"]["num_layers"] = int(layers)
base_cfg["model"]["encoder"]["hidden_size"] = int(hs)
base_cfg["model"]["decoder"]["hidden_size"] = int(hs)
base_cfg["model"]["encoder"]["num_attention_heads"] = int(att_h)
base_cfg["model"]["decoder"]["num_attention_heads"] = int(att_h)
if ffn is not None:
base_cfg["model"]["encoder"]["ffn_hidden_size"] = int(ffn)
base_cfg["model"]["decoder"]["ffn_hidden_size"] = int(ffn)
if kv is not None:
base_cfg["model"]["encoder"]["kv_channels"] = int(kv)
base_cfg["model"]["decoder"]["kv_channels"] = int(kv)
base_cfg["model"]["init_method_std"] = 0.015
base_cfg["model"]["optim"]["sched"]["warmup_ratio"] = 0.01
base_cfg["model"]["optim"]["lr"] = lr
base_cfg["model"]["optim"]["sched"]["min_lr"] = round(lr * 0.1, 8)
if cfg.get("cluster_type") == "bcp":
index_map_dir = os.path.join(cfg.get("base_results_dir"), "data_index_files")
os.makedirs(index_map_dir, exist_ok=True)
base_cfg["model"]["data"]["index_mapping_dir"] = index_map_dir
with open(f"{cfg.search_config.train_settings.logs}/base_cfg_{model_size_in_b}b.yaml", "w") as f:
yaml.dump(base_cfg, f)
return base_cfg
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/base_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for the HP tool."""
import copy
from typing import List, Optional, Tuple
import omegaconf
import yaml
def _calculate_model_size(
vocab_size: int = None,
seq_length: int = None,
hidden_size: int = None,
num_layers: int = None,
ffn_size: int = None,
kv_channels: int = None,
att_heads: int = None,
model_name: str = "gpt3",
):
"""
Calculates the model size (number of parameters in billions), given the model parameters
and name.
:param int vocab_size: vocabulary size to be used during training.
:param int seq_length: input sequence length to be used during training.
:param int hidden_size: size of the hidden layers of the model.
:param int num_layers: number of layers in the model.
:param int ffn_size: FFN size of the model.
:param int kv_channels: number of KV channels in the transformer layers.
:param int att_heads: number of attention heads in the transformer layers.
    :param str model_name: name of the model, i.e. gpt3, t5, mt5...
:return: size of the model in billions of parameters.
:rtype: float
:raises NotImplementedError: if the model name is not valid.
"""
if model_name == "gpt3":
model_size = (
12
* num_layers
* hidden_size ** 2
* (1 + (13 / (12 * hidden_size)) + ((vocab_size + seq_length) / (12 * num_layers * hidden_size)))
/ 1e9
)
elif model_name in ["t5", "mt5"]:
# 2 L F + 3 L P + H (2 + 4 L F + L (21 + 12 P) + 1 S + 1 V)
proj_size = att_heads * kv_channels
model_size = (
2 * num_layers * 1.5 * ffn_size
+ 3 * num_layers * proj_size
+ hidden_size
* (2 + 4 * num_layers * 1.5 * ffn_size + num_layers * (21 + 12 * proj_size) + seq_length + vocab_size)
) / 1e9
elif model_name == "bert":
model_size = (
num_layers * (ffn_size + hidden_size * (4 * hidden_size + 3 * att_heads + 2 * ffn_size + 6))
+ hidden_size * (vocab_size + seq_length + hidden_size + 5)
) / 1e9
else:
raise NotImplementedError("Model name is not valid.")
return model_size
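# Worked example (illustrative sketch): a GPT-3 config with 24 layers, hidden size 4096,
# vocab size 51200 and sequence length 2048 gives
#   12 * 24 * 4096**2 * (1 + 13 / (12 * 4096) + (51200 + 2048) / (12 * 24 * 4096)) / 1e9 ~= 5.05,
# i.e. roughly a 5B-parameter model.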
def calculate_model_size_params(
model_size_in_b: float, vocab_size: int = 51200, seq_length: int = 2048, model_name: str = "gpt3"
) -> Tuple[int, int, int, Optional[int], Optional[int], float]:
"""
Calculates the parameters that affect model_size: hidden size, attention heads,
KV channels, and FFN size. It also calculates the learning rate.
:param float model_size_in_b: float, number of parameters in the desired model config, in billions.
:param int seq_length: int, sequence length to be used during training.
:param int vocab_size: int, size of the vocabulary to use for training.
:param str model_name: str, name of the model to be trained, i.e. gpt3, t5, mt5...
:returns: tuple (layers, hs, att_h, ffn, kv, lr)
WHERE
int layers is the number of layers in the model.
int hs is the hidden size of the model.
int att_h is the number of attention heads in the model.
int ffn is the FFN hidden size of the model.
int kv is the number of KV channels in the model.
float lr is the learning rate used to train the model.
:raises ValueError: if the model size is larger than the max supported model size.
:raises NotImplementedError: if the model name is not supported.
"""
ffn, kv = None, None # Only needed for some models.
if model_name == "gpt3":
if model_size_in_b < 0.25:
hs, att_h, lr = 768, 12, 6e-4
elif model_size_in_b < 0.5:
hs, att_h, lr = 1024, 16, 3e-4
elif model_size_in_b < 1:
hs, att_h, lr = 1536, 16, 2.5e-4
elif model_size_in_b < 2:
hs, att_h, lr = 2048, 16, 2e-4
elif model_size_in_b < 3:
hs, att_h, lr = 2560, 32, 1.6e-4
elif model_size_in_b < 4.5:
hs, att_h, lr = 3072, 32, 1.4e-4
elif model_size_in_b < 8:
hs, att_h, lr = 4096, 32, 1.2e-4
elif model_size_in_b < 15:
hs, att_h, lr = 5120, 40, 1e-4
elif model_size_in_b < 25:
hs, att_h, lr = 6144, 48, 1e-4
elif model_size_in_b < 52:
hs, att_h, lr = 8192, 64, 0.8e-4
elif model_size_in_b < 105:
hs, att_h, lr = 10240, 80, 0.7e-4
elif model_size_in_b < 205:
hs, att_h, lr = 12288, 96, 0.6e-4
elif model_size_in_b < 405:
hs, att_h, lr = 20480, 128, 0.5e-4
elif model_size_in_b < 805:
hs, att_h, lr = 20480, 128, 0.4e-4
elif model_size_in_b < 1105:
hs, att_h, lr = 25600, 160, 0.3e-4
else:
raise ValueError("Model_size for GPT-3 must be smaller than 1.1T parameters.")
elif model_name == "t5":
kv, lr = 64, 1e-4
if model_size_in_b < 0.1:
hs, att_h, ffn = 512, 6, 1024
elif model_size_in_b < 0.4:
hs, att_h, ffn = 768, 12, 2048
elif model_size_in_b < 1:
hs, att_h, ffn = 1024, 16, 2816
elif model_size_in_b < 5:
hs, att_h, ffn = 2048, 32, 5120
elif model_size_in_b < 15:
hs, att_h, ffn = 4096, 64, 10240
elif model_size_in_b < 25.9:
hs, att_h, ffn = 5120, 80, 10880
elif model_size_in_b < 43.0:
hs, att_h, ffn = 6144, 96, 10880
elif model_size_in_b <= 85.5:
hs, att_h, ffn = 6144, 96, 16384
elif model_size_in_b <= 165.5:
hs, att_h, ffn, kv = 7680, 96, 20480, 128
elif model_size_in_b <= 250:
hs, att_h, ffn, kv = 12288, 96, 32768, 128
else:
raise ValueError("Model_size for T5 must be smaller than 250B parameters.")
elif model_name == "mt5":
kv, lr = 64, 1e-4
if model_size_in_b < 0.25:
hs, att_h, ffn = 512, 6, 1024
elif model_size_in_b < 0.5:
hs, att_h, ffn = 768, 12, 2048
elif model_size_in_b < 1.2:
hs, att_h, ffn = 1024, 16, 2816
elif model_size_in_b < 5:
hs, att_h, ffn = 2048, 32, 5120
elif model_size_in_b < 15:
hs, att_h, ffn = 4096, 64, 10240
elif model_size_in_b < 25.9:
hs, att_h, ffn = 5120, 80, 10880
elif model_size_in_b < 43.0:
hs, att_h, ffn = 6144, 96, 10880
elif model_size_in_b <= 85.5:
hs, att_h, ffn = 6144, 96, 16384
elif model_size_in_b <= 165.5:
hs, att_h, ffn, kv = 7680, 96, 20480, 128
elif model_size_in_b <= 250:
hs, att_h, ffn, kv = 12288, 96, 32768, 128
else:
raise ValueError("Model_size for mT5 must be smaller than 250B parameters.")
elif model_name == "bert":
lr = 1e-4
if model_size_in_b < 0.25:
hs, att_h, lr = 768, 12, 2e-4
elif model_size_in_b < 0.5:
hs, att_h, lr = 1024, 16, 2e-4
elif model_size_in_b < 1:
hs, att_h = 1536, 16
elif model_size_in_b < 2:
hs, att_h = 2048, 16
elif model_size_in_b < 3:
hs, att_h = 2560, 32
elif model_size_in_b < 4.5:
hs, att_h = 2560, 32
elif model_size_in_b < 8:
hs, att_h = 4096, 32
elif model_size_in_b < 15:
hs, att_h = 5120, 40
elif model_size_in_b <= 25:
hs, att_h = 6144, 48
elif model_size_in_b <= 46.5:
hs, att_h = 7680, 48
elif model_size_in_b <= 87.5:
hs, att_h = 9216, 96
elif model_size_in_b <= 165.5:
hs, att_h = 9216, 96
elif model_size_in_b <= 250.5:
hs, att_h = 12288, 96
else:
            raise ValueError("Model_size for BERT must be smaller than 250B parameters.")
ffn = 4 * hs
else:
raise NotImplementedError("Model name is not valid.")
# Try powers of 2
margin = 0.01
for attempt in range(0, 10):
for layers in (2 ** p for p in range(1, 10)):
out_size = _calculate_model_size(
vocab_size=vocab_size,
seq_length=seq_length,
hidden_size=hs,
num_layers=layers,
ffn_size=ffn,
kv_channels=kv,
att_heads=att_h,
model_name=model_name,
)
if model_size_in_b * (1.0 - margin) < out_size < model_size_in_b * (1.0 + margin):
return layers, hs, att_h, ffn, kv, lr
        margin += 0.01  # Widen the margin of acceptable model sizes.
# Try multiples of 16
margin = 0.01
for attempt in range(0, 6):
for layers in range(16, 201, 16):
out_size = _calculate_model_size(
vocab_size=vocab_size,
seq_length=seq_length,
hidden_size=hs,
num_layers=layers,
ffn_size=ffn,
kv_channels=kv,
att_heads=att_h,
model_name=model_name,
)
if model_size_in_b * (1.0 - margin) < out_size < model_size_in_b * (1.0 + margin):
return layers, hs, att_h, ffn, kv, lr
        margin += 0.01  # Widen the margin of acceptable model sizes.
# Try multiples of 2
margin = 0.01
for attempt in range(0, 6):
for layers in range(2, 201, 2):
out_size = _calculate_model_size(
vocab_size=vocab_size,
seq_length=seq_length,
hidden_size=hs,
num_layers=layers,
ffn_size=ffn,
kv_channels=kv,
att_heads=att_h,
model_name=model_name,
)
if model_size_in_b * (1.0 - margin) < out_size < model_size_in_b * (1.0 + margin):
return layers, hs, att_h, ffn, kv, lr
        margin += 0.01  # Widen the margin of acceptable model sizes.
# Try multiples of 5
margin = 0.01
for attempt in range(0, 6):
for layers in range(5, 201, 5):
out_size = _calculate_model_size(
vocab_size=vocab_size,
seq_length=seq_length,
hidden_size=hs,
num_layers=layers,
ffn_size=ffn,
kv_channels=kv,
att_heads=att_h,
model_name=model_name,
)
if model_size_in_b * (1.0 - margin) < out_size < model_size_in_b * (1.0 + margin):
return layers, hs, att_h, ffn, kv, lr
        margin += 0.01  # Widen the margin of acceptable model sizes.
# Try any valid number
margin = 0.01
for attempt in range(0, 10):
for layers in range(1, 200):
out_size = _calculate_model_size(
vocab_size=vocab_size,
seq_length=seq_length,
hidden_size=hs,
num_layers=layers,
ffn_size=ffn,
kv_channels=kv,
att_heads=att_h,
model_name=model_name,
)
if model_size_in_b * (1.0 - margin) < out_size < model_size_in_b * (1.0 + margin):
return layers, hs, att_h, ffn, kv, lr
        margin += 0.01  # Widen the margin of acceptable model sizes.
raise Exception("Number of layers not found, config is not possible.")
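# Usage sketch (illustrative, values not guaranteed): asking for a 5B GPT-3 model with the
# default vocab and sequence length,
#   calculate_model_size_params(model_size_in_b=5.0, model_name="gpt3")
# is expected to land on the ~5B row of the table above and return approximately
# (24, 4096, 32, None, None, 1.2e-4): 24 layers, hidden size 4096, 32 heads, lr 1.2e-4.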
def generic_base_config(cfg: omegaconf.dictconfig.DictConfig, custom_cfg: Optional[str], model_name: str = "gpt3") -> dict:
    """
    Generates a base config dictionary from a base config yaml file.
    :param omegaconf.dictconfig.DictConfig cfg: hydra-like config object for the HP tool.
    :param Optional[str] custom_cfg: path to a custom base config file; if None, the default base config for model_name is used.
    :param str model_name: name of the model, i.e. gpt3, t5, mt5...
:returns: dictionary containing the base configuration for the model.
:rtype: dict
"""
cfg_path = f"{cfg.auto_configurator_path}/base_configs/{model_name}.yaml" if custom_cfg is None else custom_cfg
with open(cfg_path) as f:
base_cfg = yaml.safe_load(f)
return base_cfg
def modify_cfg(
base_cfg: dict,
act: int,
num_mbs_act: int,
act_per_pipe: int,
tp: int,
pp: int,
virtual_pipelines: int,
mbs: int,
max_minutes: int,
max_steps: int,
num_nodes: int,
model_name: str,
) -> dict:
"""
    Modifies the base configuration with the parameters that the HP tool heuristics selected for the current candidate run.
:param dict base_cfg: base configuration for the current model, which will be modified in this function.
:param int act: number of activation checkpointing layers to use for the model.
    :param int num_mbs_act: number of micro batches with partial activation checkpointing to use for the model.
    :param int act_per_pipe: number of activation checkpointing layers per pipeline stage to use for the model.
:param int tp: Tensor Parallelism (TP) value to be set for the model.
:param int pp: Pipeline Parallelism (PP) value to be set for the model.
:param int virtual_pipelines: Virtual Pipelines value to be set for the model.
:param int mbs: Micro Batch Size (MBS) value to be set for the model.
:param int max_minutes: maximum amount of time to run this model for.
:param int max_steps: maximum number of steps to run this model for.
:param int num_nodes: number of nodes to use for the training run.
:param str model_name: name of the model, i.e. gpt3, t5, mt5...
:return: dictionary containing the updated model configuration parameters.
:rtype: dict
"""
new_cfg = copy.deepcopy(base_cfg)
if act is not None:
if model_name in ["gpt3", "bert"]:
new_cfg["model"]["activations_checkpoint_num_layers"] = act
else:
new_cfg["model"]["encoder"]["activations_checkpoint_num_layers"] = act // 2
new_cfg["model"]["decoder"]["activations_checkpoint_num_layers"] = act // 2
if num_mbs_act is not None and model_name in ["gpt3", "bert"]:
new_cfg["model"]["num_micro_batches_with_partial_activation_checkpoints"] = num_mbs_act
if act_per_pipe is not None and model_name in ["gpt3", "bert"]:
new_cfg["model"]["activations_checkpoint_layers_per_pipeline"] = act_per_pipe
if virtual_pipelines is not None and model_name in ["gpt3", "bert"]:
new_cfg["model"]["virtual_pipeline_model_parallel_size"] = virtual_pipelines
new_cfg["model"]["tensor_model_parallel_size"] = tp
new_cfg["model"]["pipeline_model_parallel_size"] = pp
new_cfg["model"]["micro_batch_size"] = mbs
if model_name in ["gpt3", "bert"]:
att_heads = new_cfg["model"]["num_attention_heads"]
num_layers = new_cfg["model"]["num_layers"]
else:
att_heads = new_cfg["model"]["encoder"]["num_attention_heads"]
num_layers = new_cfg["model"]["encoder"]["num_layers"]
# gbs = mbs * num_gpus * accumulate_grad_batches / (tp * pp)
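    # e.g. GBS=2048 with MBS=2 on 16 nodes x 8 GPUs and TP=4, PP=1: 2048 % (2 * 128 / 4) == 0, so the split is valid.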
num_gpus = new_cfg["trainer"]["num_nodes"] * new_cfg["trainer"]["devices"]
gbs = new_cfg["model"]["global_batch_size"]
mod_gbs = gbs % (mbs * num_gpus / (tp * pp))
mod_att_heads = att_heads % tp
mod_layers = num_layers % pp
if mod_gbs == 0 and mod_att_heads == 0 and mod_layers == 0:
# Valid config
new_cfg["trainer"]["num_nodes"] = num_nodes # Necessary for short single-node test.
new_cfg["trainer"]["max_steps"] = max_steps
new_cfg["trainer"]["val_check_interval"] = max_steps
        days = max_minutes // (24 * 60)
        hours = (max_minutes % (24 * 60)) // 60
        mins = max_minutes % 60
        new_cfg["run"]["time_limit"] = f"{days}-{hours:02d}:{mins:02d}:00"
new_cfg["run"][
"name"
] = f"{new_cfg['run']['name']}_{num_nodes}nodes_tp_{tp}_pp_{pp}_mbs_{mbs}_act_ckpt_{act}_num_mbs_act_{num_mbs_act}_act_per_pipe_{act_per_pipe}"
print(
f"Valid config: GBS={gbs}, MBS={mbs}, TP={tp}, PP={pp}, act_ckpt_layers={act}, num_mbs_act={num_mbs_act}, act_per_pipe={act_per_pipe}. Adding to directory."
)
return new_cfg
return None
def create_slurm_file(
new_script_path: str,
cmds: List[str],
job_name: str,
flags: str = "",
dependency: Optional[str] = None,
time: str = "04:00:00",
exclusive: bool = True,
mem: Optional[int] = None,
overcommit: bool = True,
nodes: Optional[int] = None,
ntasks: Optional[int] = None,
ntasks_per_node: Optional[int] = None,
gpus_per_task: Optional[int] = None,
gpus_per_node: Optional[int] = None,
partition: str = "batch",
account: Optional[str] = None,
exclude: Optional[str] = None,
output: Optional[str] = None,
comment: Optional[str] = None,
):
"""
Creates a slurm script file to launch a job on a slurm based cluster. Saves the script
to the local file system in the path specified on new_script_path.
    :param str new_script_path: path where the SLURM script will be stored in the file system.
:param List[str] cmds: list of commands to run, each one inside an srun line.
:param str job_name: name of the slurm job.
:param str flags: flags to be added to each of the srun commands.
:param Optional[str] dependency: job_id(s) to the jobs that need to run before the current job.
:param str time: slurm style time-limit for the job.
:param bool exclusive: slurm exclusive parameter.
:param Optional[int] mem: slurm mem parameter.
:param bool overcommit: slurm overcommit parameter.
:param Optional[int] nodes: number of nodes to use to train the model.
:param Optional[int] ntasks: slurm ntasks parameter.
:param Optional[int] ntasks_per_node: slurm ntasks_per_node parameter.
:param Optional[int] gpus_per_task: slurm gpus_per_task parameter.
:param Optional[int] gpus_per_node: slurm gpus_per_node parameter.
:param str partition: slurm partition parameter.
:param Optional[str] account: slurm account parameter.
    :param Optional[str] exclude: slurm exclude parameter.
    :param Optional[str] output: path to the file where the job output will be written.
    :param Optional[str] comment: slurm comment parameter.
    :return: None
"""
with open(new_script_path, "w") as f:
f.writelines("#!/usr/bin/env bash\n")
if nodes is not None:
f.writelines(f"#SBATCH --nodes={nodes}\n")
if ntasks is not None:
f.writelines(f"#SBATCH --ntasks={ntasks}\n")
if ntasks_per_node is not None:
f.writelines(f"#SBATCH --ntasks-per-node={ntasks_per_node}\n")
if gpus_per_task is not None:
f.writelines(f"#SBATCH --gpus-per-task={gpus_per_task}\n")
if gpus_per_node is not None:
f.writelines(f"#SBATCH --gpus-per-node={gpus_per_node}\n")
if dependency is not None:
dependency = dependency.strip()
if dependency != "singleton":
dependency = f"afterany:{dependency}"
f.writelines(f"#SBATCH --dependency={dependency}\n")
f.writelines(f"#SBATCH -p {partition}\n")
if account is not None:
f.writelines(f"#SBATCH -A {account}\n")
f.writelines(f"#SBATCH --job-name={job_name}\n")
if mem is not None:
f.writelines(f"#SBATCH --mem={mem}\n")
if exclusive:
f.writelines("#SBATCH --exclusive\n")
if overcommit:
f.writelines("#SBATCH --overcommit\n")
if exclude:
f.writelines(f"#SBATCH --exclude={','.join(exclude)}\n")
if output:
f.writelines(f"#SBATCH --output={output}\n")
if comment:
f.writelines(f"#SBATCH --comment={comment}\n")
f.writelines(f"#SBATCH --time={time}\n\n")
for cmd in cmds:
# assert "'" not in cmd
f.writelines(f"srun {flags} sh -c '{cmd}'\n\n")
f.writelines("set +x\n")
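# Usage sketch (illustrative, hypothetical paths): a minimal call such as
#   create_slurm_file("job.sh", cmds=["python train.py"], job_name="hp_search",
#                     nodes=1, ntasks_per_node=8, time="00:30:00")
# writes a script whose header includes "#SBATCH --nodes=1", "#SBATCH --ntasks-per-node=8",
# "#SBATCH --job-name=hp_search" and "#SBATCH --time=00:30:00", followed by one srun line
# per command in cmds.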
def convert_to_cli(cfg: omegaconf.dictconfig.DictConfig, root: bool = True) -> str:
"""
    Converts a hydra-like OmegaConf config dictionary object to a string that can be used to override
    hydra parameters using the CLI.
    :param omegaconf.dictconfig.DictConfig cfg: the config object to be converted to str format.
    :param bool root: True for the top-level call, which returns a joined string; recursive calls pass False and receive a list.
:return: the string containing the overrides for hydra.
:rtype: str
"""
result = []
if cfg.get("search_config_value") is not None:
result.append(f"search_config={cfg['search_config_value']}")
for k, v in cfg.items():
if k in [
"training_container",
"inference_container",
"training_container_image",
"inference_container_image",
"ci_test",
]:
continue
if isinstance(v, omegaconf.dictconfig.DictConfig):
output = convert_to_cli(v, False)
result.extend([f"{k}.{x}" for x in output if x != ""])
elif isinstance(v, omegaconf.listconfig.ListConfig):
result.append(f"{k}={str(v).replace(' ', '')}")
elif isinstance(v, str) and "{" in v:
continue
else:
result.append(f"{k}={convert_to_null(v)}")
return " \\\n ".join(result) if root else result
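# Usage sketch (illustrative): convert_to_cli(omegaconf.OmegaConf.create({"run": {"name": "test"}, "wandb": None}))
# returns the overrides "run.name=test" and "wandb=null", joined with a backslash-newline
# separator so they can be appended directly to a hydra command line.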
def convert_to_null(val: Optional[str]) -> str:
"""
Converts a value to the str null if None is provided, to be able to pass it to hydra.
:param Optional[str] val: value to be replaced with 'null' if the value is None.
:return: either the input value itself or 'null'.
:rtype: str
"""
if val is None:
return "null"
return val
def add_container_mounts(container_mounts: Optional[List[str]]) -> str:
"""
Converts the config container mounts to the right format for an srun command.
:param Optional[List[str]] container_mounts: list of container mounts as in the config file.
:return: the string that can be used in the srun command to add the container mounts.
:rtype: str
"""
mounts_str = ""
    if container_mounts is None or len(container_mounts) == 0:
        return ""
    assert isinstance(container_mounts, omegaconf.listconfig.ListConfig), "container_mounts must be a list."
    if container_mounts[0] is None or container_mounts[0] == "None":
        return ""
    for mount in container_mounts:
        if mount is not None and isinstance(mount, str):
            mounts_str += f",{mount}" if ":" in mount else f",{mount}:{mount}"
return mounts_str
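# Usage sketch (illustrative): given omegaconf.OmegaConf.create(["/data", "/src:/workspace"]),
# the function returns ",/data:/data,/src:/workspace"; entries without an explicit destination
# are mounted to the same path inside the container.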
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares and launches the training HP search using nemo_megatron_launcher."""
import os
import shutil
import subprocess
from typing import List, Tuple
import omegaconf
import yaml
from autoconfig import train, utils
def search_training_config(
base_cfg: dict, model_size_in_b: float, model_name: str, hydra_args: str, cfg: omegaconf.dictconfig.DictConfig,
) -> None:
"""
Entry point for the training HP search. This function calls other functions to perform three
actions: generates the grid of possible configurations; launches those configurations using
nemo_megatron_launcher; and launches a final job to compare the results of all the training
jobs.
:param dict base_cfg: base configuration of the model to be trained.
:param float model_size_in_b: number of parameters in the model, if known.
:param str model_name: name of the model to be trained: gpt3, t5, mt5...
:param str hydra_args: hydra override arguments in string format.
    :param omegaconf.dictconfig.DictConfig cfg: main hydra config object for the HP tool.
:return: None
"""
# Generate candidate configs.
base_dir, results_cfgs, num_nodes = generate_grid_search_configs(base_cfg, model_size_in_b, model_name, cfg)
# Launch candidate configs.
job_ids = launch_grid_search_configs(base_dir, results_cfgs, model_name, cfg)
# Measure and compare throughputs for each config.
launch_throughput_measure(job_ids, model_name, model_size_in_b, num_nodes, hydra_args, cfg)
def generate_grid_search_configs(
base_cfg: dict, model_size_in_b: float, model_name: str, cfg: omegaconf.dictconfig.DictConfig,
) -> Tuple[str, List[int], int]:
"""
Generates the grid of all possible configurations for the given model, and stores
each different configuration in a yaml file.
:param dict base_cfg: base configuration of the model to be trained.
:param float model_size_in_b: number of parameters in the model.
:param str model_name: name of the model to be trained: gpt3, t5, mt5...
:param omegaconf.dictconfig.DictConfig cfg: main hydra config object for the HP tool.
:returns: tuple (base_dir, results_cfgs, num_nodes)
WHERE
str base_dir is the path to the directory where the results will be stored.
List[int] results_cfgs is a list of all the config names that were generated.
int num_nodes is the number of nodes used to run each config.
"""
search_cfg = cfg.get("search_config")
train_cfg = search_cfg.get("train_settings")
num_nodes = train_cfg.get("num_nodes")
act_layers = train_cfg.get("act_ckpt_layers")
# 2 * num_layers is needed because of encoder/decoder architecture.
multiplier = 1 if model_name in ["gpt3", "bert"] else 2
seq_length = base_cfg["model"]["data"]["seq_length"]
num_layers = (
base_cfg["model"]["num_layers"]
if model_name in ["gpt3", "bert"]
else base_cfg["model"]["encoder"]["num_layers"]
)
act_method = base_cfg["model"].get("activations_checkpoint_method", "block")
tp_list, pp_list, mbs_list, min_model_parallel, max_model_parallel = _calculate_tp_pp_mbs_grid(
model_size_in_b=model_size_in_b, num_layers=num_layers, model_name=model_name, seq_length=seq_length, train_cfg=train_cfg,
)
base_dir = f"{cfg.search_config.train_settings.logs}/candidate_configs"
os.makedirs(base_dir, exist_ok=True)
max_minutes = train_cfg.get("max_minutes_per_run")
max_steps = train_cfg.get("max_steps_per_run")
valid_tp_pp_list = []
for tp in tp_list:
for pp in pp_list:
for mbs in mbs_list:
num_gpus = base_cfg["trainer"]["num_nodes"] * base_cfg["trainer"]["devices"]
gbs = base_cfg["model"]["global_batch_size"]
if model_name in ["gpt3", "bert"]:
att_heads = base_cfg["model"]["num_attention_heads"]
num_layers = base_cfg["model"]["num_layers"]
else:
att_heads = base_cfg["model"]["encoder"]["num_attention_heads"]
num_layers = base_cfg["model"]["encoder"]["num_layers"]
mod_gbs = gbs % (mbs * num_gpus / (tp * pp))
mod_att_heads = att_heads % tp
mod_layers = (multiplier * num_layers) % pp
if (
mod_gbs == 0
and mod_att_heads == 0
and mod_layers == 0
and (tp, pp) not in valid_tp_pp_list
and min_model_parallel <= tp * pp <= max_model_parallel
):
valid_tp_pp_list.append((tp, pp))
# Generate grid search configs.
results_cfgs = [[] for _ in range(multiplier * num_layers + 1)]
for tp, pp in valid_tp_pp_list:
(
virtual_pipelines,
act_ckpt_layers,
num_micro_batches_partial_act_ckpt,
act_ckpt_layers_per_pipeline,
) = _set_activations_checkpoint_params(tp, pp, num_layers, act_method, multiplier, model_size_in_b, model_name)
for mbs in mbs_list:
if act_layers is not None and act_layers != "auto":
act_ckpt_layers = act_layers
for act in act_ckpt_layers:
for num_mbs_act in num_micro_batches_partial_act_ckpt:
for act_per_pipe in act_ckpt_layers_per_pipeline:
new_cfg = utils.modify_cfg(
base_cfg=base_cfg,
act=act,
num_mbs_act=num_mbs_act,
act_per_pipe=act_per_pipe,
tp=tp,
pp=pp,
virtual_pipelines=virtual_pipelines,
mbs=mbs,
max_minutes=max_minutes,
max_steps=max_steps,
num_nodes=num_nodes,
model_name=model_name,
)
if new_cfg: # Save candidate cfg.
file_name = f"{model_name}_{model_size_in_b}b_{num_nodes}nodes_tp_{tp}_pp_{pp}_mbs_{mbs}_act_ckpt_{act}_num_mbs_act_{num_mbs_act}_act_per_pipe_{act_per_pipe}.yaml"
results_cfgs[act].append(file_name)
with open(f"{base_dir}/{file_name}", "w") as f:
yaml.dump(new_cfg, f)
print("\nAll candidate configurations created correctly.\n")
return base_dir, results_cfgs, num_nodes
def _set_activations_checkpoint_params(tp, pp, num_layers, act_method, multiplier, model_size_in_b, model_name):
act_multiple = 4 // pp
if act_method == "block":
if 1.0 <= model_size_in_b < 11.3:
act_multiple = 8 // pp
elif 11.3 <= model_size_in_b < 26.0:
act_multiple = 16 // pp
elif 26.0 <= model_size_in_b < 60.0:
act_multiple = 16 // pp
elif 60.0 <= model_size_in_b:
act_multiple = 32 // pp
act_multiple = max(act_multiple, 1)
virtual_pipelines = None
# Num micro batches with partial act ckpt
min_micro_b = 0 # 0 will not be used, minimum will be set to 1 later in the code.
max_micro_b = pp
interval_micro_b = 1
# Act ckpt layers per pipeline
min_layers_per_pipe = 0
max_layers_per_pipe = num_layers
interval_layers_per_pipe = act_multiple
if model_name in ["gpt3", "bert"] and pp > 2: # Interleaved pipeline scheduling.
virtual_pipelines = num_layers // pp # TODO: verify that this is the best value.
act_multiple = 1
max_micro_b = pp * (virtual_pipelines - 1) + (pp - 1) * 2 + 1
interval_micro_b = virtual_pipelines * 8
max_layers_per_pipe = multiplier * num_layers // pp // virtual_pipelines + 1
act_ckpt_layers, num_micro_batches_partial_act_ckpt, act_ckpt_layers_per_pipeline = [None], [None], [None]
if act_method == "block":
# Act ckpt num layers
if virtual_pipelines is None:
act_ckpt_layers = range(0, multiplier * num_layers // pp + 1, act_multiple)
else:
act_ckpt_layers = range(0, multiplier * num_layers // pp // virtual_pipelines + 1, act_multiple)
if pp > 1 and model_name in ["gpt3", "bert"]:
# Num micro batches with partial act ckpt
num_micro_batches_partial_act_ckpt = list(range(min_micro_b, max_micro_b + 1, interval_micro_b))
if num_micro_batches_partial_act_ckpt[0] == 0:
num_micro_batches_partial_act_ckpt[0] = 1
# Act ckpt layers per pipeline
act_ckpt_layers_per_pipeline = range(
min_layers_per_pipe, max_layers_per_pipe + 1, interval_layers_per_pipe
)
return virtual_pipelines, act_ckpt_layers, num_micro_batches_partial_act_ckpt, act_ckpt_layers_per_pipeline
def _tp_pp_mbs_grid_gpt3_80gb(model_size_in_b: float, valid_pp: List[int], seq_length: int) -> Tuple[List[int], List[int], List[int], int, int]:
    """
    Selects grid search space for TP, PP, MBS parameters for GPT-3 and 80GB GPUs.
    :param float model_size_in_b: number of parameters in the model.
    :param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :param int seq_length: sequence length to use for training.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
    WHERE
    List[int] tp is the list of candidate Tensor Parallelism values.
    List[int] pp is the list of candidate Pipeline Parallelism values.
    List[int] mbs is the list of candidate Micro Batch Size values.
    int min_model_parallel is the minimum accepted model parallelism (tp * pp).
    int max_model_parallel is the maximum accepted model parallelism (tp * pp).
"""
tp = [1, 2, 4, 8]
pp = [1]
mbs = [1, 2, 3, 4, 6, 8]
min_model_parallel = 1
max_model_parallel = 8
if seq_length == 2048:
if model_size_in_b <= 1.0:
tp = [1, 2]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
elif model_size_in_b <= 8.0:
tp = [1, 2, 4]
elif model_size_in_b <= 13.0:
tp = [1, 2, 4, 8]
elif model_size_in_b <= 23.0:
tp = [1, 2, 4]
pp = [x for x in valid_pp if 1 <= x <= 4]
mbs = [1, 2, 4]
min_model_parallel = 4
max_model_parallel = 8
elif model_size_in_b <= 45.0:
tp = [2, 4, 8]
pp = [x for x in valid_pp if 1 <= x <= 4]
mbs = [1, 2, 4]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 95:
tp = [2, 4, 8]
pp = [x for x in valid_pp if 1 <= x <= 8]
mbs = [1, 2, 4, 8]
min_model_parallel = 8
max_model_parallel = 64
elif model_size_in_b <= 130.0:
tp = [2, 4, 8]
pp = [x for x in valid_pp if 1 <= x <= 16]
mbs = [1, 2, 4, 8]
min_model_parallel = 16
max_model_parallel = 128
elif model_size_in_b <= 195.0:
tp = [8]
pp = [x for x in valid_pp if 4 <= x <= 16]
mbs = [1, 2, 4]
min_model_parallel = 32
max_model_parallel = 256
elif model_size_in_b <= 395.0:
tp = [8]
pp = [x for x in valid_pp if 8 <= x <= 32]
mbs = [1, 2, 4]
min_model_parallel = 64
max_model_parallel = 512
elif model_size_in_b <= 790.0:
tp = [8]
pp = [x for x in valid_pp if 8 <= x <= 100]
mbs = [1, 2, 4]
min_model_parallel = 128
max_model_parallel = 1024
elif model_size_in_b <= 1100.0:
tp = [8]
pp = [x for x in valid_pp if 16 <= x <= 130]
mbs = [1, 2, 4]
min_model_parallel = 256
max_model_parallel = 2048
elif seq_length == 4096:
if model_size_in_b <= 1.0:
tp = [1, 2, 4]
mbs = [1, 2, 4, 8]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
mbs = [1, 2, 4, 8]
elif model_size_in_b <= 8.0:
tp = [1, 2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2, 4]
elif model_size_in_b <= 13.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2, 4]
elif model_size_in_b <= 23.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2]
min_model_parallel = 4
max_model_parallel = 16
elif model_size_in_b <= 45.0:
tp = [4, 8]
pp = [x for x in valid_pp if 2 <= x <= 4]
mbs = [1, 2]
min_model_parallel = 8
max_model_parallel = 32
elif seq_length == 8192:
if model_size_in_b <= 1.0:
tp = [1, 2]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2, 4]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2, 4]
elif model_size_in_b <= 8.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2]
elif model_size_in_b <= 13.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2]
elif model_size_in_b <= 23.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 4]
mbs = [1]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 45.0:
tp = [8]
pp = [x for x in valid_pp if 4 <= x <= 8]
mbs = [1]
min_model_parallel = 32
max_model_parallel = 64
elif seq_length == 16384:
if model_size_in_b <= 1.0:
tp = [2, 4]
mbs = [1, 2]
elif model_size_in_b <= 4.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1]
elif model_size_in_b <= 8.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1]
elif model_size_in_b <= 13.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1]
elif model_size_in_b <= 23.0:
tp = [4, 8]
pp = [x for x in valid_pp if 2 <= x <= 4]
mbs = [1]
min_model_parallel = 8
max_model_parallel = 32
elif seq_length == 32768:
if model_size_in_b <= 1.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1]
elif model_size_in_b <= 4.0:
tp = [2, 4]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1]
elif model_size_in_b <= 8.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 2]
min_model_parallel = 4
max_model_parallel = 16
mbs = [1]
elif model_size_in_b <= 13.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 2]
min_model_parallel = 4
max_model_parallel = 16
mbs = [1]
elif model_size_in_b <= 23.0:
tp = [8]
pp = [x for x in valid_pp if 2 <= x <= 4]
mbs = [1]
min_model_parallel = 16
max_model_parallel = 32
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _tp_pp_mbs_grid_gpt3_40gb(model_size_in_b: float, valid_pp: List[int]) -> Tuple[List[int], List[int], List[int], int, int]:
    """
    Selects grid search space for TP, PP, MBS parameters for GPT-3 and 40GB GPUs.
    :param float model_size_in_b: number of parameters in the model.
    :param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
    WHERE
    List[int] tp is the list of candidate Tensor Parallelism values.
    List[int] pp is the list of candidate Pipeline Parallelism values.
    List[int] mbs is the list of candidate Micro Batch Size values.
    int min_model_parallel is the minimum accepted model parallelism (tp * pp).
    int max_model_parallel is the maximum accepted model parallelism (tp * pp).
"""
tp = [1, 2, 4, 8]
pp = [1]
mbs = [1, 2, 4, 6, 8, 10, 12, 16]
min_model_parallel = 1
max_model_parallel = 8
if model_size_in_b <= 1.0:
tp = [1, 2, 4]
mbs = [1, 2, 4, 8]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4, 8]
mbs = [1, 2, 4, 8]
elif model_size_in_b <= 8.0:
tp = [2, 4, 8]
pp = [1, 2]
mbs = [1, 2, 4]
min_model_parallel = 2
elif model_size_in_b <= 13.0:
tp = [4, 8]
pp = [1, 2, 4]
mbs = [1, 2, 4]
min_model_parallel = 4
max_model_parallel = 32
elif model_size_in_b <= 23.0:
tp = [2, 4, 8]
pp = [x for x in valid_pp if 1 <= x <= 8]
min_model_parallel = 8
max_model_parallel = 64
elif model_size_in_b <= 45.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 12]
mbs = [1, 2, 4]
min_model_parallel = 16
max_model_parallel = 128
elif model_size_in_b <= 95:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 16]
mbs = [1, 2, 4]
min_model_parallel = 16
max_model_parallel = 256
elif model_size_in_b <= 130.0:
tp = [4, 8]
pp = [x for x in valid_pp if 2 <= x <= 26]
mbs = [1, 2]
min_model_parallel = 32
max_model_parallel = 512
elif model_size_in_b <= 195.0:
tp = [4, 8]
pp = [x for x in valid_pp if 2 <= x <= 32]
mbs = [1, 2]
min_model_parallel = 64
max_model_parallel = 1024
elif model_size_in_b <= 395.0:
tp = [4, 8]
pp = [x for x in valid_pp if 4 <= x <= 64]
mbs = [1, 2]
min_model_parallel = 128
max_model_parallel = 2048
elif model_size_in_b <= 790.0:
tp = [4, 8]
pp = [x for x in valid_pp if 8 <= x <= 128]
mbs = [1, 2]
min_model_parallel = 256
max_model_parallel = 4096
elif model_size_in_b <= 1100.0:
tp = [4, 8]
pp = [x for x in valid_pp if 8 <= x <= 192]
mbs = [1, 2]
min_model_parallel = 512
max_model_parallel = 8192
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _tp_pp_mbs_grid_t5_80gb(model_size_in_b: float, valid_pp: List[int]) -> Tuple[List[int], List[int], List[int], int, int]:
    """
    Selects grid search space for TP, PP, MBS parameters for T5/mT5 and 80GB GPUs.
    :param float model_size_in_b: number of parameters in the model.
    :param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
    WHERE
    List[int] tp is the list of candidate Tensor Parallelism values.
    List[int] pp is the list of candidate Pipeline Parallelism values.
    List[int] mbs is the list of candidate Micro Batch Size values.
    int min_model_parallel is the minimum accepted model parallelism (tp * pp).
    int max_model_parallel is the maximum accepted model parallelism (tp * pp).
"""
tp = [1, 2, 4, 8]
pp = [1]
mbs = [1, 2, 4, 6, 8, 12, 16]
min_model_parallel = 1
max_model_parallel = 8
if model_size_in_b <= 1.0:
tp = [1, 2]
mbs = [16, 32, 64, 128]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
mbs = [4, 6, 8, 12, 16, 24, 32, 48]
elif model_size_in_b <= 8.0:
tp = [2, 4, 8]
mbs = [4, 6, 8, 12, 16, 24, 32]
elif model_size_in_b <= 14.5:
tp = [4, 8]
mbs = [2, 4, 6, 8, 12, 16, 24]
elif model_size_in_b <= 25.9:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 4
max_model_parallel = 16
elif model_size_in_b <= 43.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 4]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 85.5:
tp = [4, 8]
pp = [x for x in valid_pp if 2 <= x <= 8]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 16
max_model_parallel = 64
elif model_size_in_b <= 165.5:
tp = [8]
pp = [x for x in valid_pp if 4 <= x <= 16]
mbs = [1, 2, 4, 6]
min_model_parallel = 32
max_model_parallel = 128
elif model_size_in_b <= 250:
tp = [8]
pp = [x for x in valid_pp if 4 <= x <= 32]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 64
max_model_parallel = 256
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _tp_pp_mbs_grid_t5_40gb(model_size_in_b: float, valid_pp: List[int]) -> Tuple[List[int], List[int], List[int], int, int]:
    """
    Selects grid search space for TP, PP, MBS parameters for T5/mT5 and 40GB GPUs.
    :param float model_size_in_b: number of parameters in the model.
    :param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
    WHERE
    List[int] tp is the list of candidate Tensor Parallelism values.
    List[int] pp is the list of candidate Pipeline Parallelism values.
    List[int] mbs is the list of candidate Micro Batch Size values.
    int min_model_parallel is the minimum accepted model parallelism (tp * pp).
    int max_model_parallel is the maximum accepted model parallelism (tp * pp).
"""
tp = [1, 2, 4, 8]
pp = [1]
mbs = [1, 2, 4, 6, 8, 12, 16]
min_model_parallel = 1
max_model_parallel = 8
if model_size_in_b <= 1.0:
tp = [1, 2]
mbs = [16, 32, 64, 128]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
mbs = [4, 8, 12, 16, 24, 32, 48]
elif model_size_in_b <= 8.0:
tp = [2, 4, 8]
mbs = [4, 6, 8, 12, 16, 24]
elif model_size_in_b <= 14.5:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 2]
mbs = [2, 4, 6, 8, 12, 16]
min_model_parallel = 4
max_model_parallel = 16
elif model_size_in_b <= 25.9:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 8]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 43.0:
tp = [4, 8]
pp = [x for x in valid_pp if 1 <= x <= 8]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 16
max_model_parallel = 32
elif model_size_in_b <= 85.5:
tp = [8]
pp = [x for x in valid_pp if 2 <= x <= 8]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 32
max_model_parallel = 64
elif model_size_in_b <= 165.5:
tp = [8]
pp = [x for x in valid_pp if 4 <= x <= 32]
mbs = [1, 2, 4]
min_model_parallel = 64
max_model_parallel = 128
elif model_size_in_b <= 250:
tp = [8]
pp = [x for x in valid_pp if 8 <= x <= 64]
mbs = [1, 2, 4]
min_model_parallel = 128
max_model_parallel = 256
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _tp_pp_mbs_grid_bert_80gb(model_size_in_b: float, valid_pp: List[int]) -> Tuple[int, int, int]:
"""
Selects grid search space for TP, PP, MBS parameters for BERT and 80GB GPUs.
:param float model_size_in_b: number of parameters in the model.
:param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
        WHERE
        List[int] tp is the list of Tensor Parallelism values to search over.
        List[int] pp is the list of Pipeline Parallelism values to search over.
        List[int] mbs is the list of Micro Batch Sizes to search over.
        int min_model_parallel is the minimum model-parallel size allowed in the search.
        int max_model_parallel is the maximum model-parallel size allowed in the search.
"""
pp = [1]
mbs = [1, 2, 3, 4, 6, 8]
min_model_parallel = 1
max_model_parallel = 8
if model_size_in_b <= 1.0:
tp = [1, 2]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4]
elif model_size_in_b <= 8.0:
tp = [2, 4, 8]
min_model_parallel = 2
elif model_size_in_b <= 13.0:
tp = [2, 4, 8]
mbs = [1, 2, 3, 4, 6]
min_model_parallel = 2
elif model_size_in_b <= 25.0:
tp = [4, 8]
mbs = [1, 2, 3, 4]
min_model_parallel = 4
elif model_size_in_b <= 46.5:
tp = [4, 8]
pp = [1, 2, 4]
mbs = [1, 2, 3, 4]
min_model_parallel = 4
max_model_parallel = 16
elif model_size_in_b <= 87.5:
tp = [4, 8]
pp = [2, 4, 6, 8]
mbs = [1, 2, 3, 4]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 165.5:
tp = [4, 8]
pp = [4, 6, 8, 16]
mbs = [2, 4, 6, 8]
min_model_parallel = 16
max_model_parallel = 128
elif model_size_in_b <= 250.5:
tp = [8]
pp = [4, 8, 16, 32]
mbs = [1, 2, 3, 4]
min_model_parallel = 32
max_model_parallel = 256
else:
raise ValueError("No BERT model larger than 250B parameters is supported.")
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _tp_pp_mbs_grid_bert_40gb(model_size_in_b: float, valid_pp: List[int]) -> Tuple[List[int], List[int], List[int], int, int]:
"""
Selects grid search space for TP, PP, MBS parameters for BERT and 40GB GPUs.
:param float model_size_in_b: number of parameters in the model.
:param List[int] valid_pp: list of valid Pipeline Parallelism (PP) values for this config.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
        WHERE
        List[int] tp is the list of Tensor Parallelism values to search over.
        List[int] pp is the list of Pipeline Parallelism values to search over.
        List[int] mbs is the list of Micro Batch Sizes to search over.
        int min_model_parallel is the minimum model-parallel size allowed in the search.
        int max_model_parallel is the maximum model-parallel size allowed in the search.
"""
pp = [1]
mbs = [1, 2, 4, 6, 8]
min_model_parallel = 1
max_model_parallel = 8
if model_size_in_b <= 1.0:
tp = [1, 2, 4]
elif model_size_in_b <= 4.0:
tp = [1, 2, 4, 8]
elif model_size_in_b <= 8.0:
tp = [2, 4, 8]
mbs = [1, 2, 4]
elif model_size_in_b <= 13.0:
tp = [2, 4, 8]
mbs = [1, 2, 4]
elif model_size_in_b <= 25.0:
tp = [2, 4, 8]
pp = [1, 2]
mbs = [1, 2, 4]
min_model_parallel = 2
max_model_parallel = 16
elif model_size_in_b <= 46.5:
tp = [4, 8]
pp = [1, 2, 4, 8]
mbs = [1, 2, 3]
min_model_parallel = 8
max_model_parallel = 32
elif model_size_in_b <= 87.5:
tp = [4, 8]
pp = [2, 4, 6, 8]
mbs = [1, 2, 3]
min_model_parallel = 16
max_model_parallel = 64
elif model_size_in_b <= 165.5:
tp = [8]
pp = [4, 6, 8, 16]
mbs = [1, 2]
min_model_parallel = 32
max_model_parallel = 256
elif model_size_in_b <= 250.5:
tp = [8]
pp = [8, 16, 32]
mbs = [1, 2]
min_model_parallel = 64
max_model_parallel = 512
else:
raise ValueError("No BERT model larger than 250B parameters is supported.")
return tp, pp, mbs, min_model_parallel, max_model_parallel
def _calculate_tp_pp_mbs_grid(
model_size_in_b: float, num_layers: int, model_name: str, seq_length: int, train_cfg: omegaconf.dictconfig.DictConfig
) -> Tuple[List[int], List[int], List[int], int, int]:
"""
Selects grid search space for TP, PP, MBS parameters for any model, and calls the necessary
heuristics function accordingly.
:param float model_size_in_b: number of parameters in the model.
:param int num_layers: number of layers in the model config.
:param str model_name: name of the model to be used, such as gpt3, t5, mt5...
    :param int seq_length: sequence length to be used for training.
    :param omegaconf.dictconfig.DictConfig train_cfg: config of the model that will be launched.
    :returns: tuple (tp, pp, mbs, min_model_parallel, max_model_parallel)
        WHERE
        List[int] tp is the list of Tensor Parallelism values to search over.
        List[int] pp is the list of Pipeline Parallelism values to search over.
        List[int] mbs is the list of Micro Batch Sizes to search over.
        int min_model_parallel is the minimum model-parallel size allowed in the search.
        int max_model_parallel is the maximum model-parallel size allowed in the search.
    :raises NotImplementedError: if the model_name is not one of the supported models.
"""
tp_sizes = train_cfg.get("tensor_parallel_sizes")
pp_sizes = train_cfg.get("pipeline_parallel_sizes")
min_model_parallel_size = train_cfg.get("min_model_parallel_size")
max_model_parallel_size = train_cfg.get("max_model_parallel_size")
mbs_sizes = train_cfg.get("micro_batch_sizes")
gpu_memory_gb = train_cfg.get("gpu_memory_gb")
multiplier = 1 if model_name in ["gpt3", "bert"] else 2
init_pp = [] if model_name == "gpt3" else [1]
valid_pp = init_pp + [
multiplier * x for x in range(1, num_layers + 1) if num_layers % x == 0
] # Only divisors of num_layers are possible.
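    # Illustrative example (assumed sizes): for a t5/mt5 model with num_layers=24, multiplier=2 and
    # init_pp=[1], valid_pp becomes [1, 2, 4, 6, 8, 12, 16, 24, 48], i.e. 1 plus 2x every divisor of 24.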
if model_name == "gpt3":
if gpu_memory_gb == 80:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_gpt3_80gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp, seq_length=seq_length
)
elif gpu_memory_gb == 40:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_gpt3_40gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp
)
elif model_name in ["t5", "mt5"]:
if gpu_memory_gb == 80:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_t5_80gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp
)
elif gpu_memory_gb == 40:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_t5_40gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp
)
elif model_name == "bert":
if gpu_memory_gb == 80:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_bert_80gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp
)
elif gpu_memory_gb == 40:
tp, pp, mbs, min_model_parallel, max_model_parallel = _tp_pp_mbs_grid_bert_40gb(
model_size_in_b=model_size_in_b, valid_pp=valid_pp
)
else:
raise NotImplementedError("Model name not implemented.")
# Override the tp, pp, mbs search if indicated in the config params.
if tp_sizes is not None and tp_sizes != "auto":
tp = tp_sizes
if pp_sizes is not None and pp_sizes != "auto":
pp = pp_sizes
if mbs_sizes is not None and mbs_sizes != "auto":
mbs = mbs_sizes
if min_model_parallel_size is not None and min_model_parallel_size != "auto":
min_model_parallel = min_model_parallel_size
if max_model_parallel_size is not None and max_model_parallel_size != "auto":
max_model_parallel = max_model_parallel_size
return tp, pp, mbs, min_model_parallel, max_model_parallel
def launch_grid_search_configs(
base_dir: str, results_cfgs: List[int], model_name: str, cfg: omegaconf.dictconfig.DictConfig
) -> List[int]:
"""
Launches training jobs for the grid search in parallel. The limit of how many
jobs to launch is specified by limit_search_runs.
:param str base_dir: location where the configs are stored.
:param list results_cfgs: list of config names.
:param str model_name: name of the model to be run.
:param omegaconf.dictconfig.DictConfig cfg: the general config object.
:return: job_ids, list of job ids for all the training jobs.
:rtype: list[int]
"""
launcher_scripts_path = cfg.get("launcher_scripts_path")
search_cfg = cfg.get("search_config")
train_cfg = search_cfg.get("train_settings")
limit = train_cfg.get("limit_search_runs")
results_dir = os.path.join(train_cfg.get("logs"), "training_logs")
job_ids = []
for cfg_list in results_cfgs:
for file_name in cfg_list:
src_file = os.path.join(base_dir, file_name)
dst_dir = os.path.join(launcher_scripts_path, "conf/training", model_name, file_name)
shutil.copyfile(src_file, dst_dir)
job_id = train.run_training(file_name, model_name, results_dir, cfg)
os.remove(dst_dir)
if job_id is not None:
job_ids.append(job_id[:-1])
if len(job_ids) == limit:
return job_ids
return job_ids
def launch_throughput_measure(
dependency_list: List[str],
model_name: str,
model_size_in_b: float,
num_nodes: int,
hydra_args: str,
cfg: omegaconf.dictconfig.DictConfig,
) -> str:
"""
Launch job that measures the throughput of each run in the grid search. This
job will get scheduled with dependencies on all the job ids in dependency_list,
so it will only start running once all the jobs are finished.
:param list dependency_list: list of all the job_ids this job will depend on.
:param str model_name: name of the model, i.e. gpt3, t5, mt5.
    :param float model_size_in_b: model size in billions of parameters.
    :param int num_nodes: number of nodes used for the training runs being compared.
    :param str hydra_args: hydra override arguments in string format.
:param omegaconf.dictconfig.DictConfig cfg: general config object for the HP tool.
:return: job_id of the current job.
:rtype: str
"""
# Read config
auto_configurator_path = cfg.get("auto_configurator_path")
cluster_type = cfg.get("cluster_type")
container_mounts = cfg.get("container_mounts")
container = cfg.get("training_container")
hp_cfg = cfg.get("search_config")
base_results_dir = cfg.get("base_results_dir")
# CLUSTER parameters
cluster_cfg = cfg.get("cluster")
partition = cluster_cfg.get("partition")
account = cluster_cfg.get("account")
time_limit = "10:00"
exclusive = cluster_cfg.get("exclusive")
mem = cluster_cfg.get("mem")
overcommit = cluster_cfg.get("overcommit")
ntasks_per_node = 1
gpus_per_task = None
gpus_per_node = None
dependency = None
if dependency_list is not None and len(dependency_list) > 0:
dependency = ":".join(dependency_list)
job_name = f"{cluster_cfg.get('job_name_prefix')}latency_measure"
# Settings parameters
train_settings = hp_cfg.get("train_settings")
log_dir = train_settings.get("logs")
final_log_dir = os.path.join(log_dir, "final_result")
os.makedirs(final_log_dir, exist_ok=True)
# Process container-mounts.
mounts_str = f"{auto_configurator_path}:{auto_configurator_path},{base_results_dir}:{base_results_dir}"
mounts_str += utils.add_container_mounts(container_mounts)
flags = f"--container-image {container} " f"--container-mounts {mounts_str} " f"--no-container-mount-home "
if os.getenv("NEMO_LAUNCHER_CI"): # Whether this job is running in CI or not.
flags += f"-o {log_dir}/slurm_%j.log "
else:
flags += (
f"-o {final_log_dir}/compare_throughput_{model_size_in_b}b_{num_nodes}nodes-%j.log "
f"-e {final_log_dir}/compare_throughput_{model_size_in_b}b_{num_nodes}nodes-%j.error "
)
if cluster_type == "bcm":
new_script_path = os.path.join(auto_configurator_path, "autoconfig/scripts/compare_throughput.sh")
code_path = os.path.join(auto_configurator_path, "autoconfig/scripts/compare_throughput.py")
train_cmd = f"HYDRA_FULL_ERROR=1 python3 -u {code_path} auto_configurator_path={auto_configurator_path} search_config.train_settings.model_size_in_b={model_size_in_b} search_config={model_name}/{model_size_in_b}b search_config_value={model_name}/{model_size_in_b}b +nodes={num_nodes} base_results_dir={base_results_dir} {hydra_args} "
utils.create_slurm_file(
new_script_path=new_script_path,
cmds=[train_cmd],
job_name=job_name,
flags=flags,
dependency=dependency,
exclusive=exclusive,
mem=mem,
overcommit=overcommit,
time=time_limit,
nodes=1,
ntasks_per_node=ntasks_per_node,
gpus_per_task=gpus_per_task,
gpus_per_node=gpus_per_node,
partition=partition,
account=account,
)
if os.getenv("NEMO_LAUNCHER_CI"):
job_id = subprocess.check_output([f'sbatch {new_script_path} | tee "{log_dir}/launcher.log" '], shell=True)
else:
job_id = subprocess.check_output([f"sbatch --parsable {new_script_path}"], shell=True)
dependency = job_id.decode("utf-8")
print(f"Submitted job to select optimal throughput with job id: {dependency}")
return dependency
elif cluster_type == "bcp":
code_path = os.path.join(auto_configurator_path, "autoconfig/scripts/compare_throughput.py")
train_cmd = f"HYDRA_FULL_ERROR=1 python3 -u {code_path} auto_configurator_path={auto_configurator_path} search_config.train_settings.model_size_in_b={model_size_in_b} search_config={model_name}/{model_size_in_b}b search_config_value={model_name}/{model_size_in_b}b +nodes={num_nodes} base_results_dir={base_results_dir} {hydra_args} "
job_id = subprocess.check_output([train_cmd], shell=True)
dependency = job_id.decode("utf-8")
print(f"Submitted job to select optimal throughput with job id: {dependency}")
return dependency
return None
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/training_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to launch training jobs using nemo_megatron_launcher."""
import os
import subprocess
from autoconfig import utils
from omegaconf import OmegaConf
def run_training(file_name: str, model_name: str, results_dir: str, cfg: OmegaConf) -> str:
"""
Launch a training job for the given model name and config file, using nemo_megatron_launcher.
:param str file_name: name of the file configuration to be selected for training with nemo_megatron_launcher.
:param str model_name: model type to be run, usually gpt3, t5 or mt5.
:param str results_dir: path to the directory where the results will be stored.
:param OmegaConf cfg: OmegaConf object with full configuration for the HP tool.
:return: SLURM job_id of the training job that was launched.
:rtype: str
"""
# Copy cluster config to nemo_megatron_launcher.
launcher_scripts_path = cfg.get("launcher_scripts_path")
cluster_cfg = cfg.get("cluster")
dst = os.path.join(launcher_scripts_path, "conf/cluster/bcm.yaml")
copy_config_to_file(cluster_cfg, dst)
print(f"Copied cluster config to {dst}")
# Generate string of hydra overrides for nemo_megatron_launcher.
overrides_str = generate_overrides_str(file_name, model_name, results_dir, cfg)
nemo_megatron_ci = f"NEMO_LAUNCHER_CI=1" if bool(os.getenv("NEMO_LAUNCHER_CI")) else ""
main_path = os.path.join(launcher_scripts_path, "main.py")
cmd = f"HYDRA_FULL_ERROR=1 {nemo_megatron_ci} python3 {main_path} {overrides_str} "
# Launch job with command cmd.
try:
job_output = subprocess.check_output([cmd], shell=True).decode("utf-8")
job_id = job_output.split(" ")[-1]
except Exception as err:
job_id = None
print(err)
print(f"Submitted Training script with job id: {job_id}")
return job_id
def copy_config_to_file(cfg: OmegaConf, dst: str) -> None:
"""
Copies OmegaConf configuration to a dst file.
:param OmegaConf cfg: OmegaConfg object with the config to be stored in a file.
:param str dst: destination path to where the config will be stored. Must be a yaml file.
:return: None
"""
with open(dst, "w") as f:
OmegaConf.save(config=cfg, f=f)
def convert_to_absolute_path(path: str) -> str:
"""
Removes the /../ part from relative paths to convert them to absolute paths.
:param str path: the path that will be converted.
:return: the converted path with no /../ elements in it.
:rtype: str
"""
path_split = path.split("/")
result = []
for index, elem in enumerate(path_split):
if elem == "..":
result.pop(-1)
else:
result.append(elem)
return "/".join(result)
def generate_overrides_str(file_name: str, model_name: str, results_dir: str, cfg: OmegaConf) -> str:
"""
Generates string with hydra-like parameter overrides for nemo_megatron_launcher.
:param str file_name: name of the file configuration to be selected for training with nemo_megatron_launcher.
:param str model_name: model type to be run, usually gpt3, t5 or mt5.
:param str results_dir: path to the directory where the results will be stored.
:param OmegaConf cfg: OmegaConf object with full configuration for the HP tool.
:return: string containing all the hydra-like overrides required for the training job.
:rtype: str
"""
file_name = file_name.replace(".yaml", "")
training_model = f"{model_name}/{file_name}"
cluster_type = cfg.get("cluster_type")
container = cfg.get("training_container")
auto_configurator_path = cfg.get("auto_configurator_path")
auto_configurator_path = convert_to_absolute_path(auto_configurator_path)
launcher_scripts_path = cfg.get("launcher_scripts_path")
launcher_scripts_path = convert_to_absolute_path(launcher_scripts_path)
data_dir = cfg.get("data_dir")
container_mounts = cfg.get("container_mounts", "null")
api_key_file = cfg.get("wandb").get("api_key_file")
if api_key_file is None:
api_key_file = "null"
# Process container-mounts.
mounts_str = f"{auto_configurator_path}:{auto_configurator_path},{results_dir}:{results_dir}"
mounts_str += utils.add_container_mounts(container_mounts)
overrides_str = (
f"training={training_model} "
f"stages=[training] "
f"cluster_type={cluster_type} "
f"base_results_dir={results_dir} "
f"\"container='{container}'\" "
f"launcher_scripts_path={launcher_scripts_path} "
f"data_dir={data_dir} "
f"training.exp_manager.create_checkpoint_callback=False "
f"container_mounts=\[{mounts_str}\] "
f"wandb_api_key_file={api_key_file} "
)
return overrides_str
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import itertools
import json
import os
import random
import subprocess
from autoconfig import utils
NEMO_LAUNCHER_DEBUG = os.getenv("NEMO_LAUNCHER_DEBUG", "False").lower() in ("true", "t", "1")
def nodes_necessary(gpus_per_node, tp, pp):
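    # Number of nodes a (tp, pp) layout occupies, or 0 when tp cannot be split evenly across nodes.
    # Illustrative example: gpus_per_node=8, tp=16, pp=2 -> max(2, 2 * 16 // 8) = 4 nodes.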
if tp > gpus_per_node:
if tp % gpus_per_node != 0:
return 0
else:
return max(pp, pp * tp // gpus_per_node)
else:
return pp
def get_vocabulary_size(base_cfg, cfg):
vocab_path_cfg = base_cfg["model"]["tokenizer"]["vocab_file"]
vocab_path = vocab_path_cfg.format(data_dir=cfg.data_dir)[1:]
try:
with open(vocab_path) as f:
data = json.load(f)
vocabulary_size = len(data)
print(f"Vocabulary loaded from {vocab_path} with size {vocabulary_size}")
divider = base_cfg["model"]["make_vocab_size_divisible_by"]
if divider > 1:
new_vocabulary_size = divider * (vocabulary_size // divider + 1)
if new_vocabulary_size != vocabulary_size:
print(f"make_vocab_size_divisible_by set so vocabulary rounded " f"to {new_vocabulary_size}")
return new_vocabulary_size
else:
return vocabulary_size
else:
return vocabulary_size
except IOError as io:
print("Vocabulary open error", io)
print("FAILED TO LOAD VOCABULARY FOR TOKENIZER - set to default 51200")
return 51200
def filter_configuration(base_cfg, cfg, tp, pp, gpus_per_node):
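    # Keeps only layouts FasterTransformer can run: attention heads must be divisible by TP, layers by PP,
    # and multi-node pipeline layouts must fill whole nodes. Illustrative example with gpus_per_node=8
    # (and the divisibility checks passing): (tp=8, pp=2) is kept and needs 2 nodes, while (tp=4, pp=2)
    # is rejected as a partial-node layout.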
attention_heads = base_cfg["model"]["num_attention_heads"]
num_layers = base_cfg["model"]["num_layers"]
if attention_heads % tp != 0:
print(
f"FasterTransformer invalid configuration "
f"TENSOR_PARALLEL={tp} "
f"PIPELINE_PARALLEL={pp} ignored due to "
f"base_cfg[model][num_attention_heads]={attention_heads}."
)
return False
elif num_layers % pp != 0:
print(
f"FasterTransformer invalid configuration "
f"TENSOR_PARALLEL={tp} "
f"PIPELINE_PARALLEL={pp} ignored due to "
f"base_cfg[model][num_layers]={num_layers}."
)
return False
elif pp == 1 or (pp > 1 and tp >= gpus_per_node):
return nodes_necessary(gpus_per_node, tp, pp) > 0
print(f"FasterTransformer partial node configuration " f"TENSOR_PARALLEL={tp} " f"PIPELINE_PARALLEL={pp} ignored.")
return False
def configure_fastertransformer(base_cfg, cfg, tp, pp, bs, destination):
max_seq_len = (
cfg.search_config.inference_settings.benchmark.input_len
+ cfg.search_config.inference_settings.benchmark.output_len
)
inter_size = base_cfg["model"]["hidden_size"] * 4
size_per_head = base_cfg["model"]["hidden_size"] // base_cfg["model"]["num_attention_heads"]
vocabulary_size = get_vocabulary_size(base_cfg, cfg)
command = [
f"python3",
f"{cfg.fastertransformer_path}/examples/pytorch/gpt/utils/generate_gpt_config.py",
f"--max_seq_len {max_seq_len}",
f"--beam_width {cfg.search_config.inference_settings.benchmark.beam_width}",
f"--head_num {base_cfg['model']['num_attention_heads']}",
f"--size_per_head {size_per_head}",
f"--inter_size {inter_size}",
f"--num_layer {base_cfg['model']['num_layers']}",
f"--vocab_size {vocabulary_size}",
f"--data_type {cfg.search_config.inference_settings.run.data_type}",
f"-topk {cfg.search_config.inference_settings.benchmark.topk}",
f"-topp {cfg.search_config.inference_settings.benchmark.topp}",
f"--tensor_para_size {tp}",
f"--pipeline_para_size {pp}",
f"--request_batch_size {bs}",
f"--request_output_len {cfg.search_config.inference_settings.benchmark.output_len}",
f"--destination {destination}",
]
print(f"Generated config for FasterTransformer to: {destination} ")
result = os.system(" ".join(command))
if result != 0:
raise Exception("generate_gpt_config.py failed")
def generate_start_ids(base_cfg, cfg, bs, destination):
command = [
f"python3",
f"{cfg.fastertransformer_path}/examples/pytorch/gpt/utils/generate_start_ids.py",
f"-max_batch_size {bs}",
f"-max_input_length {cfg.search_config.inference_settings.benchmark.input_len}",
f"--destination {destination}",
]
print(f"Generated start_ids for FasterTransformer to: {destination}")
result = os.system(" ".join(command))
if result != 0:
raise Exception("generate_start_ids.py failed")
def generate_submission(base_cfg, cfg, job_name, nodes, tasks_per_node, ini, csv, submission_file, mounts_str):
cluster_job_name = f"{cfg.cluster.job_name_prefix}{job_name}"
gpus_per_task = cfg.cluster.gpus_per_task
gpus_per_node = cfg.cluster.gpus_per_node
path_list = submission_file.split("/")
path_list[-1] = "log_job_%j.out"
output = "/".join(path_list)
path_list[-1] = "log_job_%j.err"
error = "/".join(path_list)
bash_commands = [
f"export NCCL_LAUNCH_MODE=GROUP",
"echo ${SLURM_PROCID}.${SLURM_LOCALID}@$(hostname)",
f"/opt/FasterTransformer/build/bin/multi_gpu_gpt_example {ini} {csv}",
]
bash_command = [" && ".join(bash_commands)]
flags = [
"--mpi pmix",
f"--output {output}",
f"--error {error}",
f"--container-image {cfg.training_container}",
f"--container-mounts {mounts_str}",
f"--unbuffered",
]
flags_str = " ".join(flags)
utils.create_slurm_file(
new_script_path=submission_file,
cmds=bash_command,
job_name=cluster_job_name,
flags=flags_str,
time=cfg.search_config.inference_settings.run.time_limit,
nodes=nodes,
ntasks_per_node=tasks_per_node,
gpus_per_task=gpus_per_task,
gpus_per_node=gpus_per_node,
partition=cfg.cluster.partition,
account=cfg.cluster.account,
output=output,
comment=f"'FasterTransformer {job_name}'",
)
def submit_job(submission_file, results_dir):
if os.getenv('NEMO_LAUNCHER_CI'):
job_id = subprocess.check_output(
[f'sbatch {submission_file} | tee "{results_dir}/../launcher.log" '], shell=True
)
else:
if not NEMO_LAUNCHER_DEBUG:
job_id = subprocess.check_output([f"sbatch --parsable {submission_file}"], shell=True)
else:
job_id = str(random.randint(10000, 99999)).encode("utf-8")
dependency = job_id.decode("utf-8").split()[-1]
return dependency
def search_inference_config(base_cfg, cfg):
"""
Main function to launch a inference sweep job, with the config given in cfg.
"""
# Prepare global folders
inference_results_dir = os.path.join(cfg.search_config.inference_settings.run.results_dir, "inference")
os.makedirs(inference_results_dir, exist_ok=True)
# Process container-mounts.
auto_configurator_path = cfg.get("auto_configurator_path")
base_results_dir = cfg.get("base_results_dir")
container_mounts = cfg.get("container_mounts")
mounts_str = f"{auto_configurator_path}:{auto_configurator_path},{base_results_dir}:{base_results_dir}"
mounts_str += utils.add_container_mounts(container_mounts)
assert (
cfg.search_config.inference_settings.run.model_type == "gpt3"
), "Only GPT-3 models are currently supported for the inference HP search."
cluster_gpus_per_task = cfg.cluster.gpus_per_task
cluster_gpus_per_node = cfg.cluster.gpus_per_node
all_configurations = itertools.product(
cfg.search_config.inference_settings.run.tensor_parallel_sizes,
cfg.search_config.inference_settings.run.pipeline_parallel_sizes,
cfg.search_config.inference_settings.benchmark.batch_sizes,
)
gpus_per_node = cfg.search_config.inference_settings.run.gpus_per_node
configurations = list(
[
(tp, pp, bs)
for tp, pp, bs in all_configurations
if filter_configuration(base_cfg, cfg, tp, pp, gpus_per_node)
]
)
if len(configurations) == 0:
print("ALL FasterTransformer CONFIGURATIONS NOT VALID FOR BENCHMARK")
return
job_ids = []
for tp, pp, bs in configurations:
benchmark_model_name = f"{cfg.search_config.inference_settings.run.model_train_name}_tp{tp}_pp{pp}_bs{bs}"
model_dir = os.path.join(inference_results_dir, benchmark_model_name)
os.makedirs(model_dir, exist_ok=True)
# Generate .ini file for FasterTransformer.
config_ini_file = os.path.join(model_dir, "config.ini")
configure_fastertransformer(base_cfg, cfg, tp, pp, bs, config_ini_file)
# Generate start ids for this model.
config_start_ids_file = os.path.join(model_dir, "start_ids.csv")
generate_start_ids(base_cfg, cfg, bs, config_start_ids_file)
# Generate the submission Slurm job.
submission_file = os.path.join(model_dir, "submission_script.sh")
job_name = f"benchmark_FT_{benchmark_model_name}"
num_nodes = nodes_necessary(gpus_per_node, tp, pp)
tasks_per_node = min(gpus_per_node, tp)
generate_submission(
base_cfg=base_cfg,
cfg=cfg,
job_name=job_name,
nodes=num_nodes,
tasks_per_node=tasks_per_node,
ini=config_ini_file,
csv=config_start_ids_file,
submission_file=submission_file,
mounts_str=mounts_str,
)
dependency = submit_job(submission_file, inference_results_dir)
job_ids.append(dependency)
print()
# Prepare final job config files.
results_dir = os.path.join(inference_results_dir, "final_summary")
os.makedirs(results_dir, exist_ok=True)
cfg_fields = ["TP", "PP", "BS"]
configurations_file_name = os.path.join(results_dir, "inference_sweep_configs.csv")
with open(configurations_file_name, 'w') as configs_file:
cfg_writer = csv.writer(configs_file)
cfg_writer.writerow(cfg_fields)
cfg_writer.writerows(configurations)
# Prepare final summary job.
dependency_string = ":".join(job_ids)
summary_submission_file = os.path.join(results_dir, "job_submission.sh")
summary_job_output = os.path.join(results_dir, f"log_final_summary_job_%j.out")
summary_job_error = os.path.join(results_dir, f"log_final_summary_job_%j.err")
summary_job_result = os.path.join(results_dir, "final_output.csv")
summary_name = f"{cfg.search_config.inference_settings.run.model_train_name}_summary"
summary_job_name = f"{cfg.cluster.job_name_prefix}{summary_name}_job"
summary_script_path = f"{cfg.auto_configurator_path}/autoconfig/inference_summary.py"
summary_command_elem = [
f"python3 {summary_script_path}",
f"--model-prefix {cfg.search_config.inference_settings.run.model_train_name}",
f"--configs-csv {configurations_file_name}",
f"--workspace {inference_results_dir}",
f"--output {summary_job_result}",
]
echo_command_elem = f"cat {summary_job_result}"
bash_command = [" ".join(summary_command_elem) + " && " + echo_command_elem]
summary_flags = [
f"--output {summary_job_output}",
f"--error {summary_job_error}",
f"--container-image {cfg.training_container}",
f"--container-mounts {mounts_str}",
f"--unbuffered",
]
summary_flags_str = " ".join(summary_flags)
utils.create_slurm_file(
new_script_path=summary_submission_file,
cmds=bash_command,
job_name=summary_job_name,
flags=summary_flags_str,
time=cfg.search_config.inference_settings.run.time_limit,
nodes=1,
ntasks_per_node=1,
gpus_per_task=cluster_gpus_per_task,
gpus_per_node=cluster_gpus_per_node,
partition=cfg.cluster.partition,
account=cfg.cluster.account,
output=summary_job_output,
comment=f"'FasterTransformer {summary_job_name}'",
dependency=dependency_string,
)
submit_job(summary_submission_file, inference_results_dir)
print("Submitted job to generate the final summary.")
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/inference_sweep.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import glob
import os
def scrap_latency(file_name):
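    # Assumes the benchmark log prints a line such as "... FT-CPP-decoding-beamsearch-time <value> ms",
    # so the second-to-last whitespace-separated token is the latency in milliseconds.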
with open(file_name, "r") as file_handle:
for line in file_handle:
if line.find("FT-CPP-decoding-beamsearch-time") != -1:
elements = line.split(" ")
return float(elements[-2])
return "FAILURE"
def main():
parser = argparse.ArgumentParser(description="Generate summary from inference benchmark")
parser.add_argument("--model-prefix", help="File prefix for logs", required=True)
parser.add_argument("--configs-csv", help="Path to CSV file with profile configurations", required=True)
parser.add_argument("--workspace", help="Path to workspace folder", required=True)
parser.add_argument("--output", help="Path to save output summary", required=True)
args = parser.parse_args()
with open(args.configs_csv, 'r') as csv_file:
config_lines = list(csv.reader(csv_file))
rows = []
for tp, pp, bs in [l for l in config_lines[1:] if len(l) == 3]:
file_prefix = f"{args.workspace}/{args.model_prefix}_tp{tp}_pp{pp}_bs{bs}/log_job*.out"
files = [f for f in glob.glob(file_prefix) if os.path.isfile(f)]
if len(files) != 1:
latency = "MISSING_LOG"
else:
latency = scrap_latency(files[0])
        if isinstance(latency, float):
            gpu_norm_throughput = round(int(bs) * 1000.0 / latency / int(tp) / int(pp), 3)
        else:
            gpu_norm_throughput = "N/A"  # Latency missing or benchmark failed; avoid crashing on float().
        row = [tp, pp, bs, latency, gpu_norm_throughput]
rows.append(row)
header = ["TP", "PP", "BS", "Latency [ms]", "Throughput per GPU [samples/sec/gpu]"]
with open(args.output, 'w') as output_file:
output_writer = csv.writer(output_file)
output_writer.writerow(header)
output_writer.writerows(rows)
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/inference_summary.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Optional
import omegaconf
from autoconfig.base_config import calculate_model_size, generate_base_config
from autoconfig.inference_sweep import search_inference_config
from autoconfig.training_config import search_training_config
SUPPORTED_MODELS = ["gpt3", "t5", "mt5", "bert"]
def search_config(cfg: omegaconf.dictconfig.DictConfig, hydra_args: Optional[str] = None):
"""
Main function that implements the entire pipeline to search the optimal
model config and launch the grid searches for both training and inference
constraints.
:param omegaconf.dictconfig.DictConfig cfg: main hydra config object for the HP tool.
:param Optional[str] hydra_args: hydra override arguments in string format.
:return: None
"""
model_type = cfg.get("search_config_value")
model_name, model_size = model_type.split("/")
assert model_name in SUPPORTED_MODELS, f"search_config must be set to one of {SUPPORTED_MODELS}/<model_size>"
# Read config
hp_cfg = cfg.get("search_config")
train_cfg = hp_cfg.get("train_settings")
nodes = train_cfg.get("num_nodes")
gpus_per_node = train_cfg.get("gpus_per_node")
gpu_memory_gb = train_cfg.get("gpu_memory_gb")
max_training_days = train_cfg.get("max_training_days")
max_minutes_per_run = train_cfg.get("max_minutes_per_run")
model_size_in_b = train_cfg.get("model_size_in_b")
vocab_size = train_cfg.get("vocab_size")
tflops_per_gpu = train_cfg.get("tflops_per_gpu")
num_tokens_in_b = train_cfg.get("num_tokens_in_b")
seq_length = train_cfg.get("seq_length")
custom_cfg = train_cfg.get("custom_config")
gpu_count = nodes * gpus_per_node
assert isinstance(gpu_count, int) and gpu_count > 0, "nodes * gpus_per_node must be an int larger than zero."
assert isinstance(gpu_memory_gb, int) and gpu_memory_gb in (40, 80,), "gpu_memory_gb can only be 40 or 80."
assert (
isinstance(max_minutes_per_run, int) and max_minutes_per_run >= 10
), "max_minutes_per_run must be an int and be at least 10 minutes."
# Logging config
log_dir = train_cfg.get("logs")
os.makedirs(log_dir, exist_ok=True)
os.makedirs(os.path.join(log_dir, "candidate_configs"), exist_ok=True)
os.makedirs(os.path.join(log_dir, "training_logs"), exist_ok=True)
os.makedirs(os.path.join(log_dir, "final_result"), exist_ok=True)
# Calculate model size
model_size_in_b = calculate_model_size(
gpu_count=gpu_count,
max_training_days=max_training_days,
model_size_in_b=model_size_in_b,
tflops_per_gpu=tflops_per_gpu,
num_tokens_in_b=num_tokens_in_b,
model_name=model_name,
)
cfg.search_config.train_settings.model_size_in_b = model_size_in_b
# Generate base config for the given model size
base_cfg = generate_base_config(
model_size_in_b=model_size_in_b,
nodes=nodes,
gpus_per_node=gpus_per_node,
gpu_memory_gb=gpu_memory_gb,
max_training_days=max_training_days,
num_tokens_in_b=num_tokens_in_b,
vocab_size=vocab_size,
seq_length=seq_length,
custom_cfg=custom_cfg,
cfg=cfg,
model_name=model_name,
)
# Launch grid search for training constraints
if cfg.get("run_training_hp_search"):
search_training_config(base_cfg, model_size_in_b, model_name, hydra_args, cfg)
# Launch grid search for inference constraints
if cfg.get("run_inference_hp_search"):
search_inference_config(
base_cfg=base_cfg, cfg=cfg,
)
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/search_config.py |
import csv
import os
import re
import sys
from shutil import copyfile
import hydra
import pandas as pd
from omegaconf import OmegaConf
from tensorboard.backend.event_processing import event_accumulator
@hydra.main(config_path="../../conf", config_name="config")
def main(cfg):
auto_configurator_path = cfg.auto_configurator_path
settings_cfg = cfg.search_config.train_settings
model_size = settings_cfg.model_size_in_b
output_top_n = settings_cfg.output_top_n
nodes = cfg.get("nodes")
training_logs = os.path.join(settings_cfg.get("logs"), "training_logs")
candidate_configs = os.path.join(settings_cfg.get("logs"), "candidate_configs")
final_result_logs = os.path.join(settings_cfg.get("logs"), "final_result")
result_columns = [
"Model Name",
"Model Size",
"Seq Length",
"TP",
"PP",
"MBS",
"Act Ckpt Layers",
"Act Ckpt Micro Bathes",
"Act Ckpt Layers per Pipeline",
"Num Layers",
"Hidden Size",
"FFN Hidden Size",
"GBS",
"Nodes",
"GPUs per Node",
"Time per Step",
"Samples per Second",
"Model TFLOPS / GPU",
"Model TFLOPS Aggregate",
"Config Name",
]
error_columns = [
"Model Name",
"Model Size",
"Seq Length",
"TP",
"PP",
"MBS",
"Act Ckpt Layers",
"Act Ckpt Micro Bathes",
"Act Ckpt Layers per Pipeline",
"Num Layers",
"Hidden Size",
"FFN Hidden Size",
"GBS",
"Nodes",
"GPUs per Node",
"Error Message",
]
result = []
errors = []
dirs = os.listdir(training_logs)
for candidate_dir in dirs:
config_path = os.path.join(candidate_configs, f"{candidate_dir}.yaml")
candidate_cfg = OmegaConf.load(config_path)
model_cfg = candidate_cfg.get("model")
encoder_cfg = model_cfg.get("encoder")
decoder_cfg = model_cfg.get("decoder")
data_cfg = model_cfg.get("data")
trainer_cfg = candidate_cfg.get("trainer")
model_name = candidate_cfg.get("run").get("name").split("_")[0]
gbs = model_cfg.get("global_batch_size")
enc_seq_len = (
model_cfg.get("encoder_seq_length") if model_name in ("gpt3", "bert") else model_cfg.get("seq_length")
)
dec_seq_len = data_cfg.get("seq_length_dec")
if model_name in ("gpt3", "bert"):
hs = model_cfg.get("hidden_size")
ffn_hs = None
layers = model_cfg.get("num_layers")
act_ckpt_layers = model_cfg.get("activations_checkpoint_num_layers")
num_mbs_act = model_cfg.get("num_micro_batches_with_partial_activation_checkpoints")
act_per_pipe = model_cfg.get("activations_checkpoint_layers_per_pipeline")
else:
hs = encoder_cfg.get("hidden_size")
ffn_hs = encoder_cfg.get("ffn_hidden_size")
layers = encoder_cfg.get("num_layers") + decoder_cfg.get("num_layers")
act_ckpt_layers = encoder_cfg.get("activations_checkpoint_num_layers") + decoder_cfg.get(
"activations_checkpoint_num_layers"
)
num_mbs_act = None
act_per_pipe = None
tp = model_cfg.get("tensor_model_parallel_size")
pp = model_cfg.get("pipeline_model_parallel_size")
mbs = model_cfg.get("micro_batch_size")
vocab = settings_cfg.get("vocab_size")
gpus_per_node = trainer_cfg.get("devices")
if f"{nodes}nodes" not in candidate_dir:
continue
for f in os.listdir(os.path.join(training_logs, candidate_dir)):
if f.endswith(".err"):
error_file = os.path.join(training_logs, candidate_dir, f)
error = find_error(error_file)
if error:
errors.append([
model_name,
model_size,
enc_seq_len,
tp,
pp,
mbs,
act_ckpt_layers,
num_mbs_act,
act_per_pipe,
layers,
hs,
ffn_hs,
gbs,
nodes,
gpus_per_node,
error,
])
files = os.listdir(os.path.join(training_logs, candidate_dir, "results"))
for f in files:
if f[:6] == "events":
event_file = os.path.join(training_logs, candidate_dir, "results", f)
ea = event_accumulator.EventAccumulator(event_file)
ea.Reload()
try:
timing_list = ea.Scalars("train_step_timing")
if len(timing_list) <= 6:
continue
timing_list = [x.value for x in timing_list[5:]]
avg_global_step_time = round(sum(timing_list) / len(timing_list), 4)
samples_per_s = round(gbs / avg_global_step_time, 2)
m_tflops, m_tflops_gpu = calculate_tflops(
model_name=model_name,
gbs=gbs,
enc_seq_len=enc_seq_len,
dec_seq_len=dec_seq_len,
hs=hs,
ffn_hs=ffn_hs,
layers=layers,
vocab=vocab,
nodes=nodes,
gpus_per_node=gpus_per_node,
time_per_step=avg_global_step_time,
)
config_name = f"tp{tp}_pp{pp}_mbs{mbs}_act_{act_ckpt_layers}_num_mbs_act_{num_mbs_act}_act_per_pipe_{act_per_pipe}"
result.append(
[
model_name,
model_size,
enc_seq_len,
tp,
pp,
mbs,
act_ckpt_layers,
num_mbs_act,
act_per_pipe,
layers,
hs,
ffn_hs,
gbs,
nodes,
gpus_per_node,
avg_global_step_time,
samples_per_s,
m_tflops_gpu,
m_tflops,
config_name,
]
)
                except Exception:
                    continue
    result.sort(key=lambda x: x[15])  # Sort by "Time per Step".
print(f"Top {min(output_top_n, len(result))} configs sorted from fastest to slowest:")
for i, res in enumerate(result):
print(f"Config #{i+1}: {res[-1]} with {res[14]:.4f}s per global step.")
if i + 1 == output_top_n:
break
top_config = f"{model_name}_{model_size}b_{nodes}nodes_tp_{result[0][2]}_pp_{result[0][3]}_mbs_{result[0][4]}_act_ckpt_{result[0][5]}_num_mbs_act_{result[0][6]}_act_per_pipe_{result[0][7]}"
print("\n==================================================")
print(f"Optimal config: {top_config} with {result[0][14]:.4f}s per global step.")
print(f"Saving config to {final_result_logs}/optimal_config_{model_size}b_{nodes}nodes.yaml.")
print("==================================================\n")
# Save results as a CSV file.
os.makedirs(final_result_logs, exist_ok=True)
result_df = pd.DataFrame(result, columns=result_columns)
result_df.to_csv(os.path.join(final_result_logs, f"final_summary_{nodes}nodes.csv"), index=False)
error_df = pd.DataFrame(errors, columns=error_columns)
error_df.to_csv(os.path.join(final_result_logs, f"failed_jobs_{nodes}nodes.csv"), index=False)
copyfile(
os.path.join(candidate_configs, f"{top_config}.yaml"),
os.path.join(final_result_logs, f"optimal_config_{model_size}b_{nodes}nodes.yaml"),
)
def calculate_tflops(
model_name, gbs, enc_seq_len, dec_seq_len, hs, ffn_hs, layers, vocab, nodes, gpus_per_node, time_per_step,
):
"""Calculates model and hardware TFLOPS for each model.
GPT-3 Formulas:
Model FLOPs = (24𝐵𝑠ℎ^2 + 4𝐵𝑠^2ℎ) x (3 x num_layers) + 6𝐵𝑠ℎ
T5/mT5 Formula:
Model FLOPs =
Bert Formula:
Model FLOPs = 72BLsh^2 * ( 1 + (s/6h) + (v/12hL))
"""
if model_name == "gpt3":
# Model FLOPS calculation
model_flops = (
(24 * gbs * enc_seq_len * hs * hs + 4 * gbs * enc_seq_len * enc_seq_len * hs) * (3 * layers)
+ (6 * gbs * enc_seq_len * hs * vocab)
) / time_per_step
model_flops_per_gpu = model_flops / (nodes * gpus_per_node)
model_tflops = model_flops / 1e12
model_tflops_per_gpu = model_flops_per_gpu / 1e12
elif model_name == "bert":
model_flops = (
72 * gbs * layers * enc_seq_len * hs * hs * (1 + (enc_seq_len / (6 * hs)) + (vocab / (12 * hs * layers)))
) / time_per_step
model_flops_per_gpu = model_flops / (nodes * gpus_per_node)
model_tflops = model_flops / 1e12
model_tflops_per_gpu = model_flops_per_gpu / 1e12
elif model_name in ["t5", "mt5"]:
# Encoder Layer FLOPS: include self attention + MLP
flops_self_attn_enc = 8 * gbs * enc_seq_len * hs * hs + 4 * gbs * enc_seq_len * enc_seq_len * hs
flops_mlp_enc = 6 * gbs * enc_seq_len * hs * ffn_hs # geglu needs two gemms for h -> ffn_h
flops_enc_layer = flops_self_attn_enc + flops_mlp_enc
        # Decoder Layer FLOPS: include self_attn + cross_attn + MLP
flops_self_attn_dec = 8 * gbs * dec_seq_len * hs * hs + 4 * gbs * dec_seq_len * dec_seq_len * hs
flops_cross_attn_dec = (
4 * gbs * enc_seq_len * hs * hs
+ 4 * gbs * dec_seq_len * hs * hs
+ 4 * gbs * enc_seq_len * dec_seq_len * hs
)
flops_mlp_dec = 6 * gbs * dec_seq_len * hs * ffn_hs # geglu needs two gemms for h -> ffn_h
flops_dec_layer = flops_self_attn_dec + flops_cross_attn_dec + flops_mlp_dec
# FLOPs of logits layer in the head
flops_logits = 2 * gbs * dec_seq_len * hs * vocab
# FLOPs of fprop
flops_fprop = (flops_enc_layer + flops_dec_layer) * (layers // 2) + flops_logits
# FLOPs of each train step (FLOPs of bprop is 2*fprop)
model_flops = 3 * flops_fprop / time_per_step
model_flops_per_gpu = model_flops / (nodes * gpus_per_node)
model_tflops = model_flops / 1e12
model_tflops_per_gpu = model_flops_per_gpu / 1e12
else:
raise NotImplementedError("Model type not supported.")
return round(model_tflops, 2), round(model_tflops_per_gpu, 2)
def find_error(error_file: str, errors: list = ["CUDA out of memory"]):
"""
Finds the error among job output.
:param list errors: list of "popular" errors.
:param str error_file: path to the job output.
:return: str error if job has been failed because of one of listed errors and None if not.
:rtype: str
"""
error = None
with open(error_file, 'r') as f:
output = f.read()
for e in errors:
if e in output:
error = e
return error
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | auto_configurator/autoconfig/scripts/compare_throughput.py |
| NeMo-Megatron-Launcher-master | launcher_scripts/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import hydra
import omegaconf
from nemo_launcher.core.data_stages import CustomDataPreparation, MC4DataPreparation, PileDataPreparation
from nemo_launcher.core.export_stages import Export
from nemo_launcher.core.stages import (
AdapterLearning,
Conversion,
EvalHarnessEvaluation,
FineTuning,
IA3Learning,
NeMoEvaluation,
PromptLearning,
Training,
)
from nemo_launcher.core.rlhf_stages import RLHFRewardModel, RLHFPPO
omegaconf.OmegaConf.register_new_resolver("multiply", lambda x, y: x * y, replace=True)
omegaconf.OmegaConf.register_new_resolver("divide_ceil", lambda x, y: int(math.ceil(x / y)), replace=True)
omegaconf.OmegaConf.register_new_resolver("divide_floor", lambda x, y: int(math.floor(x / y)), replace=True)
STR2STAGECLASS = {
"training": Training,
"fine_tuning": FineTuning,
"prompt_learning": PromptLearning,
"adapter_learning": AdapterLearning,
"ia3_learning": IA3Learning,
"conversion": Conversion,
"export": Export,
"evaluation": {
EvalHarnessEvaluation: ["gpt3", "prompt_gpt3"],
NeMoEvaluation: ["t5", "mt5", "prompt_t5", "prompt_mt5", "adapter_t5", "adapter_gpt3", "ia3_t5", "ia3_gpt3"],
},
"data_preparation": {
PileDataPreparation: ["gpt3", "t5", "bert"],
MC4DataPreparation: ["mt5"],
CustomDataPreparation: ["generic"],
},
"rlhf_rm": RLHFRewardModel,
"rlhf_ppo": RLHFPPO,
}
@hydra.main(config_path="conf", config_name="config")
def main(cfg):
requested_stages = cfg.get("stages")
dependency = None
for stage_name in requested_stages:
stage_class = STR2STAGECLASS[stage_name]
if isinstance(stage_class, dict):
stage_config_choice = cfg.get(f"{stage_name}_config")
choice_model_type = stage_config_choice.rsplit("/", 1)[0]
for cls, model_types in stage_class.items():
if choice_model_type in model_types:
stage_class = cls
break
if dependency is not None:
cfg[stage_name]["run"]["dependency"] = dependency
stage = stage_class(cfg)
job_id = stage.run()
job_path = stage.get_job_path()
command = " \\\n ".join(sys.argv)
with open(job_path.folder / "launcher_cmd.log", "w") as f:
f.write(command)
if job_id:
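            # "afterany" chains the requested stages: the next stage is submitted with a Slurm dependency
            # on this job and starts once it terminates, regardless of its exit status.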
dependency = f"afterany:{job_id}"
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/main.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import glob, os
import logging
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Optional
import omegaconf
from nemo_launcher.core.launchers import AutoLauncher
from nemo_launcher.utils.job_utils import JobPaths
from omegaconf import OmegaConf
from nemo_launcher.core.stages import NeMoStage, clean_command_groups
class RLHFRewardModel(NeMoStage):
"""Stage class of rlhf_rm with NeMo scripts"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "rlhf_rm"
self.stage_cfg = cfg.get("rlhf_rm")
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
Provide the essential nemo code path for running the stage, usually different model types use different nemo scripts.
For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
:param str model_type: i.e. `gpt3`, `t5`, `mt5`, etc.
:return: path current stage's essential nemo scripts code
:rtype: Path
"""
model_type_to_code_path = {
"gpt3": self._rlhf_code_path / "examples/nlp/gpt/train_reward_model.py",
}
return model_type_to_code_path[model_type]
class RLHFPPO(NeMoStage):
"""Stage class of rlhf_rm with NeMo scripts"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "rlhf_ppo"
self.stage_cfg = cfg.get("rlhf_ppo")
def get_env_vars(self) -> Dict:
"""
Set up dictionary for environment variables
The environment variables from hydra config will be set inside the job scripts.
For Example:
        Set `env_vars.NVTE_BIAS_DROPOUT_FUSION=1` when calling the nemo_launcher scripts,
`NVTE_BIAS_DROPOUT_FUSION=1` will be set while running the job.
:return: a dictionary of env vars while running the job.
:rtype: Dict
"""
env_vars = {k: v for k, v in self.cfg.get("env_vars").items() if v is not None}
return env_vars
def _make_cluster_parameters(self, cluster: str) -> Dict:
"""
Make a cluster-specific parameters for jobs on different clusters.
Current clusters include bcm(slurm).
For example for bcm, it will return slurm parameters:
{'job_name': 'some_name', 'nodes': 2, 'ntasks_per_node': 8, ...}
:return: a dictionary of cluster parameters, e.g. `ntasks_per_node`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
time_limit = run_cfg.get("time_limit")
dependency = run_cfg.get("dependency")
subcfg_list = ["reward_model_server", "initial_policy_server", "critic_server", "actor"]
job_name = run_cfg.get("name")
nodes = []
for subcfg in subcfg_list:
nodes.append(stage_cfg.get(subcfg).get("trainer").get("num_nodes"))
ntasks_per_node = []
for subcfg in subcfg_list:
ntasks_per_node.append(stage_cfg.get(subcfg).get("trainer").get("devices"))
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
setup = None
env_vars = self.get_env_vars()
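        # The first three heterogeneous job groups host the reward-model, initial-policy and critic servers;
        # HETJOB{i}_HOST resolves (via scontrol) to the first hostname of group i so the actor can reach them.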
for i in range(3):
env_vars[f"HETJOB{i}_HOST"] = f"$(scontrol show hostnames=$SLURM_JOB_NODELIST_HET_GROUP_{i} | head -n1)"
if env_vars:
setup = [f"export {k}={v}" for k, v in env_vars.items()]
cluster_parameters = {}
shared_parameters = {
"job_name": job_name,
"nodes": nodes,
"time": time_limit,
"ntasks_per_node": ntasks_per_node,
"setup": setup,
"heterogeneous": True,
}
if cluster == "bcm":
cluster_cfg = cfg.get("cluster")
slurm_cfg = {**copy.deepcopy(cluster_cfg)}
job_name_prefix = slurm_cfg.pop("job_name_prefix")
cluster_parameters = {**slurm_cfg}
cluster_parameters.update(
{
**shared_parameters,
"dependency": dependency,
"container_image": container_image,
"container_mounts": container_mounts,
}
)
cluster_parameters["job_name"] = job_name_prefix + cluster_parameters["job_name"]
return cluster_parameters
def _cuda_visible_devices(self, cfg_name) -> str:
ntasks_per_node = self.stage_cfg.run.get("ntasks_per_node")
if ntasks_per_node is None:
ntasks_per_node = self.stage_cfg.get(cfg_name).trainer.get("devices", 1)
return (
"CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7"
if ntasks_per_node == 8
else f"CUDA_VISIBLE_DEVICES={','.join(map(str, range(ntasks_per_node)))}"
)
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
command_groups = []
subcfg_list = ["reward_model_server", "initial_policy_server", "critic_server", "actor"]
code_path_list = [
self._rlhf_code_path / "examples/nlp/gpt/serve_reward_model.py",
self._rlhf_code_path / "examples/nlp/gpt/serve_initial_policy.py",
self._rlhf_code_path / "examples/nlp/gpt/serve_ppo_critic.py",
self._rlhf_code_path / "examples/nlp/gpt/train_gpt_ppo_actor.py",
]
for i, code_path in enumerate(code_path_list):
command = self._make_wandb_login_command()
command += self._make_nemo_path_command()
core_command = [
self._cuda_device_max_connections,
self._cuda_visible_devices(subcfg_list[i]),
self._set_ln_sm_margin,
self._skip_ag_overlap,
self._nvte_bias_gelu_nvfusion,
]
            nemo_command = [
f"python3 -u {code_path} ",
f"--config-path={stage_cfg_path.parents[0]}",
f"--config-name={stage_cfg_path.name}",
]
if i == 3:
                nemo_command += [
"actor.model.rlhf.reward_model.ip=${HETJOB0_HOST}",
"actor.model.rlhf.initial_policy.ip=${HETJOB1_HOST}",
"actor.model.rlhf.critic.ip=${HETJOB2_HOST}",
]
            nemo_call_string = " \\\n ".join(nemo_command)
core_command += [
self._make_api_log_command_prefix(results_dir=self.get_job_path().results_folder),
self._make_nsys_command_prefix(results_dir=self.get_job_path().results_folder),
nemo_call_string,
]
core_command_string = " ".join([c for c in core_command if c])
command += [core_command_string]
command_groups.append(command)
command_groups = clean_command_groups(command_groups)
return command_groups
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/rlhf_stages.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from pathlib import Path
from typing import Dict, List, Optional
import omegaconf
from nemo_launcher.core.launchers import AutoLauncher
from nemo_launcher.core.stages import NemoMegatronStage, clean_command_groups, create_args_list
from nemo_launcher.utils.file_utils import download_single_file
class DataStage(NemoMegatronStage):
"""
DataStage is base class for data preprocessing stages.
It can hold multiple sub-stages. For example, preparing the Pile dataset includes data downloading,
extraction and data preprocessing. They have dependencies on each other and will be launched one by one.
"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "data_preparation"
self.stage_cfg = cfg.get("data_preparation")
def _make_sub_stages(self):
raise NotImplementedError
def run(self) -> str:
"""
Run current stage including all of the substages, returns job id on slurm based system otherwise empty string
:return: job id on slurm based system otherwise empty string
:rtype: str
"""
# Setup folders and datasets
self.setup_folder_and_data()
sub_stages = self._make_sub_stages()
job_id = ""
for sub_stage in sub_stages:
# Save stage hydra config
job_path = self.get_job_path(sub_stage)
job_path.folder.mkdir(parents=True, exist_ok=True)
stage_cfg_path = NemoMegatronStage.save_stage_hydra_config(self.stage_cfg, job_path)
if job_id:
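                # Slurm "aftercorr" dependency: each array task of this sub-stage starts only after the
                # corresponding task of the previous sub-stage (e.g. extract after download) finishes successfully.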
dependency = f"aftercorr:{job_id}"
self.stage_cfg["run"]["dependency"] = dependency
# Make cluster parameters
cluster_parameters = self._make_cluster_parameters(self.cluster, sub_stage)
# Make command groups
command_groups = self.make_stage_command_groups(stage_cfg_path, sub_stage)
# Create launcher
launcher = AutoLauncher(folder=job_path.folder, cluster=self.cluster, **cluster_parameters,)
job_id = launcher.launch(command_groups=command_groups)
return job_id
    def make_stage_command_groups(self, stage_cfg_path: Path, sub_stage: Optional[str] = None) -> List[List[str]]:
"""
Make the command groups for current stage
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:param Optional sub_stage: current sub_stage name
:return: command groups for current stage
:rtype: List[List[str]]
"""
command_groups = [[]]
command_groups[0] += self._make_sub_stage_command(sub_stage)
command_groups = clean_command_groups(command_groups)
return command_groups
def _make_private_cluster_parameters(self, cluster, sub_stage):
raise NotImplementedError
def _make_cluster_parameters(self, cluster: str, sub_stage: Optional = None,) -> Dict:
"""
Make a cluster-specific parameters for jobs on different clusters.
Current clusters include bcm(slurm), bcp and interactive.
For example for bcm, it will return slurm parameters:
{'job_name': 'some_name', 'nodes': 2, 'ntasks_per_node': 8, ...}
:param str cluster: i.e. `bcm`, `bcp`, `interactive`, etc.
:param Optional sub_stage: current sub_stage name
:return: a dictionary of cluster parameters, e.g. `ntasks_per_node`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
job_name = run_cfg.get("name")
time_limit = run_cfg.get("time_limit")
dependency = run_cfg.get("dependency")
env_vars = self.get_env_vars()
env_vars["PYTHONPATH"] = f"{self._launcher_scripts_path}:${{PYTHONPATH}}" # Required by pile download
env_vars["NGC_ARRAY_TYPE"] = "MPIJob" # Required by BCP
setup = [f"export {k}={v}" for k, v in env_vars.items()]
cluster_parameters = {}
shared_parameters = {
"job_name": job_name,
"time": time_limit,
"setup": setup,
}
private_parameters = self._make_private_cluster_parameters(cluster, sub_stage,)
if cluster == "bcm":
cluster_cfg = cfg.get("cluster")
slurm_cfg = {**copy.deepcopy(cluster_cfg)}
job_name_prefix = slurm_cfg.pop("job_name_prefix")
cluster_parameters = {
**slurm_cfg,
"dependency": dependency,
}
cluster_parameters.update(
{**shared_parameters, **private_parameters,}
)
cluster_parameters["job_name"] = job_name_prefix + cluster_parameters["job_name"]
elif cluster == "bcp":
cluster_parameters.update(
{**shared_parameters, **private_parameters,}
)
elif cluster == "interactive":
raise ValueError("Data preparation is not supported in interactive mode.")
return cluster_parameters
class PileDataPreparation(DataStage):
"""DataStage for preparing the Pile dataset for gpt3 and t5"""
def _make_sub_stages(self) -> List[str]:
"""
Create a list of sub-stage names which are required to run in current data stage.
Based on the input config, some of sub stages may not need to run.
:return: a list of sub-stage names which are required to run
:rtype: List[str]
"""
sub_stages = []
if self.stage_cfg.get("download_the_pile", False):
sub_stages += ["download", "extract"]
if self.stage_cfg.get("preprocess_data", False):
sub_stages += ["preprocess"]
return sub_stages
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
job_path = self.get_job_path()
job_path.folder.mkdir(parents=True, exist_ok=True)
data_cfg = self.stage_cfg
download_vocab_url = data_cfg.get("download_vocab_url")
download_merges_url = data_cfg.get("download_merges_url")
vocab_save_dir = data_cfg.get("vocab_save_dir")
merges_save_dir = data_cfg.get("merges_save_dir")
# Download vocab
if download_vocab_url is not None:
assert vocab_save_dir is not None, "vocab_save_dir must be a valid path."
download_single_file(
url=download_vocab_url,
save_dir=vocab_save_dir,
file_name="vocab.json" if download_vocab_url.endswith("json") else "vocab.txt",
)
# Download merges
if download_merges_url is not None:
assert merges_save_dir is not None, "merges_save_dir must be a valid path."
download_single_file(
url=download_merges_url, save_dir=merges_save_dir, file_name="merges.txt",
)
def _make_private_cluster_parameters(self, cluster: str, sub_stage: str) -> Dict:
"""
A simplifying function to make cluster parameters specific to each cluster type.
Shared cluster parameters are handled in _make_cluster_parameters.
        This function is introduced because different dataset preparations require different slurm parameters,
        while the shared parameters stay the same. As a result, each DataStage subclass only needs to override
        its private parameters.
:param str cluster: cluster type
:param str sub_stage: current sub_stage name
:return: a dictionary of private cluster parameters, e.g. `bcp_preproc_npernode`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
node_array_size = run_cfg.get("node_array_size")
array = run_cfg.get("array")
bcp_preproc_npernode = run_cfg.get("bcp_preproc_npernode") if sub_stage == "preprocess" else 1
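        # On Slurm (bcm) a sub-stage runs as a job array with one node per array task;
        # the "%{node_array_size}" suffix caps how many array tasks run at the same time.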
if cluster == "bcm":
return {
"nodes": 1,
"array": f"{array}%{node_array_size}",
"container_image": container_image,
"container_mounts": container_mounts,
}
if cluster == "bcp":
return {
"nodes": node_array_size,
"ntasks_per_node": bcp_preproc_npernode,
"bcp_launcher": "'mpirun --allow-run-as-root'",
}
return {}
def _make_sub_stage_command(self, sub_stage: str) -> List[str]:
"""Make a command of the specified sub-stage"""
pile_prep_path = self._launcher_scripts_path / "nemo_launcher/collections/dataprep_scripts/pile_dataprep"
stage_to_code_path = {
"download": pile_prep_path / "download.py",
"extract": pile_prep_path / "extract.py",
"preprocess": pile_prep_path / "preprocess.py",
}
choice_model_type, choice_name = self.get_stage_config_choice()
code_path = stage_to_code_path[sub_stage]
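        # create_args_list turns the keyword arguments below into command-line overrides for
        # the sub-stage script (Hydra-style "key=value" entries, since hydra=True).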
args = create_args_list(
hydra=True,
data_config=choice_name,
cluster_type=self.cluster,
launcher_scripts_path=self._launcher_scripts_path,
data_dir=self._data_dir,
the_pile_url=self.stage_cfg.get("the_pile_url"),
file_numbers=self.stage_cfg.get("file_numbers"),
rm_downloaded=self.stage_cfg.get("rm_downloaded"),
rm_extracted=self.stage_cfg.get("rm_extracted"),
tokenizer_type=self.stage_cfg.get("tokenizer_type"),
vocab_save_dir=self.stage_cfg.get("vocab_save_dir"),
merges_save_dir=self.stage_cfg.get("merges_save_dir"),
)
sub_stage_command = [f"python3 -u {code_path}", *args]
sub_stage_command = " \\\n ".join(sub_stage_command)
return [sub_stage_command]
class MC4DataPreparation(DataStage):
"""DataStage for preparing the mC4 dataset for mt5"""
def _make_sub_stages(self) -> List[str]:
"""
Create a list of sub-stage names which are required to run in current data stage.
Based on the input config, some of sub stages may not need to run.
:return: a list of sub-stage names which are required to run
:rtype: List[str]
"""
sub_stages = []
if self.stage_cfg.get("download_mc4", False):
sub_stages += ["prepare", "download"]
if self.stage_cfg.get("preprocess_data", False):
sub_stages += ["setup_preprocess", "preprocess"]
return sub_stages
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
job_path = self.get_job_path()
job_path.folder.mkdir(parents=True, exist_ok=True)
data_cfg = self.stage_cfg
download_vocab_url = data_cfg.get("download_vocab_url")
download_tokenizer_url = data_cfg.get("download_tokenizer_url")
vocab_save_dir = data_cfg.get("vocab_save_dir")
tokenizer_save_dir = data_cfg.get("tokenizer_save_dir")
if download_vocab_url is not None:
assert vocab_save_dir is not None, "vocab_save_dir must be a valid path."
download_single_file(
url=download_vocab_url, save_dir=vocab_save_dir, file_name="vocab.txt",
)
if download_tokenizer_url is not None:
            assert tokenizer_save_dir is not None, "tokenizer_save_dir must be a valid path."
download_single_file(
url=download_tokenizer_url, save_dir=tokenizer_save_dir, file_name="mt5_tokenizer.model",
)
def _make_private_cluster_parameters(self, cluster: str, sub_stage: str) -> Dict:
"""
A simplifying function to make cluster parameters specific to each cluster type.
Shared cluster parameters are handled in _make_cluster_parameters.
        This function is introduced because different dataset preparations require different slurm parameters,
        while the shared parameters stay the same. As a result, each DataStage subclass only needs to override
        its private parameters.
:param str cluster: cluster type
:param str sub_stage: current sub_stage name
:return: a dictionary of private cluster parameters, e.g. `bcp_preproc_npernode`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
node_array_size = run_cfg.get("node_array_size") if sub_stage in ["download", "preprocess"] else 1
array = f"0-{node_array_size-1}"
if sub_stage == "preprocess":
ntasks_per_node = run_cfg.get("workers_per_node")
cpus_per_task = run_cfg.get("cpus_per_node") // ntasks_per_node
else:
ntasks_per_node = 1
cpus_per_task = None
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
if cluster == "bcm":
return {
"nodes": 1,
"array": f"{array}%{node_array_size}",
"container_image": container_image,
"container_mounts": container_mounts,
"ntasks_per_node": ntasks_per_node,
"cpus_per_task": cpus_per_task,
}
if cluster == "bcp":
return {
"nodes": node_array_size,
"ntasks_per_node": ntasks_per_node,
"bcp_launcher": "'mpirun --allow-run-as-root'",
}
return {}
def _make_sub_stage_command(self, sub_stage: str) -> List[str]:
"""Make a command of the specified sub-stage"""
mc4_prep_path = self._launcher_scripts_path / "nemo_launcher/collections/dataprep_scripts/mc4_dataprep"
stage_to_code_path = {
"prepare": mc4_prep_path / "prepare.py",
"download": mc4_prep_path / "download.py",
"setup_preprocess": mc4_prep_path / "setup_preprocess.py",
"preprocess": mc4_prep_path / "preprocess.py",
}
data_cfg = self.stage_cfg
run_cfg = data_cfg.get("run")
code_path = stage_to_code_path[sub_stage]
if sub_stage == "prepare":
args = create_args_list(
data_path=data_cfg.get("mc4_dir"),
git_lfs_path=data_cfg.get("git_lfs_dir"),
languages=data_cfg.get("languages"),
node_array_size=run_cfg.get("node_array_size"),
worker_mapping_file=data_cfg.get("download_worker_mapping"),
)
if data_cfg.get("use_cleaned_english"):
args += ["--cleaned-en"]
elif sub_stage == "download":
args = create_args_list(
c4_path=Path(data_cfg.get("mc4_dir")) / "c4",
git_lfs_path=data_cfg.get("git_lfs_dir"),
worker_mapping_file=data_cfg.get("download_worker_mapping"),
)
elif sub_stage == "setup_preprocess":
args = create_args_list(
c4_path=Path(data_cfg.get("mc4_dir")) / "c4",
soft_link_path=data_cfg.get("softlinks_dir"),
languages=data_cfg.get("languages"),
node_array_size=run_cfg.get("node_array_size"),
workers_per_node=run_cfg.get("workers_per_node"),
max_split_size=200,
worker_mapping_file=data_cfg.get("preprocess_worker_mapping"),
)
if data_cfg.get("use_cleaned_english"):
args += ["--cleaned-en"]
else:
assert sub_stage == "preprocess", f"Unknown substage {sub_stage}"
args = create_args_list(
output_path=data_cfg.get("preprocessed_dir"),
workers_per_node=run_cfg.get("workers_per_node"),
worker_mapping_file=data_cfg.get("preprocess_worker_mapping"),
tokenizer_library="sentencepiece",
tokenizer_model=data_cfg.get("tokenizer_model"),
dataset_impl="mmap",
log_interval="2000",
preproc_folder="store_true",
apply_ftfy="store_true",
workers=run_cfg.get("cpus_per_node") // run_cfg.get("workers_per_node"),
)
if data_cfg.get("rm_downloaded"):
args += ["--rm-downloaded"]
sub_stage_command = [f"python3 -u {code_path}", *args]
sub_stage_command = " \\\n ".join(sub_stage_command)
return [sub_stage_command]
class CustomDataPreparation(DataStage):
"""DataStage for preparing a customized dataset"""
def _make_sub_stages(self) -> List[str]:
"""
Create a list of sub-stage names which are required to run in current data stage.
Based on the input config, some of sub stages may not need to run.
:return: a list of sub-stage names which are required to run
:rtype: List[str]
"""
sub_stages = []
if self.stage_cfg.get("train_tokenizer", False):
sub_stages += ["train_tokenizer"]
if self.stage_cfg.get("preprocess_data", False):
sub_stages += ["preprocess"]
return sub_stages
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
job_path = self.get_job_path()
job_path.folder.mkdir(parents=True, exist_ok=True)
# Setup preprocess data
data_cfg = self.stage_cfg
raw_dataset_files = data_cfg.get("raw_dataset_files")
preprocess_worker_mapping = data_cfg.get("preprocess_worker_mapping")
if data_cfg.get("preprocess_data", False):
if not isinstance(raw_dataset_files, omegaconf.listconfig.ListConfig):
raw_dataset_files = os.listdir(raw_dataset_files)
# Sort list of files in directory by size
sorted_files = sorted(raw_dataset_files, key=lambda x: os.stat(x).st_size)
file_sizes = [os.stat(x).st_size for x in sorted_files]
avail_workers = nodes * workers_per_node
distributed_files = [[] for _ in range(avail_workers)]
distributed_size = [0] * avail_workers
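            # Greedy load balancing: walk the files from smallest to largest and always hand
            # the next file to the worker with the smallest accumulated byte count so far.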
for f, file_size in zip(sorted_files, file_sizes):
min_ind = distributed_size.index(min(distributed_size))
distributed_files[min_ind].append(f)
distributed_size[min_ind] += file_size
output = [",".join(distributed_files[i]) for i in range(avail_workers)]
output = "\n".join(output)
with open(preprocess_worker_mapping, "w") as file:
file.write(output)
print(f" ****** Workers mapping saved to {preprocess_worker_mapping} ...")
for i in range(avail_workers):
print(
f"{i + 1:>4d} "
f"{distributed_size[i]:>7.2f}GB "
f"{','.join([os.path.basename(file) for file in distributed_files[i]]):s}"
)
def _make_private_cluster_parameters(self, cluster: str, sub_stage: str) -> Dict:
"""
A simplifying function to make cluster parameters specific to each cluster type.
Shared cluster parameters are handled in _make_cluster_parameters.
        This function is introduced because different dataset preparations require different slurm parameters,
        while the shared parameters stay the same. As a result, each DataStage subclass only needs to override
        its private parameters.
:param str cluster: cluster type
:param str sub_stage: current sub_stage name
:return: a dictionary of private cluster parameters, e.g. `bcp_preproc_npernode`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
if sub_stage == "preprocess":
node_array_size = run_cfg.get("node_array_size")
ntasks_per_node = run_cfg.get("workers_per_node")
cpus_per_task = run_cfg.get("cpus_per_node") // ntasks_per_node
else:
node_array_size = 1
ntasks_per_node = 1
cpus_per_task = None
array = f"0-{node_array_size - 1}"
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
if cluster == "bcm":
return {
"nodes": 1,
"array": f"{array}%{node_array_size}",
"container_image": container_image,
"container_mounts": container_mounts,
"ntasks_per_node": ntasks_per_node,
"cpus_per_task": cpus_per_task,
}
if cluster == "bcp":
return {
"nodes": node_array_size,
"ntasks_per_node": ntasks_per_node,
"bcp_launcher": "'mpirun --allow-run-as-root'",
}
return {}
def _make_sub_stage_command(self, sub_stage: str) -> List[str]:
"""Make a command of the specified sub-stage"""
        data_cfg = self.stage_cfg
        run_cfg = data_cfg.get("run")
if sub_stage == "train_tokenizer":
bpe_save_dir = Path(data_cfg.get("bpe_save_dir"))
bpe_save_dir.mkdir(parents=True, exist_ok=True)
train_tokenizer_args = data_cfg.get("train_tokenizer_args")
code_path = f"cd {bpe_save_dir} && spm_train"
args = create_args_list(**train_tokenizer_args)
else:
assert sub_stage == "preprocess", f"Unknown substage {sub_stage}"
code_path = (
self._launcher_scripts_path
/ "nemo_launchernemo_launcher/collections/dataprep_scripts/custom_dataprep/preprocess.py"
)
args = create_args_list(
output_path=data_cfg.get("preprocessed_dir"),
workers_per_node=run_cfg.get("workers_per_node"),
worker_mapping_file=data_cfg.get("preprocess_worker_mapping"),
tokenizer_library="sentencepiece",
tokenizer_model=data_cfg.get("tokenizer_model"),
dataset_impl="mmap",
log_interval="2000",
preproc_folder="store_true",
apply_ftfy="store_true",
workers=run_cfg.get("cpus_per_node") // run_cfg.get("workers_per_node"),
)
sub_stage_command = [f"python3 -u {code_path}", *args]
sub_stage_command = " \\\n ".join(sub_stage_command)
return [sub_stage_command]
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/data_stages.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from logging import config
from typing import Union
# The log level can be changed through the NEMO_LAUNCHER_LOG_LEVEL environment variable:
# it accepts either a level name (e.g. DEBUG, INFO, WARNING) or a numeric logging level.
LOG_VARNAME = "NEMO_LAUNCHER_LOG_LEVEL"
level_str = os.environ.get(LOG_VARNAME, "INFO").upper()
level: Union[int, str] = level_str if not level_str.isdigit() else int(level_str)
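# Setting NEMO_LAUNCHER_LOG_LEVEL=NOCONFIG skips the dictConfig call below entirely.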
CONFIG = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"nemo_megatron_basic": {"format": "%(name)s %(levelname)s (%(asctime)s) - %(message)s"}},
"handlers": {
"nemo_megatron_out": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "nemo_megatron_basic",
"stream": "ext://sys.stdout",
},
"nemo_megatron_err": {
"class": "logging.StreamHandler",
"level": "WARNING",
"formatter": "nemo_megatron_basic",
"stream": "ext://sys.stderr",
},
},
"loggers": {"nemo_launcher": {"handlers": ["nemo_megatron_err", "nemo_megatron_out"], "level": level}},
}
if level != "NOCONFIG":
logging.config.dictConfig(CONFIG)
def get_logger() -> logging.Logger:
return logging.getLogger("NEMO_LAUNCHER")
def exception(*args: str) -> None:
get_logger().exception(*args)
def warning(*args: str) -> None:
get_logger().warning(*args)
logger = get_logger()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/logger.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import inspect
import os
import random
import re
import shlex
import shutil
import warnings
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Set, Union
import nemo_launcher.utils.job_utils as job_utils
from nemo_launcher.core.logger import logger
NEMO_LAUNCHER_CI = os.getenv("NEMO_LAUNCHER_CI", "False").lower() in ("true", "t", "1")
NEMO_LAUNCHER_DEBUG = os.getenv("NEMO_LAUNCHER_DEBUG", "False").lower() in ("true", "t", "1")
NEMO_LAUNCHER_MEMORY_MEASURE = os.getenv("NEMO_LAUNCHER_MEMORY_MEASURE", "False").lower() in ("true", "t", "1")
class AutoLauncher:
"""
Automatic launcher class. It will create a launcher based on input cluster name.
"""
def __init__(self, folder: Union[str, Path], job_name: str, cluster: Optional[str] = None, **kwargs: Any) -> None:
self.cluster = cluster or self.which()
self.cluster = self.cluster.lower()
launchers = self.get_launchers()
if self.cluster not in launchers:
raise ValueError(f"AutoLauncher doesn't know any cluster named {self.cluster}")
self._launcher = launchers[self.cluster](folder, job_name, **kwargs)
def launch(self, command_groups: List[List[str]]) -> str:
"""
Use the launcher to launch the command groups.
:param List[List[str]] command_groups: Command groups to launch with
:return: job id on slurm based system otherwise empty string
:rtype: str
"""
job_id = self._launcher.launch(command_groups)
return job_id
@staticmethod
def which() -> str:
"""Returns what the detected cluster is"""
raise NotImplementedError
@staticmethod
def get_launchers():
"""Returns supported launchers as a dictionary from launcher name to launcher class"""
return {
"bcm": SlurmLauncher,
"bcp": BCPLauncher,
"interactive": InteractiveLauncher,
}
class Launcher:
"""Base launcher class"""
def __init__(self, folder: Union[Path, str], job_name: str):
self.folder = folder
self.job_name = job_name
def launch(self, command_groups: List[List[str]]) -> str:
"""
Use the launcher to launch the command groups.
:param List[List[str]] command_groups: Command groups to launch with
:return: job id on slurm based system otherwise empty string
:rtype: str
"""
submission_file_path = self._make_submission_file(command_groups)
logger.info(f"Job {self.job_name} submission file created at '{submission_file_path}'")
job_id = ""
if not NEMO_LAUNCHER_DEBUG:
job_id = self._submit_command(submission_file_path)
if job_id:
logger.info(f"Job {self.job_name} submitted with Job ID {job_id}")
with open(self.folder / "launcher.log", "w") as f:
f.write(f"Submitted batch job {job_id}")
else:
job_id = str(random.randint(10000, 99999))
logger.info(f"[DEBUG] Job {self.job_name} submitted with FAKE Job ID {job_id}")
return job_id
def _submit_command(self, submission_file_path: Path) -> str:
"""Submits a set of command groups to the cluster"""
raise NotImplementedError
def _make_submission_file(self, command_groups: List[List[str]]) -> Path:
"""
        Make a submission script file:
        on an interactive cluster, it is a bash script, triggered with `bash`;
        on a slurm cluster, it is an sbatch script, triggered with `sbatch`;
        on a BCP cluster, it is a BCP script, triggered with `bash`.
        :param List[List[str]] command_groups: Command groups to launch with
        :return: path to the created submission script file
        :rtype: Path
"""
job_paths = job_utils.JobPaths(folder=self.folder, job_name=self.job_name)
folder = job_paths.folder
folder.mkdir(parents=True, exist_ok=True)
submission_file_path = job_paths.submission_file
with submission_file_path.open("w") as f:
f.write(self._make_submission_file_text(command_groups))
return submission_file_path
class InteractiveLauncher(Launcher):
"""
Interactive job launcher
This class is used to hold the parameters to run a job on an interactive node (single node only).
In practice, it will create a batch file in the specified directory for the job and
trigger the job with `bash` command.
:param Union[Path, str] folder: folder for storing job submission/output and logs.
:param str job_name: Name of the job, used as job folder name
    :param Any **kwargs: Other cluster parameters required for interactive runs
"""
def __init__(self, folder: Union[Path, str], job_name: str, **kwargs: Any) -> None:
super().__init__(folder, job_name)
self.parameters = kwargs
def _submit_command(self, submission_file_path: Path) -> str:
"""Launch the submission command"""
command_list = self._make_submission_command(submission_file_path)
# run
job_utils.CommandFunction(command_list, ret_stdout=False, verbose=False)() # explicit errors
return ""
@staticmethod
def _make_submission_command(submission_file_path: Path) -> List[str]:
"""Make a command to trigger submission script. On interactive cluster, the script is triggerred with bash"""
return ["bash", str(submission_file_path)]
def _make_submission_file_text(self, command_groups: List[List[str]]) -> str:
"""
Given the command groups, generate submission script file's text.
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
On interactive cluster, multi-gpu python scripts are launched with `torchrun --nproc_per_node=??`
:param List[List[str]] command_groups: Command groups to launch with
:return: submission script file's text
:rtype: str
"""
nodes = self.parameters.get("nodes", 1)
ntasks_per_node = self.parameters.get("ntasks_per_node", 1)
assert nodes == 1, "Multi-node is not supported in interactive mode."
paths = job_utils.JobPaths(folder=self.folder, job_name=self.job_name)
time_tag = datetime.datetime.now().strftime("%m%d_%H%M%S")
stdout = str(paths.stdout).replace("_%j", f"_{time_tag}")
# now create
lines = ["#!/bin/bash", ""]
# environment setup:
setup = self.parameters.get("setup", None)
if setup is not None:
lines += ["", "# setup"] + setup
for group_ind, command_group in enumerate(command_groups):
command = ";\n ".join(command_group)
command = command.replace("python3 -u", f"torchrun --nproc_per_node={ntasks_per_node}")
lines += [
"",
f"# command {group_ind + 1}",
f"bash -c \"",
f" {command} \" 2>&1 | tee -a {stdout}",
"",
]
return "\n".join(lines)
class BCPLauncher(Launcher):
"""
BCP job launcher
This class is used to hold the parameters to run a job on BCP platform.
In practice, it will create a batch file in the specified directory for the job
and trigger the job with `bash` command.
:param Union[Path, str] folder: folder for storing job submission/output and logs.
:param str job_name: Name of the job, used as job folder name
    :param Any **kwargs: Other cluster parameters required for BCP runs,
        including `nodes`, `ntasks_per_node`, `bcp_launcher`, etc.
"""
def __init__(self, folder: Union[Path, str], job_name: str, **kwargs: Any) -> None:
super().__init__(folder, job_name)
self.parameters = kwargs
self.parameters = self._convert_parameters(self.parameters)
@classmethod
def _equivalence_dict(cls):
return {
"name": "job_name",
"nodes": "nnodes",
"tasks_per_node": "npernode",
"ntasks_per_node": "npernode",
"bcp_launcher": "launcher",
}
def _convert_parameters(self, params: Dict[str, Any]) -> Dict[str, Any]:
"""translate bcp parameter names"""
# replace type in some cases
eq_dict = self._equivalence_dict()
if eq_dict is not None:
params = {eq_dict.get(k, k): v for k, v in params.items()}
return params
def _submit_command(self, submission_file_path: Path) -> str:
"""Launch the submission command"""
command_list = self._make_submission_command(submission_file_path)
# run
job_utils.CommandFunction(command_list, ret_stdout=False, verbose=False)() # explicit errors
return ""
@staticmethod
def _make_submission_command(submission_file_path: Path) -> List[str]:
"""Make a command to trigger submission script. On BCP cluster, the script is triggerred with bash"""
return ["bash", str(submission_file_path)]
def _make_submission_file_text(self, command_groups: List[List[str]]) -> str:
"""
Given the command groups, generate submission script file's text.
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
On BCP cluster, multi-gpu python scripts are launched with `bcprun --nnodes ? --npernode ?`
:param List[List[str]] command_groups: Command groups to launch with
:return: submission script file's text
:rtype: str
"""
paths = job_utils.JobPaths(folder=self.folder, job_name=self.job_name)
time_tag = datetime.datetime.now().strftime("%m%d_%H%M%S")
stdout = str(paths.stdout).replace("_%j", f"_{time_tag}")
nnodes = self.parameters.get("nnodes", 1)
npernode = self.parameters.get("npernode", 1)
launcher = self.parameters.get("launcher")
launcher_flags = ""
if launcher is not None:
launcher_flags = f"--launcher {launcher}"
env_vars = self.parameters.get("env_vars")
env_flags = ""
if env_vars is not None:
env_flags = [f"--env '{k}={v}'" for k, v in env_vars.items()]
env_flags = " ".join(env_flags)
# now create
lines = ["#!/bin/bash", ""]
# environment setup:
setup = self.parameters.get("setup", None)
if setup is not None:
lines += ["", "# setup"] + setup
# Add pause_and_prime_dns_connection to command groups on BCP
launcher_scripts_path = Path("/opt/NeMo-Megatron-Launcher/launcher_scripts") # Hard code path on BCP
pause_and_prime_dns_connection_command = (
f"python3 -u {launcher_scripts_path / 'nemo_launcher/collections/pause_and_prime_dns_connections.py'}"
)
_nemo_code_path = "/opt/NeMo"
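        # Command groups that reference the NeMo code path get a warm-up step that primes
        # DNS connections between BCP replicas before the main command starts.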
for ind in range(len(command_groups)):
# TODO: Find a better way to insert pause_and_prime_dns_connection_command
            if any(_nemo_code_path in command for command in command_groups[ind]):
command_groups[ind] = [pause_and_prime_dns_connection_command] + command_groups[ind]
for group_ind, command_group in enumerate(command_groups):
command = ";\n ".join(command_group)
if group_ind + 1 == len(command_groups):
bcprun_cmd = f"bcprun --nnodes {nnodes} --npernode {npernode}"
else:
bcprun_cmd = f"bcprun --nnodes 1 --npernode 1"
lines += [
"",
f"# command {group_ind + 1}",
f"{bcprun_cmd} " f"{launcher_flags} {env_flags} --cmd \"",
f" {command} \" 2>&1 | tee -a {stdout}",
"",
]
return "\n".join(lines)
class SlurmLauncher(Launcher):
"""
Slurm job launcher
This class is used to hold the parameters to run a job on slurm.
In practice, it will create a batch file in the specified directory for the job,
trigger the job with `sbatch` command and return a job id.
:param Union[Path, str] folder: folder for storing job submission/output and logs.
:param str job_name: Name of the job, used as job folder name
:param Any **kwargs: See slurm documentation for most parameters.
Most useful parameters are: time, mem, gpus_per_node, cpus_per_task, partition
Below are the parameters that differ from slurm documentation:
        setup: a list of commands to run in sbatch before running srun
"""
def __init__(self, folder: Union[Path, str], job_name: str, **kwargs: Any) -> None:
super().__init__(folder, job_name)
self.parameters = {}
self._update_parameters(job_name=job_name, **kwargs)
if shutil.which("srun") is None and not NEMO_LAUNCHER_DEBUG:
raise RuntimeError('Could not detect "srun", are you indeed on a slurm cluster?')
@classmethod
def _equivalence_dict(cls):
return {
"name": "job_name",
"timeout_min": "time",
"mem_gb": "mem",
"nodes": "nodes",
"cpus_per_task": "cpus_per_task",
"gpus_per_node": "gpus_per_node",
"tasks_per_node": "ntasks_per_node",
}
@classmethod
def _valid_parameters(cls) -> Set[str]:
"""Parameters that can be set through update_parameters"""
return set(_get_default_parameters())
def _convert_parameters(self, params: Dict[str, Any]) -> Dict[str, Any]:
"""translate slurm parameter names"""
# replace type in some cases
eq_dict = self._equivalence_dict()
if eq_dict is not None:
params = {eq_dict.get(k, k): v for k, v in params.items()}
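            # e.g. {"mem_gb": 16, "tasks_per_node": 8} -> {"mem": 16, "ntasks_per_node": 8};
            # "mem" is further normalized below (16 -> "16GB", 0.5 -> "512MB").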
if "mem" in params:
params["mem"] = _convert_mem(params["mem"])
return params
def _update_parameters(self, **kwargs: Any) -> None:
"""
Updates sbatch submission file parameters
        If an unrecognized keyword argument is passed, a warning listing all eligible
        parameters with their default values is logged, and the argument is forwarded
        to sbatch via `additional_parameters`.
:param Any **kwargs: See slurm documentation for most parameters.
Most useful parameters are: time, mem, gpus_per_node, cpus_per_task, partition
Below are the parameters that differ from slurm documentation:
            setup: a list of commands to run in sbatch before running srun
"""
defaults = _get_default_parameters()
in_valid_parameters = sorted(set(kwargs) - set(defaults))
if in_valid_parameters:
string = "\n - ".join(f"{x} (default: {repr(y)})" for x, y in sorted(defaults.items()))
logger.warning(
f"Unrecognized sbatch parameter(s): {in_valid_parameters}. Use at your own risk.\n\nValid parameters are:\n - {string}"
)
self.parameters.update({k: v for k, v in kwargs.items() if k not in in_valid_parameters})
self.parameters.update({"additional_parameters": {k: kwargs[k] for k in in_valid_parameters}},)
self.parameters = self._convert_parameters(self.parameters)
def _submit_command(self, submission_file_path: Path) -> str:
"""Launch the submission command"""
command_list = self._make_submission_command(submission_file_path)
# run
output = job_utils.CommandFunction(command_list, verbose=False)() # explicit errors
job_id = ""
if output:
job_id = self._get_job_id_from_submission_command(output)
return job_id
def _make_submission_file_text(self, command_groups: List[List[str]]) -> str:
"""
Given the command groups, generate submission script file's text.
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param List[List[str]] command_groups: Command groups to launch with
:return: submission script file's text
:rtype: str
"""
return _make_sbatch_string(command_groups=command_groups, folder=self.folder, **self.parameters)
@staticmethod
def _make_submission_command(submission_file_path: Path) -> List[str]:
"""Make a command to trigger submission script. On slurm cluster, the script is triggerred with sbatch"""
return ["sbatch", str(submission_file_path)]
@staticmethod
def _get_job_id_from_submission_command(string: Union[bytes, str]) -> str:
"""Returns the job ID from the output of sbatch string"""
if not isinstance(string, str):
string = string.decode()
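        # sbatch prints e.g. "Submitted batch job 123456"; extract the numeric job id.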
output = re.search(r"job (?P<id>[0-9]+)", string)
if output is None:
            raise RuntimeError(
                f'Could not make sense of sbatch output "{string}"\n'
                "Job instance will not be able to fetch status\n"
                "(you may however set the job_id manually if needed)"
)
return output.group("id")
@functools.lru_cache()
def _get_default_parameters() -> Dict[str, Any]:
"""Parameters that can be set through update_parameters"""
specs = inspect.getfullargspec(_make_sbatch_string)
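    # Pair each keyword argument of _make_sbatch_string with its default value;
    # "command_groups" and "folder" are excluded since they are not sbatch parameters.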
zipped = zip(specs.args[-len(specs.defaults) :], specs.defaults) # type: ignore
return {key: val for key, val in zipped if key not in {"command_groups", "folder"}}
# pylint: disable=too-many-arguments,unused-argument, too-many-locals
def _make_sbatch_string(
command_groups: List[List[str]],
folder: Union[str, Path],
job_name: str = "nemo_launcher",
partition: Optional[str] = None,
time: int = 5,
nodes: Union[int, List[int]] = 1,
ntasks_per_node: Optional[Union[int, List[int]]] = None,
cpus_per_task: Optional[int] = None,
cpus_per_gpu: Optional[int] = None,
num_gpus: Optional[int] = None, # legacy
gpus_per_node: Optional[int] = None,
gpus_per_task: Optional[int] = None,
qos: Optional[str] = None, # quality of service
setup: Optional[List[str]] = None,
mem: Optional[str] = None,
mem_per_gpu: Optional[str] = None,
mem_per_cpu: Optional[str] = None,
dependency: Optional[str] = None,
comment: Optional[str] = None,
constraint: Optional[str] = None,
exclude: Optional[str] = None,
account: Optional[str] = None,
gres: Optional[str] = None,
exclusive: Optional[Union[bool, str]] = None,
array: Optional[str] = None,
stderr_to_stdout: bool = False,
container_image: Optional[str] = None,
container_mounts: Optional[str] = None,
additional_parameters: Optional[Dict[str, Any]] = None,
srun_args: Optional[Iterable[str]] = None,
heterogeneous: bool = False,
) -> str:
"""Creates the content of an sbatch file with provided parameters
Parameters
----------
See slurm sbatch documentation for most parameters:
https://slurm.schedmd.com/sbatch.html
Below are the parameters that differ from slurm documentation:
command_groups:
each command group will be assigned one srun
folder: str/Path
folder where print logs and error logs will be written
setup: list
        a list of commands to run in sbatch before running srun
additional_parameters: dict
Forces any parameter to a given value in sbatch. This can be useful
to add parameters which are not currently available in nemo_launcher.
Eg: {"mail-user": "[email protected]", "mail-type": "BEGIN"}
srun_args: List[str]
Add each argument in the list to the srun call
Raises
------
ValueError
In case an erroneous keyword argument is added, a list of all eligible parameters
is printed, with their default values
"""
nonslurm = [
"nonslurm",
"folder",
"command_groups",
"additional_parameters",
"setup",
"stderr_to_stdout",
"container_image",
"container_mounts",
"srun_args",
"heterogeneous",
]
parameters = {k: v for k, v in locals().items() if v is not None and k not in nonslurm}
# rename and reformat parameters
if num_gpus is not None:
warnings.warn('"num_gpus" is deprecated, please use "gpus_per_node" instead (overwritting with num_gpus)')
parameters["gpus_per_node"] = parameters.pop("num_gpus", 0)
if "cpus_per_gpu" in parameters and "gpus_per_task" not in parameters:
warnings.warn('"cpus_per_gpu" requires to set "gpus_per_task" to work (and not "gpus_per_node")')
# add necessary parameters
job_name = parameters.get("job_name")
paths = job_utils.JobPaths(folder=folder, job_name=job_name)
stdout = str(paths.stdout)
stderr = str(paths.stderr)
if array is not None:
stdout = stdout.replace("%j", "%A_%a")
stderr = stderr.replace("%j", "%A_%a")
parameters["output"] = stdout.replace("%t", "0")
if not stderr_to_stdout:
parameters["error"] = stderr.replace("%t", "0")
if NEMO_LAUNCHER_CI: # Override output file for slurm
parameters["output"] = parameters["error"] = str(paths.folder / "slurm_%j.out")
stdout = stderr = parameters["output"]
if additional_parameters is not None:
parameters.update(additional_parameters)
# now create
lines = ["#!/bin/bash", "", "# Parameters"]
if heterogeneous:
for i in range(len(nodes)):
het_parameters = parameters.copy()
het_parameters["output"] = parameters["output"].replace("_%j", f"_{i}_%j")
if "error" in parameters:
het_parameters["error"] = parameters["error"].replace("_%j", f"_{i}_%j")
het_parameters.update(
{
"job_name": f"{job_name}_{i}",
"nodes": nodes[i],
"ntasks_per_node": ntasks_per_node[i],
}
)
for k in sorted(parameters):
lines.append(_as_sbatch_flag(k, het_parameters[k]))
            if i != len(nodes) - 1:
                lines.append("#SBATCH hetjob")
else:
for k in sorted(parameters):
lines.append(_as_sbatch_flag(k, parameters[k]))
# environment setup:
if setup is not None:
lines += ["", "# setup"] + setup
# commandline (this will run the function and args specified in the file provided as argument)
# We pass --output and --error here, because the SBATCH command doesn't work as expected with a filename pattern
stderr_flags = [] if stderr_to_stdout else ["--error", stderr]
container_flags = ["--container-image", container_image] if container_image else []
container_flags += ["--container-mounts", container_mounts] if container_mounts else []
if srun_args is None:
srun_args = []
if NEMO_LAUNCHER_MEMORY_MEASURE:
srun_args += ["--overlap"]
mem_stdout = stdout.replace("_%j", "_mem_%j")
mem_stdout = mem_stdout.replace("_%A_%a", "_mem_%A_%a")
mem_srun_cmd = shlex.join(
["srun", "--ntasks=1", "--ntasks-per-node=1", "--output", mem_stdout, *container_flags, *srun_args]
)
lines += [
"",
"# run memory measure",
f"{mem_srun_cmd} \\",
f" nvidia-smi --query-gpu=timestamp,index,,memory.total,memory.free,memory.used --format=csv -l 1 & ",
"",
]
for group_ind, command_group in enumerate(command_groups):
if heterogeneous:
het_group = f"--het-group={group_ind}"
het_stdout = stdout.replace("_%j", f"_{group_ind}_%j")
het_stderr = stderr_flags.copy()
if het_stderr:
het_stderr[-1] = het_stderr[-1].replace("_%j", f"_{group_ind}_%j")
srun_cmd = shlex.join(["srun", "--output", het_stdout, *het_stderr, *container_flags, *srun_args, het_group])
command = ";\n ".join(command_group)
lines += [
"",
f"# command {group_ind + 1}",
f"{srun_cmd} bash -c \"",
f" {command} \" &",
"",
]
if group_ind == len(nodes) - 1:
lines += ["wait"]
else:
lines += ["sleep 30"]
else:
srun_cmd = shlex.join(["srun", "--output", stdout, *stderr_flags, *container_flags, *srun_args])
command = ";\n ".join(command_group)
lines += [
"",
f"# command {group_ind + 1}",
f"{srun_cmd} bash -c \"",
f" {command} \"",
"",
]
return "\n".join(lines)
def _convert_mem(mem_gb: float) -> str:
"""Convert non-integer mem_gb to unit MB"""
if mem_gb == int(mem_gb):
if int(mem_gb) == 0:
return "0"
return f"{int(mem_gb)}GB"
return f"{int(mem_gb * 1024)}MB"
def _as_sbatch_flag(key: str, value: Any) -> str:
"""Convert key value pairs to `#SBATCH --{key}={value}` flags"""
key = key.replace("_", "-")
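    # e.g. ("gpus_per_node", 8) -> "#SBATCH --gpus-per-node=8"; a True value emits a bare flag.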
if value is True:
return f"#SBATCH --{key}"
value = shlex.quote(str(value))
return f"#SBATCH --{key}={value}"
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/launchers.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import functools
import glob, os
import logging
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Optional
import omegaconf
from nemo_launcher.core.launchers import AutoLauncher
from nemo_launcher.utils.data_utils.prepare_squad import (
prepare_squad_for_fine_tuning,
prepare_squad_for_prompt_learning,
)
from nemo_launcher.utils.job_utils import JobPaths
from omegaconf import OmegaConf
class NemoMegatronStage:
"""
Base class for NeMo Megatron stages. All stages should build on top of this class.
Call `run` function to run current stage.
"""
def __init__(self, cfg):
self.cfg = cfg
self.cluster = cfg.get("cluster_type")
self.stage_name = None
self.stage_cfg = None
self.setup_stage_vars(cfg)
self.job_name = self.stage_cfg.run.get("name")
self.nodes_scheduler = {}
def setup_stage_vars(self, cfg: OmegaConf):
"""Setup the stage vars, i.e. stage name and stage cfg"""
raise NotImplementedError
def run(self) -> str:
"""
        Run the current stage.
:return: job id on slurm based system otherwise empty string
:rtype: str
"""
# Setup folders and datasets
self.setup_folder_and_data()
# Save stage hydra config
job_path = self.get_job_path()
if self.cfg.get('training').get('model').get('rampup_batch_size') and self.stage_name == 'training':
gpus = self.stage_cfg.get("trainer").get("devices")
self._find_optimal_nodes(self.cfg, gpus)
current_gbs = self._get_current_gbs(self.cfg)
nodes = self.nodes_scheduler[str(current_gbs)]
self.stage_cfg["trainer"]["num_nodes"] = nodes
self.cfg['training']["trainer"]["num_nodes"] = nodes
logging.info(f"global batch size and number of nodes will change following this schedule:\n {self.nodes_scheduler}")
stage_cfg_path = NemoMegatronStage.save_stage_hydra_config(self.stage_cfg, job_path)
# Make cluster parameters
cluster_parameters = self._make_cluster_parameters(self.cluster)
# Make command groups
command_groups = self.make_stage_command_groups(stage_cfg_path)
# Create launcher
launcher = AutoLauncher(folder=job_path.folder, cluster=self.cluster, **cluster_parameters,)
job_id = launcher.launch(command_groups=command_groups)
return job_id
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
job_path = self.get_job_path()
job_path.folder.mkdir(parents=True, exist_ok=True)
results_folder = job_path.results_folder
results_folder.mkdir(parents=True, exist_ok=True)
@staticmethod
def save_stage_hydra_config(stage_cfg: OmegaConf, job_path: JobPaths) -> Path:
"""
Interpolate and save hydra config file for current stage
:param OmegaConf stage_cfg: current stage's hydra configuration
:param JobPaths job_path: JobPaths object
:return: path current stage's essential nemo scripts code
:rtype: Path
"""
_hydra_interpolation(stage_cfg)
cfg_save_path = job_path.config_file
omegaconf.OmegaConf.save(stage_cfg, cfg_save_path)
return cfg_save_path
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
raise NotImplementedError
def _make_wandb_login_command(self) -> List[str]:
"""Make a command of login with w&b api key"""
cfg = self.cfg
wandb_cmd = ""
if cfg.wandb_api_key_file is not None:
with open(cfg.wandb_api_key_file, "r") as f:
wandb_api_key = f.readline().rstrip()
wandb_cmd = f"wandb login {wandb_api_key}"
return [wandb_cmd]
def _make_nemo_path_command(self) -> List[str]:
"""Extend nemo path to python path"""
return [
f"cd {self._nemo_code_path}",
"git rev-parse HEAD",
f'export PYTHONPATH={self._nemo_code_path}:\${{PYTHONPATH}}',
]
# def _make_numa_mapping_command(self) -> List[str]:
# """Make a command of numa mapping call"""
# cfg = self.cfg
# numa_cfg = cfg.get("numa_mapping")
# if not numa_cfg.get("enable"):
# return []
# numa_override = [f"{k}={v}" for k, v in numa_cfg.items()]
# numa_command = [
# f"python3 -u {self._launcher_scripts_path / 'nemo_launcher/collections/numa_mapping.py'}",
# *numa_override,
# ]
# numa_command = " \\\n ".join(numa_command)
# return [numa_command]
def _make_api_log_command_prefix(self, results_dir: str) -> str:
"""Make a command prefix of api logging"""
choice_model_type, choice_name = self.get_stage_config_choice()
api_log = self.cfg.get("api_log", False)
api_log_prefix = ""
if api_log:
api_log_path = os.path.join(results_dir, "api_logs")
api_log_prefix = (
"[[ \${SLURM_LOCALID} -eq 0 ]] && "
f"API_LOG_CMD='apiLog.sh -p {choice_model_type}/{choice_name} -v nemo_launcher' || API_LOG_CMD=''; "
f"LOGPATH={api_log_path} \${{API_LOG_CMD}}"
)
return api_log_prefix
def _make_nsys_command_prefix(self, results_dir: str) -> str:
"""Make a command prefix of nsys profiling"""
model_cfg = self.stage_cfg.get("model")
if not model_cfg:
return ""
nsys_cfg = model_cfg.get("nsys_profile", None)
nsys_prefix = ""
if nsys_cfg is not None and nsys_cfg.get("enabled", False):
profile_out_path = os.path.join(results_dir, "profile_logs")
os.makedirs(profile_out_path, exist_ok=True)
slurm_node = "\${SLURM_NODEID}"
slurm_rank = "\${SLURM_PROCID}"
slurm_jobid = "\${SLURM_JOB_ID}"
nsys_prefix = (
f"nsys profile -s none "
f"-t {','.join(nsys_cfg.trace)} "
f"-o {profile_out_path}/profile_{slurm_jobid}_node{slurm_node}_rank{slurm_rank} "
f"--force-overwrite true "
f"--capture-range=cudaProfilerApi "
f"--capture-range-end=stop"
)
return nsys_prefix
def _make_container_mounts_string(self) -> str:
"""
Make container mounting string based on hydra configurations
:return: container mounting string, e.g. "/path/to/A:/path/to/A,/path/to/B:/path/to/B,..."
:rtype: str
"""
def add_container_mounts(container_mounts):
mounts_str = ""
if container_mounts is not None:
assert isinstance(
container_mounts, omegaconf.listconfig.ListConfig
), "container_mounts must be a list."
for mount in container_mounts:
if mount is not None and isinstance(mount, str):
mounts_str += f",{mount}" if ":" in mount else f",{mount}:{mount}"
return mounts_str
cfg = self.cfg
data_dir = cfg.get("data_dir")
base_results_dir = cfg.get("base_results_dir")
mounts_string = f"{self._launcher_scripts_path}:{self._launcher_scripts_path},{data_dir}:{data_dir},{base_results_dir}:{base_results_dir}"
container_mounts = cfg.get("container_mounts")
mounts_string += add_container_mounts(container_mounts)
return mounts_string
def _make_cluster_parameters(self, cluster: str) -> Dict:
"""
Make a cluster-specific parameters for jobs on different clusters.
Current clusters include bcm(slurm), bcp and interactive.
For example for bcm, it will return slurm parameters:
{'job_name': 'some_name', 'nodes': 2, 'ntasks_per_node': 8, ...}
:param str cluster: i.e. `bcm`, `bcp`, `interactive`, etc.
:return: a dictionary of cluster parameters, e.g. `ntasks_per_node`
:rtype: Dict
"""
cfg = self.cfg
stage_cfg = self.stage_cfg
run_cfg = stage_cfg.get("run")
job_name = run_cfg.get("name")
time_limit = run_cfg.get("time_limit")
nodes = run_cfg.get("nodes")
dependency = run_cfg.get("dependency")
if nodes is None:
nodes = stage_cfg.get("trainer").get("num_nodes")
ntasks_per_node = run_cfg.get("ntasks_per_node")
if ntasks_per_node is None:
ntasks_per_node = stage_cfg.get("trainer").get("devices")
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
setup = None
env_vars = self.get_env_vars()
if env_vars:
setup = [f"export {k}={v}" for k, v in env_vars.items()]
cluster_parameters = {}
shared_parameters = {
"job_name": job_name,
"nodes": nodes,
"time": time_limit,
"ntasks_per_node": ntasks_per_node,
"setup": setup,
}
if cluster == "bcm":
cluster_cfg = cfg.get("cluster")
if cfg.get("training").get("model").get("ub_tp_comm_overlap", False):
if "srun_args" not in cluster_cfg:
cluster_cfg["srun_args"] = []
cluster_cfg["srun_args"] += ["--mpi=pmix"]
slurm_cfg = {**copy.deepcopy(cluster_cfg)}
job_name_prefix = slurm_cfg.pop("job_name_prefix")
cluster_parameters = {**slurm_cfg}
cluster_parameters.update(
{
**shared_parameters,
"dependency": dependency,
"container_image": container_image,
"container_mounts": container_mounts,
}
)
cluster_parameters["job_name"] = job_name_prefix + cluster_parameters["job_name"]
elif cluster == "bcp":
cluster_parameters.update(
{**shared_parameters, "env_vars": env_vars,}
)
elif cluster == "interactive":
cluster_parameters.update(shared_parameters)
return cluster_parameters
def _find_optimal_nodes(self, cfg, gpus) -> None:
nodes_scheduler_path = f"{cfg.get('training').get('run').get('results_dir')}/nodes_scheduler.json"
try:
with open(nodes_scheduler_path, 'r') as nodes_scheduler:
self.nodes_scheduler = json.load(nodes_scheduler)
except FileNotFoundError:
mbs = cfg.get('training').get('model').get('micro_batch_size')
gbs = cfg.get('training').get('model').get('global_batch_size')
rampup_bs = cfg.get('training').get('model').get('rampup_batch_size')
tp = cfg.get('training').get('model').get('tensor_model_parallel_size')
pp = cfg.get('training').get('model').get('pipeline_model_parallel_size')
num_nodes = cfg.get('training').get('trainer').get('num_nodes')
start_bs = rampup_bs[0]
increment = rampup_bs[1]
cbs = start_bs
rbs = [start_bs]
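            # Enumerate the ramp-up schedule of global batch sizes:
            # start_bs, start_bs + increment, ... up to the final global batch size.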
while cbs <= (gbs - increment):
rbs.append(rbs[-1] + increment)
cbs += increment
self.nodes_scheduler[str(gbs)] = num_nodes
for b in rbs[::-1][1:]:
optimal_lst = []
prev = int(min(list(self.nodes_scheduler.values())))
for nodes in range(1, prev + 1):
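                    # A node count qualifies only if the batch size divides evenly across the
                    # data-parallel ranks and across all GPUs available at that node count.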
dp = (gpus * nodes) // (tp * pp)
if b % (mbs * dp) == 0 and b % (mbs * gpus * nodes) == 0 and nodes <= prev:
optimal_lst.append(nodes)
self.nodes_scheduler[str(b)] = max(optimal_lst)
sched_rbs = [int(i) for i in self.nodes_scheduler.keys()]
assert rbs[::-1] == sched_rbs, (
"please, make sure you enter the correct combination of"
" ramp up batch size and number of nodes"
)
with open(nodes_scheduler_path, 'w') as nodes_scheduler:
nodes_scheduler.write(json.dumps(self.nodes_scheduler))
def _get_current_gbs(self, cfg):
start_bs = cfg.get('training').get('model').get('rampup_batch_size')[0]
results_dir = cfg.get('training').get('run').get('results_dir')
os.chdir(results_dir)
job_numbers = []
try:
for file in glob.glob("*.out"):
file = file.split('_')[-1].split('.')[0]
job_numbers.append(int(file))
job_number = max(job_numbers)
last_job = glob.glob(f"*{job_number}.out")[0]
with open(last_job, 'r') as logs:
logs = logs.read()
current_gbs = re.findall(r'global_batch_size=(\d+)', logs)[-1]
        except Exception:
current_gbs = start_bs
return current_gbs
def get_env_vars(self) -> Dict:
"""
Set up dictionary for environment variables
The environment variables from hydra config will be set inside the job scripts.
For Example:
            Set `env_vars.NVTE_BIAS_DROPOUT_FUSION=1` while calling the launcher scripts,
`NVTE_BIAS_DROPOUT_FUSION=1` will be set while running the job.
:return: a dictionary of env vars while running the job.
:rtype: Dict
"""
env_vars = {k: v for k, v in self.cfg.get("env_vars").items() if v is not None}
return env_vars
def get_stage_config_choice(self):
"""
Return current stages config's corresponding `choice_model_type` and `choice_name`
For example, if `training=gpt3/5b`, then `choice_model_type=gpt3` and `choice_name=5b`
"""
stage_config_choice = self.cfg.get(f"{self.stage_name}_config")
choice_model_type = stage_config_choice.rsplit("/", 1)[0]
choice_name = stage_config_choice.rsplit("/", 1)[1]
return choice_model_type, choice_name
@property
def _launcher_scripts_path(self) -> Path:
return Path(self.cfg.get("launcher_scripts_path"))
@property
def _nemo_code_path(self) -> Path:
return Path("/opt/NeMo")
@property
def _data_dir(self) -> Path:
return Path(self.cfg.get("data_dir"))
@property
def _rlhf_code_path(self) -> Path:
return Path("/opt/nemo-rlhf")
@property
def _cuda_visible_devices(self) -> str:
ntasks_per_node = self.stage_cfg.run.get("ntasks_per_node")
if ntasks_per_node is None:
ntasks_per_node = self.stage_cfg.trainer.get("devices", 1)
return (
"CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7"
if ntasks_per_node == 8
else f"CUDA_VISIBLE_DEVICES={','.join(map(str, range(ntasks_per_node)))}"
)
@property
def _cuda_device_max_connections(self) -> str:
model_cfg = self.stage_cfg.get("model")
if not model_cfg:
return ""
tensor_model_parallel_size = model_cfg.get("tensor_model_parallel_size", 1)
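        # With tensor parallelism (TP > 1), CUDA_DEVICE_MAX_CONNECTIONS=1 keeps kernels issued
        # in order so that communication can overlap with compute, as Megatron-style TP expects.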
return "CUDA_DEVICE_MAX_CONNECTIONS=1" if tensor_model_parallel_size > 1 else ""
@property
def _nvte_bias_gelu_nvfusion(self) -> str:
"""Only used in pretraining; override in training class"""
return ""
@functools.lru_cache()
def get_job_path(self, sub_stage: Optional = None) -> JobPaths:
"""Fetch a JobPaths object for current stage"""
run_cfg = self.stage_cfg.get("run")
results_dir = Path(run_cfg.get("results_dir")) # TODO: rename this to job dir in config
if sub_stage is not None:
results_dir = results_dir / sub_stage
return JobPaths(results_dir, self.job_name)
@property
def _set_ln_sm_margin(self) -> str:
""" Set LayerNorm SM margin when using P2P communication overlap to support the overlap with LayerNorm kernel """
if (self.cfg.training.model.get("overlap_p2p_comm", False) and
self.cfg.training.model.get("pipeline_model_parallel_size") > 1 and
self.cfg.training.model.get("virtual_pipeline_model_parallel_size") > 1):
get_ln_sm_margin_command = (
f"python3 {self._launcher_scripts_path / 'nemo_launcher/collections/conditional_cfgs.py'} "
f"name=get_ln_sm_margin"
)
return f"NVTE_FWD_LAYERNORM_SM_MARGIN=\$({get_ln_sm_margin_command}) NVTE_BWD_LAYERNORM_SM_MARGIN=\$({get_ln_sm_margin_command})"
return ""
@property
def _skip_ag_overlap(self) -> str:
""" Skip TP-AllGather overlap with ring-exchange at (1) bf16 and (2) PP > 1 """
if (self.cfg.training.model.get("ub_tp_comm_overlap", False) and
self.cfg.training.model.get("pipeline_model_parallel_size") > 1):
use_fp8 = self.cfg.training.model.get("fp8", False)
get_ag_overlap_command = (
f"python3 {self._launcher_scripts_path / 'nemo_launcher/collections/conditional_cfgs.py'} "
f"name=get_ag_overlap "
f"fp8={use_fp8} "
)
return f"NVTE_UB_SPLIT_AG=\$({get_ag_overlap_command})"
return ""
class NeMoStage(NemoMegatronStage):
"""
Stage is a nemo stage if it uses a nemo scripts
Current nemo stage includes:
- pretraining
- fine-tuning
- prompt-learning
- t5/mt5 eval
GPT3 eval is not a NeMo stage because it uses eval-harness inside nemo_launcher collections.
"""
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
Command groups is a list of command group. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
# Training has one command group
# Shared with fine-tuning and prompt learning
command_groups = [[]]
command_groups[0] += self._make_wandb_login_command()
command_groups[0] += self._make_nemo_path_command()
# command_groups[0] += self._make_numa_mapping_command()
# _cuda_device_max_connections and _cuda_visible_devices cannot be used as command prefix on BCP
if self.cluster == "bcp":
core_command = []
else:
core_command = [
self._cuda_device_max_connections,
self._cuda_visible_devices,
self._set_ln_sm_margin,
self._skip_ag_overlap,
self._nvte_bias_gelu_nvfusion,
]
core_command += [
self._make_api_log_command_prefix(results_dir=self.get_job_path().results_folder),
self._make_nsys_command_prefix(results_dir=self.get_job_path().results_folder),
self._make_nemo_call_string(stage_cfg_path),
]
core_command_string = " ".join([c for c in core_command if c])
command_groups[0] += [core_command_string]
command_groups = clean_command_groups(command_groups)
return command_groups
def _make_nemo_call_string(self, stage_cfg_path: Path) -> str:
"""
Make nemo scripts calling command string
This is for current nemo stage's essential nemo script calling.
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command string of nemo script calling
:rtype: str
"""
choice_model_type, choice_name = self.get_stage_config_choice()
code_path = self._get_nemo_code_path(choice_model_type)
hydra_override = self._make_hydra_override()
command = [
f"python3 -u {code_path} ",
f"--config-path={stage_cfg_path.parents[0]}",
f"--config-name={stage_cfg_path.name}",
*hydra_override,
]
command_string = " \\\n ".join(command)
return command_string
def _make_hydra_override(self) -> List:
"""
Override some existing hydra configurations if necessary.
Example use cases are:
            1. For bcp cluster, `+rank=\${RANK}` is required when running some NeMo scripts.
                Existing hydra config doesn't have a `rank` field, so we add it on the fly.
2. Auto blend training dataset by overwriting empty `model.data.data_prefix` as
`model.data.data_prefix=\$({auto_blend_command})`. Existing `model.data.data_prefix`
could be None in cfg, so we overwrite it in this function.
"""
hydra_override = []
if self.cluster == "bcp":
hydra_override += ["+rank=\${RANK}"]
return hydra_override
def get_env_vars(self) -> Dict:
"""
Set up dictionary for environment variables
The environment variables from hydra config will be set inside the job scripts.
For Example:
            Set `env_vars.NVTE_BIAS_DROPOUT_FUSION=1` while calling the nemo_launcher scripts,
            and `NVTE_BIAS_DROPOUT_FUSION=1` will be set while running the job.
:return: a dictionary of env vars while running the job.
:rtype: Dict
"""
env_vars = super().get_env_vars()
devices = self.stage_cfg.trainer.get("devices", 1)
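        # Outside of Slurm ("bcm") nothing sets SLURM_NTASKS_PER_NODE, so mirror the per-node device count for scripts that expect it.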
if self.cluster != "bcm":
env_vars["SLURM_NTASKS_PER_NODE"] = devices
if self.cluster == "bcp": # Set env prefix as env var on BCP
for env_var_str in [self._cuda_device_max_connections, self._cuda_visible_devices, self._set_ln_sm_margin, self._skip_ag_overlap,]:
if env_var_str:
var_name, var_val = env_var_str.split("=")
env_vars[var_name] = var_val
return env_vars
class Training(NeMoStage):
"""Stage class of pretraining with NeMo scripts"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "training"
self.stage_cfg = cfg.get("training")
def _make_hydra_override(self) -> List:
"""
Override some existing hydra configurations if necessary.
Example use cases are:
            1. For bcp cluster, `+rank=\${RANK}` is required when running some NeMo scripts.
                Existing hydra config doesn't have a `rank` field, so we add it on the fly.
            2. Auto blend training dataset by overwriting empty `model.data.data_prefix` as
                `model.data.data_prefix=\$({auto_blend_command})`. Existing `model.data.data_prefix`
                could be None in cfg, so we overwrite it in this function.
        :return: hydra overrides added to the nemo script call
        :rtype: List[str]
"""
hydra_override = []
choice_model_type, choice_name = self.get_stage_config_choice()
if self.cluster == "bcp":
hydra_override += ["+rank=\${RANK}"]
if self.stage_cfg.model.data.get("data_prefix", None) is None:
preprocessed_dir = self.stage_cfg.run.get("preprocessed_dir")
blending_alpha = self.stage_cfg.run.get("blending_alpha")
auto_blend_command = (
f"python3 {self._launcher_scripts_path / 'nemo_launcher/collections/auto_blend.py'} "
f"model_type={choice_model_type} "
f"preprocessed_dir={preprocessed_dir} "
f"blending_alpha={blending_alpha}"
)
hydra_override += [f"model.data.data_prefix=\$({auto_blend_command})"]
if self.stage_cfg.model.get("ub_tp_comm_overlap", False):
ub_cfg_name = self._get_ub_cfg_override()
            hydra_override += [f"'+tp_overlap@model.ub_tp_comm_overlap_cfg={ub_cfg_name}'"]
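        # Cap the garbage-collection interval at the trainer's val_check_interval before passing it through.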
if self.stage_cfg.model.get("gc_interval", 0) > 1:
gc_interval = min(self.stage_cfg.model.get("gc_interval"), self.cfg.training.trainer.get("val_check_interval"))
hydra_override += [f"model.gc_interval={gc_interval}"]
return hydra_override
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
model_type_to_code_path = {
"t5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_pretraining.py",
"mt5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_pretraining.py",
"gpt3": self._nemo_code_path / "examples/nlp/language_modeling/megatron_gpt_pretraining.py",
"bert": self._nemo_code_path / "examples/nlp/language_modeling/megatron_bert_pretraining.py",
}
return model_type_to_code_path[model_type]
def _get_ub_cfg_override(self) -> str:
"""
Spawn the script to search UB configuration file
"""
tp_size = self.stage_cfg.model.get("tensor_model_parallel_size")
hidden_size = self.stage_cfg.model.get("hidden_size")
mb_size = self.stage_cfg.model.get("micro_batch_size")
seqlen = self.stage_cfg.model.get("encoder_seq_length")
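        # `\${gpu_name:}` is escaped so it is resolved later by the training job's config, letting the UB config file match the GPU the job actually runs on.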
cfg_name = f"ub_cfg_\\${{gpu_name:}}_h{hidden_size}_tp{tp_size}_mbs{mb_size}_seqlen{seqlen}"
return cfg_name
class FineTuning(NeMoStage):
"""Stage class of fine-tuning with NeMo scripts"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "fine_tuning"
self.stage_cfg = cfg.get("fine_tuning")
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
super().setup_folder_and_data()
# Prepare fine-tuning dataset
data_dir = self.cfg.get("data_dir")
task_name = self.stage_cfg.run.get("task_name")
# GLUE for internal use
download_glue_script_path = self._launcher_scripts_path / "nemo_launcher/utils/data_utils/download_glue.py"
if download_glue_script_path.exists():
from nemo_launcher.utils.data_utils.download_glue import TASKS_LOWER, download_glue
if task_name in TASKS_LOWER:
download_glue(data_dir=os.path.join(data_dir, "glue_data"), tasks=task_name)
# Prepare dataset for squad
if task_name in ["squad", "xquad"]:
prepare_squad_for_fine_tuning(data_dir=os.path.join(data_dir, "squad_data"))
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
model_type_to_code_path = {
"gpt3" : self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_gpt_sft.py",
"t5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py",
"mt5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py",
}
return model_type_to_code_path[model_type]
class PromptLearning(NeMoStage):
"""Stage class of prompt-learning with NeMo scripts"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "prompt_learning"
self.stage_cfg = cfg.get("prompt_learning")
def setup_folder_and_data(self) -> None:
"""Setup job/data folders and fine-tuning/prompt-learning dataset"""
# Setup folders
super().setup_folder_and_data()
# Prepare prompt learning dataset
data_dir = self.cfg.get("data_dir")
task_name = self.stage_cfg.run.get("task_name")
# Prepare squad dataset
if task_name == 'squad':
prepare_squad_for_prompt_learning(
os.path.join(data_dir, "prompt_data"), self._launcher_scripts_path,
)
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
model_type_to_code_path = {
"gpt3": self._nemo_code_path / "examples/nlp/language_modeling/megatron_gpt_prompt_learning.py",
"t5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_prompt_learning.py",
"mt5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_prompt_learning.py",
}
return model_type_to_code_path[model_type]
class AdapterLearning(PromptLearning):
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "adapter_learning"
self.stage_cfg = cfg.get("adapter_learning")
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
model_type_to_code_path = {
"gpt3": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_gpt_adapter_tuning.py",
"t5": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_t5_adapter_tuning.py",
}
return model_type_to_code_path[model_type]
class IA3Learning(PromptLearning):
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "ia3_learning"
self.stage_cfg = cfg.get("ia3_learning")
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
model_type_to_code_path = {
"gpt3": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_gpt_ia3_tuning.py",
"t5": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_t5_ia3_tuning.py",
}
return model_type_to_code_path[model_type]
class Conversion(NemoMegatronStage):
"""Stage class of converting training checkpoints to .nemo format"""
def setup_stage_vars(self, cfg: OmegaConf):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "conversion"
self.stage_cfg = cfg.get("conversion")
def _make_hparams_override_command(self):
"""
Make the command string to override some fields in hparams.yaml file while converting checkpoint into .nemo format
:return: command string for hparams override with the script in collections
:rtype: str
"""
model_cfg = self.stage_cfg.get("model")
hparams_file = model_cfg.get("hparams_file")
vocab_file = model_cfg.get("vocab_file")
merge_file = model_cfg.get("merge_file")
tokenizer_model = model_cfg.get("tokenizer_model")
override_configs = {
"hparams_file": hparams_file,
"output_path": self.get_job_path().results_folder,
"vocab_file": vocab_file,
"merge_file": merge_file,
"tokenizer_model": tokenizer_model,
}
hparams_override = [f"{k}={v}" for k, v in override_configs.items()]
override_command = [
f"python3 -u {self._launcher_scripts_path / 'nemo_launcher/collections/hparams_override.py'}",
*hparams_override,
]
override_command = " \\\n ".join(override_command)
return [override_command]
def _make_checkpoint_search_command(self, **kwargs: Any) -> str:
"""
Make the command string to search for the latest checkpoint inside checkpoint folder
:param Path **kwargs: checkpoint search script's argument override
:return: command string for searching for latest checkpoint with the script in collections
:rtype: str
"""
checkpoint_override = [f"{k}={v}" for k, v in kwargs.items()]
return (
f"python3 {self._launcher_scripts_path / 'nemo_launcher/collections/checkpoint_search.py'} "
f"{' '.join(checkpoint_override)}"
)
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
        Command groups is a list of command groups. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
command_groups = [[], []]
command_groups[0] += self._make_hparams_override_command()
run_cfg = self.stage_cfg.get("run")
model_cfg = self.stage_cfg.get("model")
checkpoint_search_command = self._make_checkpoint_search_command(
checkpoint_folder=model_cfg.get("checkpoint_folder"),
checkpoint_name=model_cfg.get("checkpoint_name"),
tensor_model_parallel_size=model_cfg.get("tensor_model_parallel_size"),
pipeline_model_parallel_size=model_cfg.get("pipeline_model_parallel_size"),
)
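        # The checkpoint search runs inside the job via $(...), so the latest checkpoint is resolved at execution time.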
command_groups[-1] += [f"export CKPT_NAME=$({checkpoint_search_command})"]
nemo_file_name = run_cfg.get("nemo_file_name")
hparams_override_file = self.get_job_path().results_folder / "hparams_override.yaml"
nemo_file_path = self.get_job_path().results_folder / nemo_file_name
code_path = self._nemo_code_path / "examples/nlp/language_modeling/megatron_ckpt_to_nemo.py"
args = create_args_list(
replace_underscore=False,
gpus_per_node=run_cfg.get("ntasks_per_node"),
model_type=model_cfg.get("model_type"),
checkpoint_folder=model_cfg.get("checkpoint_folder"),
checkpoint_name="\${CKPT_NAME}",
hparams_file=hparams_override_file,
nemo_file_path=nemo_file_path,
tensor_model_parallel_size=model_cfg.get("tensor_model_parallel_size"),
pipeline_model_parallel_size=model_cfg.get("pipeline_model_parallel_size"),
)
if model_cfg.get("pipeline_model_parallel_split_rank") is not None:
args += create_args_list(
replace_underscore=False,
pipeline_model_parallel_split_rank=model_cfg.get("pipeline_model_parallel_split_rank"),
)
args += ["--bcp"] if self.cluster == "bcp" else []
core_command = [f"python3 -u {code_path}", *args]
core_command_string = " \\\n ".join(core_command)
command_groups[-1] += [core_command_string]
command_groups = clean_command_groups(command_groups)
return command_groups
class NeMoEvaluation(NeMoStage):
"""
Stage class of gpt3/t5/mt5 evaluation with NeMo scripts
Including: fine-tuning eval, prompt-learning eval, adapter/ia3 learning eval
"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "evaluation"
self.stage_cfg = cfg.get("evaluation")
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
        Command groups is a list of command groups. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
command_groups = super().make_stage_command_groups(stage_cfg_path)
choice_model_type, choice_name = self.get_stage_config_choice()
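        # Two scoring paths: prompt/IA3/adapter evals score their prediction file with the SQuAD metric script directly,
        # while fine-tuned SQuAD evals post-process validation predictions with the fine-tuning metric calculator.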
        if any(choice_model_type.startswith(prefix) for prefix in ["prompt", "ia3", "adapter"]):
pred_file_path = self.stage_cfg.get("pred_file_path")
ground_truth_file_path = self.stage_cfg.get("ground_truth_file_path")
code_path = (
self._launcher_scripts_path / "nemo_launcher/collections/metric_calculation/squad_metric_calc.py"
)
args = create_args_list(pred=pred_file_path, ground_truth=ground_truth_file_path,)
split_string = self.stage_cfg.get("split_string", None)
if split_string:
args += create_args_list(split_string=f"'{split_string}'")
calculation_command = [f"python3 {code_path}", *args]
calculation_command = " \\\n ".join(calculation_command)
elif choice_name == "squad":
output_file_path_prefix = self.stage_cfg.model.data.validation_ds.get("output_file_path_prefix")
pred_file_path = output_file_path_prefix + "_validation_dataloader0_inputs_preds_labels.jsonl"
ground_truth_file_path = self.stage_cfg.model.data.validation_ds.get("ground_truth_file_path")
code_path = (
self._launcher_scripts_path / "nemo_launcher/collections/metric_calculation/fine_tuning_metric_calc.py"
)
args = create_args_list(
replace_underscore=False,
pred_file=pred_file_path,
target_file=ground_truth_file_path,
squad_eval_script_path=self._launcher_scripts_path
/ "nemo_launcher/collections/metric_calculation/squad_metric_calc.py",
)
calculation_command = [f"python3 {code_path}", *args]
calculation_command = " \\\n ".join(calculation_command)
else:
calculation_command = None
if calculation_command is not None:
command_groups += [[calculation_command]]
return command_groups
def _get_nemo_code_path(self, model_type: str) -> Path:
"""
        Provide the essential NeMo code path for running the stage; different model types usually use different NeMo scripts.
        For example, `megatron_t5_pretraining.py` for t5 and `megatron_gpt_pretraining.py` for gpt3.
        :param str model_type: e.g. `gpt3`, `t5`, `mt5`, etc.
        :return: path to the current stage's essential NeMo script
:rtype: Path
"""
if model_type in ["gpt3", "prompt_gpt3"]:
raise ValueError("Evaluating GPT-3 models needs `EvalHarnessEvaluation` class.")
model_type_to_code_path = {
"t5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py",
"mt5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py",
"prompt_t5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_prompt_learning_eval.py",
"prompt_mt5": self._nemo_code_path / "examples/nlp/language_modeling/megatron_t5_prompt_learning_eval.py",
"ia3_t5": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_t5_ia3_eval.py",
"ia3_gpt3": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_gpt_ia3_eval.py",
"adapter_t5": self._nemo_code_path / "examples/nlp/language_modeling/tuning/megatron_t5_adapter_eval.py",
"adapter_gpt3": self._nemo_code_path
/ "examples/nlp/language_modeling/tuning/megatron_gpt_adapter_eval.py",
}
return model_type_to_code_path[model_type]
class EvalHarnessEvaluation(NemoMegatronStage):
"""Stage class of gpt-3 evaluation harness"""
def __init__(self, cfg):
super().__init__(cfg)
choice_model_type, choice_name = self.get_stage_config_choice()
self.prompt_evaluation = choice_model_type == "prompt_gpt3"
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "evaluation"
self.stage_cfg = cfg.get("evaluation")
def _make_download_command_string(self) -> str:
"""
Make dataset download command for evaluation harness.
:return: command string of downloading evaluation data
:rtype: str
"""
data_dir = self.cfg.get("data_dir")
cache_dir = os.path.join(data_dir, "eval_harness_data")
run_cfg = self.stage_cfg.get("run")
tasks = run_cfg.get("tasks")
code_path = self._launcher_scripts_path / "nemo_launcher/collections/eval_harness/download.py"
args = create_args_list(tasks=tasks, cache_dir=cache_dir,)
download_command = [f"python3 {code_path}", *args]
download_command_string = " \\\n ".join(download_command)
return download_command_string
def make_stage_command_groups(self, stage_cfg_path: Path) -> List[List[str]]:
"""
Make the command groups for current stage
        Command groups is a list of command groups. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
if self.prompt_evaluation:
command_groups = [[]]
else:
command_groups = [[], []]
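            # Regular (non-prompt) evaluation downloads the eval-harness datasets in a separate first command group;
            # prompt-learning evaluation supplies its own prompt dataset and skips this step.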
command_groups[0] += [self._make_download_command_string()]
data_dir = self.cfg.get("data_dir")
cache_dir = os.path.join(data_dir, "eval_harness_data")
run_cfg = self.stage_cfg.get("run")
model_cfg = self.stage_cfg.get("model")
code_path = self._launcher_scripts_path / "nemo_launcher/collections/eval_harness/evaluate.py"
args = create_args_list(
replace_underscore=False,
name=run_cfg.get("name"),
model=model_cfg.get("model_type"),
tasks=run_cfg.get("tasks"),
cache_dir=cache_dir,
output_path=self.get_job_path().results_folder,
batch_size=model_cfg.get("eval_batch_size"),
tensor_model_parallel_size=model_cfg.get("tensor_model_parallel_size"),
pipeline_model_parallel_size=model_cfg.get("pipeline_model_parallel_size"),
precision=model_cfg.get("precision"),
)
if self.prompt_evaluation:
args += create_args_list(
replace_underscore=False,
nemo_model=model_cfg.get("nemo_model"),
prompt_dataset_paths=model_cfg.get("prompt_dataset_paths"),
)
else:
# GPT evaluation
args += create_args_list(
replace_underscore=False,
vocab_file=model_cfg.get("vocab_file"),
merge_file=model_cfg.get("merge_file"),
nemo_model=model_cfg.get("nemo_model"),
checkpoint_folder=model_cfg.get("checkpoint_folder"),
checkpoint_name=model_cfg.get("checkpoint_name"),
hparams_file=model_cfg.get("hparams_file"),
)
core_command = [f"python3 -u {code_path}", *args]
core_command_string = " \\\n ".join(core_command)
command_groups[-1] += [core_command_string]
command_groups = clean_command_groups(command_groups)
return command_groups
def clean_command_groups(command_groups: List[List[str]]) -> List[List[str]]:
"""
    Remove empty command strings from each command group
    :param List[List[str]] command_groups: a list of command groups, each a list of command strings
:return: cleaned command groups
:rtype: List[List[str]]
"""
for ind, command_group in enumerate(command_groups):
command_groups[ind] = [c for c in command_group if c]
return command_groups
def _hydra_interpolation(cfg: OmegaConf) -> None:
"""
Interpolate hydra config values in cfg object, bypassing lazy interpolation
:param OmegaConf cfg: OmegaConf object with the config to be interpolated
:return: None
"""
def interpolate(cfg: OmegaConf):
if isinstance(cfg, omegaconf.dictconfig.DictConfig):
for k, v in cfg.items():
cfg[k] = interpolate(v)
elif isinstance(cfg, omegaconf.listconfig.ListConfig):
for i, v in enumerate(cfg):
cfg[i] = interpolate(v)
return cfg
interpolate(cfg)
def create_args_list(hydra: bool = False, replace_underscore: bool = True, **kwargs: Any,) -> List[str]:
"""
An easy tool function to convert arguments into a list of argument strings.
For example, `create_args_list(a=123, b=456)` will generate `['--a=123', '--b=456']`.
    :param bool hydra: whether the arguments are hydra-style; `--` is prepended to regular (non-hydra) arguments
    :param bool replace_underscore: whether to replace `_` with `-` in argument names
    :param Any **kwargs: argument names and their values
:return: A list of argument strings, e.g. `['--a=123', '--b=456', ...]`
:rtype: List[str]
"""
args = []
for k, v in kwargs.items():
if hydra:
args.append(f"{k}={v}")
else:
# use "store_true" to add keys only args
if replace_underscore:
k = k.replace("_", "-")
args.append(f"--{k}" if v == "store_true" else f"--{k}={v}")
return args
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/stages.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
from pathlib import Path
from typing import Dict, List
from nemo_launcher.core.launchers import AutoLauncher
from nemo_launcher.core.stages import NemoMegatronStage, clean_command_groups
FT_PATH = Path("/opt/FasterTransformer")
FT_BACKEND_PATH = Path("/opt/fastertransformer_backend")
# for debugging
FT_PATH_WITH_BUILD = FT_PATH
FT_PATH = Path(os.environ.get("FT_PATH", FT_PATH))
class Export(NemoMegatronStage):
"""
Stage is a FasterTransformer export stage.
It includes two steps:
- NeMo to FasterTransformer checkpoint export.
- Triton model configuration.
"""
def setup_stage_vars(self, cfg):
"""Setup the stage vars, i.e. stage name and stage cfg"""
self.stage_name = "export"
self.stage_cfg = cfg.get("export")
def _make_checkpoint_search_command(self, **kwargs):
checkpoint_override = [f"{k}={v}" for k, v in kwargs.items()]
return (
f"python3 {self._launcher_scripts_path / 'nemo_launcher/collections/checkpoint_search.py'} "
f"{' '.join(checkpoint_override)}"
)
def make_stage_command_groups(self, stage_cfg_path, sub_stage=None,) -> List[List[str]]:
"""
Make the command groups for current stage
        Command groups is a list of command groups. A command group is defined as:
0. Command group is a list of command strings
1. Each command group occupies one bcprun, srun or bash
2. Each command group eventually has multiple commands connected by ";"
:param Path stage_cfg_path: path to interpolated and saved configuration
:return: command groups for current stage
:rtype: List[List[str]]
"""
command_groups = [[]]
command_groups[0] += self._make_sub_stage_command(sub_stage)
command_groups = clean_command_groups(command_groups)
return command_groups
def _make_sub_stage_command(self, sub_stage):
"""
Make the command group for current stage
It occupies one bcprun, srun or bash.
        :return: list of command strings for the current sub-stage
        :rtype: List[str]
"""
choice_model_type, choice_name = self.get_stage_config_choice()
cmds_fn = {
"convert": {
"gpt3": self._get_gpt_conversion_cmds,
"t5": self._get_t5_conversion_cmds,
"mt5": self._get_t5_conversion_cmds,
},
}[sub_stage][choice_model_type]
return cmds_fn(self.cfg)
def _make_sub_stages(self):
sub_stages = ["convert"]
return sub_stages
    def setup_folder_and_data(self) -> None:
        """Setup the job and results folders required for export"""
job_path = self.get_job_path()
job_path.folder.mkdir(parents=True, exist_ok=True)
results_folder = job_path.results_folder
results_folder.mkdir(parents=True, exist_ok=True)
def run(self) -> str:
"""Execute export stage"""
# Setup folders and datasets
self.setup_folder_and_data()
sub_stages = self._make_sub_stages()
job_id = ""
for sub_stage in sub_stages:
# Save stage hydra config
job_path = self.get_job_path(sub_stage)
job_path.folder.mkdir(parents=True, exist_ok=True)
stage_cfg_path = NemoMegatronStage.save_stage_hydra_config(self.stage_cfg, job_path)
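            # Chain consecutive sub-stages: each launched job waits on the previous one via Slurm's `aftercorr` dependency.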
if job_id:
dependency = f"aftercorr:{job_id}"
self.stage_cfg["run"]["dependency"] = dependency
# Make cluster parameters
cluster_parameters = self._make_cluster_parameters(self.cluster, sub_stage)
# Make command groups
command_groups = self.make_stage_command_groups(stage_cfg_path, sub_stage)
# Create launcher
launcher = AutoLauncher(folder=job_path.folder, cluster=self.cluster, **cluster_parameters,)
job_id = launcher.launch(command_groups=command_groups)
return job_id
def _make_cluster_parameters(self, cluster: str, sub_stage=None,) -> Dict:
"""Prepare cluster configuration"""
cfg = self.cfg
stage_cfg = self.stage_cfg
ft_model_cfg = stage_cfg.get("model")
triton_cfg = stage_cfg.get("triton_deployment")
run_cfg = stage_cfg.get("run")
job_name = run_cfg.get("name")
time_limit = run_cfg.get("time_limit")
dependency = run_cfg.get("dependency")
container_image = cfg.get("container")
container_mounts = self._make_container_mounts_string()
num_tasks = ft_model_cfg.tensor_model_parallel_size * triton_cfg.pipeline_model_parallel_size
nodes = 1
ntasks_per_node = 1
setup = None
env_vars = self.get_env_vars()
if env_vars:
setup = [f"export {k}={v}" for k, v in env_vars.items()]
cluster_parameters = {}
shared_parameters = {
"job_name": job_name,
"nodes": nodes,
"time": time_limit,
"ntasks_per_node": ntasks_per_node,
"setup": setup,
}
if cluster == "bcm":
cluster_cfg = cfg.get("cluster")
slurm_cfg = {**copy.deepcopy(cluster_cfg)}
job_name_prefix = slurm_cfg.pop("job_name_prefix")
cluster_parameters = {**slurm_cfg}
cluster_parameters.update(
{
**shared_parameters,
"dependency": dependency,
"container_image": container_image,
"container_mounts": container_mounts,
}
)
cluster_parameters["job_name"] = job_name_prefix + cluster_parameters["job_name"]
elif cluster == "bcp":
cluster_parameters.update(
{**shared_parameters, "env_vars": env_vars,}
)
elif cluster == "interactive":
cluster_parameters.update(shared_parameters)
return cluster_parameters
def _get_gpt_conversion_cmds(self, cfg):
""" Generate export commands for GPT-3 models"""
run_cfg = cfg.export.run
ft_model_cfg = cfg.export.model
triton_cfg = cfg.export.triton_deployment
tokenizer_cfg = cfg.training.model.tokenizer
checkpoint_path = ft_model_cfg.checkpoint_path
triton_model_dir = triton_cfg.triton_model_dir
nemo_megatron_scripts_path = Path(cfg.launcher_scripts_path)
converter_path = FT_PATH / "examples/pytorch/gpt/utils/nemo_ckpt_convert.py"
prepare_model_config_script_path = (
nemo_megatron_scripts_path / "nemo_launcher/collections/export_scripts/prepare_triton_model_config.py"
)
template_path = FT_BACKEND_PATH / "all_models/gpt/fastertransformer/config.pbtxt"
triton_model_version_dir = f"{triton_model_dir}/1"
convert_cmd = (
f"python -u {converter_path} \\\n"
f" --in-file {checkpoint_path} \\\n"
f" --saved-dir {triton_model_version_dir} \\\n"
f" --infer-gpu-num {ft_model_cfg.tensor_model_parallel_size} \\\n"
f" --weight-data-type {ft_model_cfg.weight_data_type} \\\n"
f" --vocab-path {tokenizer_cfg.vocab_file} \\\n"
f" --merges-path {tokenizer_cfg.merge_file} \\\n"
f" --processes {ft_model_cfg.processes} \\\n"
f" --load-checkpoints-to-cpu {int(ft_model_cfg.load_checkpoints_to_cpu)}"
)
triton_prepare_model_config_cmd = (
f"python -u {prepare_model_config_script_path} \\\n"
f" --model-train-name {run_cfg.model_train_name} \\\n"
f" --template-path {template_path} \\\n"
f" --ft-checkpoint {triton_model_version_dir}/{ft_model_cfg.tensor_model_parallel_size}-gpu \\\n"
f" --config-path {triton_model_dir}/config.pbtxt \\\n"
f" --max-batch-size {triton_cfg.max_batch_size} \\\n"
f" --pipeline-model-parallel-size {triton_cfg.pipeline_model_parallel_size} \\\n"
f" --tensor-model-parallel-size {ft_model_cfg.tensor_model_parallel_size} \\\n"
f" --data-type {triton_cfg.data_type}"
)
if triton_cfg.int8_mode:
triton_prepare_model_config_cmd += " \\\n --int8-mode"
if triton_cfg.enable_custom_all_reduce:
triton_prepare_model_config_cmd += " \\\n --enable-custom-all-reduce"
return [
(
f"export PYTHONPATH={FT_PATH}:${{PYTHONPATH}} && \\\n"
+ f"rm -rf {triton_model_dir} && \\\n" # to not mix old and newly generated FT checkpoint files
+ f"{convert_cmd} && \\\n"
+ triton_prepare_model_config_cmd
)
]
def _get_t5_conversion_cmds(self, cfg):
""" Generate export commands for T5/mT5 models"""
run_cfg = cfg.export.run
ft_model_cfg = cfg.export.model
triton_cfg = cfg.export.triton_deployment
checkpoint_path = ft_model_cfg.checkpoint_path
triton_model_dir = triton_cfg.triton_model_dir
nemo_megatron_scripts_path = Path(cfg.launcher_scripts_path)
converter_path = FT_PATH / "examples/pytorch/t5/utils/nemo_t5_ckpt_convert.py"
prepare_model_config_script_path = (
nemo_megatron_scripts_path / "nemo_launcher/collections/export_scripts/prepare_triton_model_config.py"
)
template_path = FT_BACKEND_PATH / "all_models/t5/fastertransformer/config.pbtxt"
triton_model_version_dir = f"{triton_model_dir}/1"
convert_cmd = (
f"python -u {converter_path} \\\n"
f" --in-file {checkpoint_path} \\\n"
f" --saved-dir {triton_model_version_dir} \\\n"
f" --model-name {run_cfg.model_train_name} \\\n"
f" --infer-gpu-num {ft_model_cfg.tensor_model_parallel_size} \\\n"
f" --weight-data-type {ft_model_cfg.weight_data_type} \\\n"
f" --processes {ft_model_cfg.processes}"
)
triton_prepare_model_config_cmd = (
f"python -u {prepare_model_config_script_path} \\\n"
f" --model-train-name {run_cfg.model_train_name} \\\n"
f" --template-path {template_path} \\\n"
f" --ft-checkpoint {triton_model_version_dir}/{ft_model_cfg.tensor_model_parallel_size}-gpu \\\n"
f" --config-path {triton_model_dir}/config.pbtxt \\\n"
f" --max-batch-size {triton_cfg.max_batch_size} \\\n"
f" --pipeline-model-parallel-size {triton_cfg.pipeline_model_parallel_size} \\\n"
f" --tensor-model-parallel-size {ft_model_cfg.tensor_model_parallel_size} \\\n"
f" --data-type {triton_cfg.data_type}"
)
if triton_cfg.int8_mode:
triton_prepare_model_config_cmd += " \\\n --int8-mode"
if triton_cfg.enable_custom_all_reduce:
triton_prepare_model_config_cmd += " \\\n --enable-custom-all-reduce"
return [
(
f"export PYTHONPATH={FT_PATH}:${{PYTHONPATH}} && \\\n"
+ f"rm -rf {triton_model_dir} && \\\n" # to not mix old and newly generated FT checkpoint files
+ f"{convert_cmd} && \\\n"
+ triton_prepare_model_config_cmd
)
]
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/core/export_stages.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import select
import subprocess
import sys
from pathlib import Path
from typing import IO, Any, Dict, List, Optional, Tuple, Union
class JobPaths:
"""Creates paths related to the slurm job and its submission"""
def __init__(self, folder: Union[Path, str], job_name: str,) -> None:
self._folder = Path(folder).expanduser().absolute()
self._job_name = job_name
@property
def folder(self) -> Path:
return self._folder
@property
def results_folder(self) -> Path:
return self._folder / 'results'
@property
def submission_file(self) -> Path:
return Path(self.folder / f"{self._job_name}_submission.sh")
@property
def config_file(self) -> Path:
return Path(self.folder / f"{self._job_name}_hydra.yaml")
@property
def stderr(self) -> Path:
return Path(self.folder / f"log-{self._job_name}_%j.err")
@property
def stdout(self) -> Path:
return Path(self.folder / f"log-{self._job_name}_%j.out")
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.folder})"
class CommandFunction:
"""
    Wraps a command as a function in order to make sure it goes through the
    pipeline and notifies when it is finished.
    The output is a string containing everything that has been sent to stdout.
    WARNING: use CommandFunction only if you know the output won't be too big!
    Otherwise use subprocess.run(), which also streams the output to stdout/stderr.
:param list command: command to run, as a list
:param bool verbose: prints the command and stdout at runtime
:param bool ret_stdout: whether to return entire stdout
:param Path/str cwd: path to the location where the command must run from
:return: Everything that has been sent to stdout if `ret_stdout == True`
:rtype: str
"""
def __init__(
self,
command: List[str],
verbose: bool = True,
ret_stdout: bool = True,
cwd: Optional[Union[str, Path]] = None,
env: Optional[Dict[str, str]] = None,
) -> None:
if not isinstance(command, list):
raise TypeError("The command must be provided as a list")
self.command = command
self.verbose = verbose
self.ret_stdout = ret_stdout
self.cwd = None if cwd is None else str(cwd)
self.env = env
    def __call__(self, *args: Any, **kwargs: Any) -> str:
        """Call the command line with additional arguments
        The keyword arguments will be sent as --{key}={val}
        The logs are buffered. They will be printed if the job fails, or returned as the output of the function.
Errors are provided with the internal stderr.
"""
full_command = (
self.command + [str(x) for x in args] + [f"--{x}={y}" for x, y in kwargs.items()]
) # TODO bad parsing
if self.verbose:
print(f"The following command is sent: \"{' '.join(full_command)}\"")
if self.ret_stdout:
with subprocess.Popen(
full_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, cwd=self.cwd, env=self.env,
) as process:
stdout_buffer = io.StringIO()
stderr_buffer = io.StringIO()
try:
copy_process_streams(process, stdout_buffer, stderr_buffer, self.verbose)
except Exception as e:
process.kill()
process.wait()
raise OSError("Job got killed for an unknown reason.") from e
stdout = stdout_buffer.getvalue().strip()
stderr = stderr_buffer.getvalue().strip()
retcode = process.wait()
if stderr and (retcode and not self.verbose):
                    # We don't print if self.verbose, as it already happened before.
print(stderr, file=sys.stderr)
if retcode:
subprocess_error = subprocess.CalledProcessError(
retcode, process.args, output=stdout, stderr=stderr
)
raise OSError(stderr) from subprocess_error
return stdout
subprocess.Popen(full_command, shell=False, cwd=self.cwd, env=self.env,).wait()
return ""
# pylint: disable=too-many-locals
def copy_process_streams(process: subprocess.Popen, stdout: io.StringIO, stderr: io.StringIO, verbose: bool = False):
"""
Reads the given process stdout/stderr and write them to StringIO objects.
Make sure that there is no deadlock because of pipe congestion.
    If `verbose`, the process stdout/stderr are also copied to the interpreter stdout/stderr.
"""
def raw(stream: Optional[IO[bytes]]) -> IO[bytes]:
assert stream is not None
if isinstance(stream, io.BufferedIOBase):
stream = stream.raw
return stream
p_stdout, p_stderr = raw(process.stdout), raw(process.stderr)
stream_by_fd: Dict[int, Tuple[IO[bytes], io.StringIO, IO[str]]] = {
p_stdout.fileno(): (p_stdout, stdout, sys.stdout),
p_stderr.fileno(): (p_stderr, stderr, sys.stderr),
}
fds = list(stream_by_fd.keys())
poller = select.poll()
for fd in stream_by_fd:
poller.register(fd, select.POLLIN | select.POLLPRI)
while fds:
# `poll` syscall will wait until one of the registered file descriptors has content.
ready = poller.poll()
for fd, _ in ready:
p_stream, string, std = stream_by_fd[fd]
raw_buf = p_stream.read(2 ** 16)
if not raw_buf:
fds.remove(fd)
poller.unregister(fd)
continue
buf = raw_buf.decode()
string.write(buf)
string.flush()
if verbose:
std.write(buf)
std.flush()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/job_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from shutil import which
import requests
import tqdm
import zstandard as zstd
def download_single_file(url, save_dir, file_name=None):
os.makedirs(save_dir, exist_ok=True)
if file_name is None:
file_name = os.path.basename(url)
save_path = os.path.join(save_dir, file_name)
if os.path.exists(save_path):
print(f"File {save_path} already exists, skipping download.")
return save_path
with requests.get(url, stream=True) as read_file, open(save_path, "wb") as write_file:
total_length = int(read_file.headers.get("content-length"))
with tqdm.tqdm(total=total_length, unit="B", unit_scale=True, desc=file_name,) as pbar:
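            # Accumulate roughly 1 MB of progress before each bar refresh instead of updating per 8 KB chunk.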
update_len = 0
for chunk in read_file.iter_content(chunk_size=8192):
if chunk:
write_file.write(chunk)
update_len += len(chunk)
if update_len >= 1000000:
pbar.update(update_len)
update_len = 0
return save_path
def extract_single_zst_file(input_path, save_dir, file_name, rm_input=False):
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, file_name)
if os.path.exists(save_path):
print(f"File {save_path} already exists, skipping extraction.")
return save_path
total_length = os.stat(input_path).st_size
with tqdm.tqdm(total=total_length, unit="B", unit_scale=True, desc=file_name,) as pbar:
dctx = zstd.ZstdDecompressor()
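        # Stream-decompress in ~128 KiB reads and ~512 KiB writes rather than loading the whole archive into memory.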
read_size = 131075
write_size = int(read_size * 4)
save_path = os.path.join(save_dir, file_name)
update_len = 0
with open(input_path, "rb") as in_f, open(save_path, "wb") as out_f:
for chunk in dctx.read_to_iter(in_f, read_size=read_size, write_size=write_size):
out_f.write(chunk)
update_len += read_size
if update_len >= 3000000:
pbar.update(update_len)
update_len = 0
if rm_input:
os.remove(input_path)
def convert_file_numbers(file_numbers_str):
final_list = []
split_comma = file_numbers_str.split(",")
for elem in split_comma:
if elem == "":
continue
if "-" in elem:
split_dash = elem.split("-")
final_list += list(range(int(split_dash[0]), int(split_dash[1]) + 1))
else:
final_list.append(int(elem))
return final_list
def split_list(inlist, ngroups):
    """Splits a list into `ngroups` contiguous groups of near-equal size.
    Example:
        inlist = list(range(18))  # given list
        ngroups = 5  # desired number of parts
        returns [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9], [10, 11, 12, 13],
                 [14, 15, 16, 17]]
"""
nlen = len(inlist)
list_groups = []
for ii in range(ngroups):
idx_start = (ii * nlen) // ngroups
idx_end = ((ii + 1) * nlen) // ngroups
list_groups.append(inlist[idx_start:idx_end])
return list_groups
def is_tool(progname):
    """Check whether `progname` is on PATH and marked as executable."""
# https://stackoverflow.com/a/34177358/3457624
return which(progname) is not None
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/file_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/data_utils/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import time
from nemo_launcher.utils.file_utils import download_single_file
NEMO_LAUNCHER_CI = os.getenv("NEMO_LAUNCHER_CI", "False").lower() in ("true", "t", "1")
VERSIONS = ["v1.1", "v2.0", "xquad"]
VERSION2PATHS = {
"v1.1": [
"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
],
"v2.0": [
"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json",
"https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json",
],
"xquad": [
f"https://raw.githubusercontent.com/deepmind/xquad/master/xquad.{lang}.json"
for lang in ["en", "es", "de", "el", "ru", "tr", "ar", "vi", "th", "zh", "hi"]
],
}
def download_squad(data_dir, versions):
os.makedirs(data_dir, exist_ok=True)
for v in versions:
if os.path.exists(os.path.join(data_dir, v)):
print(f"Skipped downloading SQuAD {v}. Already exists.")
# download might not finish in time in CI
if NEMO_LAUNCHER_CI:
time.sleep(5)
continue
print(f"Downloading SQuAD {v}...")
for url in VERSION2PATHS[v]:
download_single_file(url, os.path.join(data_dir, v))
print("\tCompleted!")
def get_versions(requested_versions):
requested_versions = requested_versions.split(",")
if "all" in requested_versions:
versions = VERSIONS
else:
versions = []
for v in requested_versions:
if v.lower() in VERSIONS:
versions.append(v)
else:
raise ValueError(f"SQuAD version \"{v}\" not found!")
versions = set(versions)
return list(versions)
def main(arguments):
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", help="directory to save data to", type=str, default="squad_data")
parser.add_argument(
"--versions",
help="SQuAD versions (v1.1, v2.0 or xquad) to download data for as a comma separated string",
type=str,
default="all",
)
args = parser.parse_args(arguments)
versions = get_versions(args.versions)
download_squad(args.data_dir, versions)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/data_utils/download_squad.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from .download_squad import download_squad
NEMO_LAUNCHER_CI = os.getenv("NEMO_LAUNCHER_CI", "False").lower() in ("true", "t", "1")
def prepare_squad_for_prompt_learning(data_dir, launcher_scripts_path):
squad_dir = data_dir
download_squad(squad_dir, ["v1.1"])
squad_v1_dir = os.path.join(squad_dir, "v1.1")
preprocess_script = launcher_scripts_path / "nemo_launcher/utils/data_utils/prompt_learning_squad_preprocessing.py"
os.system(f"python3 {preprocess_script} " f"--data-dir={squad_v1_dir} ")
def prepare_squad_for_fine_tuning(data_dir):
squad_dir = data_dir
download_squad(squad_dir, ["v1.1", "xquad"])
squad_v1_dir = os.path.join(squad_dir, "v1.1")
squad_xquad_dir = os.path.join(squad_dir, "xquad")
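    # True marks dev-style files: during preprocessing, dev files keep only the first reference answer,
    # while train files emit one example per answer.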
path2dev = {
**{f"{squad_v1_dir}/train-v1.1.json": False, f"{squad_v1_dir}/dev-v1.1.json": True,},
**{
f"{squad_xquad_dir}/xquad.{lang}.json": True
for lang in ["en", "es", "de", "el", "ru", "tr", "ar", "vi", "th", "zh", "hi"]
},
}
for path, dev in path2dev.items():
if (not os.path.exists(f"{os.path.splitext(path)[0]}_src.txt") or
not os.path.exists(f"{os.path.splitext(path)[0]}_tgt.txt") or
not os.path.exists(f"{os.path.splitext(path)[0]}_gpt.json")
):
preprocess_squad_for_fine_tuning(
fname=path, out_fname_prefix=os.path.splitext(path)[0], dev=dev,
)
def preprocess_squad_for_fine_tuning(fname, out_fname_prefix, dev=False):
x = json.load(open(fname, encoding='utf8'))
print(f"Preprocessing \"{fname}\" for fine-tuning...")
if (os.path.exists(f'{out_fname_prefix}_src.txt') and
os.path.exists(f'{out_fname_prefix}_tgt.txt') and
os.path.exists(f'{out_fname_prefix}_gpt.json')):
print(f"Skipped! Fine-tuning data existed at \"{out_fname_prefix}*.txt\"")
if NEMO_LAUNCHER_CI:
time.sleep(5)
return
with open(f'{out_fname_prefix}_src.txt', 'w') as f_src, open(f'{out_fname_prefix}_tgt.txt', 'w') as f_tgt, open(f'{out_fname_prefix}_gpt.json', 'w') as f_gpt:
for i in x['data']:
title = i['title'].replace('\n', '\\n')
for j in i['paragraphs']:
context = j['context'].replace('\n', '\\n')
for k in j['qas']:
question = k['question'].replace('\n', '\\n')
if len(k['answers']) > 0:
if dev:
answer = k['answers'][0]['text'].replace('\n', '\\n')
f_src.write(f"Title: {title} Paragraph: {context} Question: {question}\n")
f_tgt.write(f"{answer}\n")
input_text = f"{question} {title} Paragraph: {context}"
gpt_sample = {"input" : input_text, "output" : answer}
gpt_sample = json.dumps(gpt_sample)
f_gpt.write(f"{gpt_sample}\n")
else:
for a in k['answers']:
answer = a['text'].replace('\n', '\\n')
f_src.write(f"Title: {title} Paragraph: {context} Question: {question}\n")
f_tgt.write(f"{answer}\n")
input_text = f"{question} {title} Paragraph: {context}"
gpt_sample = {"input" : input_text, "output" : answer}
gpt_sample = json.dumps(gpt_sample)
f_gpt.write(f"{gpt_sample}\n")
print(f"Completed! Fine-tuning data saved at:")
print(f"- \"{out_fname_prefix}_src.txt\"")
print(f"- \"{out_fname_prefix}_tgt.txt\"")
    print(f"- \"{out_fname_prefix}_gpt.json\"")
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/data_utils/prepare_squad.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import time
from tqdm import tqdm
"""
Dataset preprocessing script for the SQuAD dataset: https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
data-dir: (str) The directory where the squad dataset was downloaded, files will be saved here
train-file: (str) Name of train set file, either train-v1.1.json or train-v2.0.json
dev-file: (str) Name of dev set file, either dev-v1.1.json or dev-v2.0.json
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'squad' for
example, the files would be saved as squad_train.jsonl, squad_val.jsonl, and squad_test.jsonl
    include-topic-name: Whether to include the topic name for the paragraph in the data json. See the SQuAD explanation
                        below for more context on what is meant by 'topic name'.
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
Saves train, val, and test files for the SQuAD dataset. The val and test splits are the same data, because the given test
split lacks ground truth answers.
An example of the processed output written to file:
{
"taskname": "squad",
"context": "Red is the traditional color of warning and danger. In the Middle Ages, a red flag announced that the defenders of a town or castle would fight to defend it, and a red flag hoisted by a warship meant they would show no mercy to their enemy. In Britain, in the early days of motoring, motor cars had to follow a man with a red flag who would warn horse-drawn vehicles, before the Locomotives on Highways Act 1896 abolished this law. In automobile races, the red flag is raised if there is danger to the drivers. In international football, a player who has made a serious violation of the rules is shown a red penalty card and ejected from the game.",
"question": "What did a red flag signal in the Middle Ages?",
"answer": " defenders of a town or castle would fight to defend it"
},
"""
NEMO_LAUNCHER_CI = os.getenv("NEMO_LAUNCHER_CI", "False").lower() in ("true", "t", "1")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default=".")
parser.add_argument("--train-file", type=str, default="train-v1.1.json")
parser.add_argument("--dev-file", type=str, default="dev-v1.1.json")
parser.add_argument("--save-name-base", type=str, default="squad")
parser.add_argument("--include-topic-name", action='store_true')
parser.add_argument("--random-seed", type=int, default=1234)
args = parser.parse_args()
train_data_dict = json.load(open(f"{args.data_dir}/{args.train_file}"))
dev_data_dict = json.load(open(f"{args.data_dir}/{args.dev_file}"))
train_data = train_data_dict['data']
val_data = dev_data_dict['data']
save_name_base = f"{args.data_dir}/{args.save_name_base}"
process_data(train_data, val_data, save_name_base, args.include_topic_name)
def process_data(train_data, val_data, save_name_base, include_topic):
train_set = extract_questions(train_data, include_topic, split="train")
val_set = extract_questions(val_data, include_topic, split="val")
test_set = extract_questions(val_data, include_topic, split="test")
gen_file(train_set, save_name_base, 'train')
gen_file(val_set, save_name_base, 'val')
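    # The test split is written twice: once with answers kept as ground truth for scoring, once without answers as model input.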
gen_file(test_set, save_name_base, 'test', make_ground_truth=True)
gen_file(test_set, save_name_base, 'test', make_ground_truth=False)
def extract_questions(data, include_topic, split):
processed_data = []
    # Iterate over topics, want to keep them separate in train/val/test splits
for question_group in data:
processed_topic_data = []
topic = question_group['title']
questions = question_group['paragraphs']
# Iterate over paragraphs related to topics
for qa_group in questions:
context = qa_group['context']
qas = qa_group['qas']
# Iterate over questions about paragraph
for qa in qas:
question = qa['question']
try:
# Dev set has multiple right answers. Want all possible answers in test split ground truth
if split == "test":
answers = [qa['answers'][i]['text'] for i in range(len(qa['answers']))]
                    # Choose one answer from the dev set if making the validation split; the train set only has one answer
else:
answers = qa['answers'][0]["text"]
except IndexError:
continue
example_json = {"taskname": "squad", "context": context, "question": question, "answer": answers}
if include_topic:
example_json["topic"] = topic
processed_topic_data.append(example_json)
processed_data.extend(processed_topic_data)
return processed_data
def gen_file(data, save_name_base, split_type, make_ground_truth=False):
save_path = f"{save_name_base}_{split_type}.jsonl"
if make_ground_truth:
save_path = f"{save_name_base}_{split_type}_ground_truth.jsonl"
if os.path.exists(save_path):
print(f"Skipped! {split_type} split exists in {save_path}")
if NEMO_LAUNCHER_CI:
time.sleep(5)
return
print(f"Saving {split_type} split to {save_path}")
with open(save_path, 'w') as save_file:
for example_json in tqdm(data):
            # Don't want labels in the test set
if split_type == "test" and not make_ground_truth:
del example_json["answer"]
save_file.write(json.dumps(example_json) + '\n')
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/utils/data_utils/prompt_learning_squad_preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import functools
import itertools
import operator
import os
import pathlib
import re
import pynvml
class Device:
# assume nvml returns list of 64 bit ints
_nvml_bit_affinity = 64
_nvml_affinity_elements = (os.cpu_count() + _nvml_bit_affinity - 1) // _nvml_bit_affinity
def __init__(self, device_idx):
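        # CUDA_VISIBLE_DEVICES remaps logical device ids, so translate the caller's index back to the physical NVML index.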
if "CUDA_VISIBLE_DEVICES" in os.environ:
remapped_device_ids = [int(id) for id in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
device_idx = remapped_device_ids[device_idx]
try:
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
except Exception as ex:
msg = f'Unable to get NVML handle for device {device_idx}'
raise RuntimeError(msg) from ex
def get_name(self):
return pynvml.nvmlDeviceGetName(self.handle)
def get_uuid(self):
return pynvml.nvmlDeviceGetUUID(self.handle)
def get_cpu_affinity(self, scope):
if scope == 'socket':
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_SOCKET
elif scope == 'node':
nvml_scope = pynvml.NVML_AFFINITY_SCOPE_NODE
else:
raise RuntimeError('Unknown scope')
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinityWithinScope(self.handle, Device._nvml_affinity_elements, nvml_scope):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def get_thread_siblings_list():
"""
Returns a list of 2-element integer tuples representing pairs of
hyperthreading cores.
"""
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
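    # NOTE: path[0] is '/' and path[1:] is the glob pattern relative to the
    # filesystem root, so the loop below expands the 'cpu*' wildcard under /sys.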
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(sorted(map(int, res[0])))
thread_siblings_list.append(pair)
thread_siblings_list = list(set(thread_siblings_list))
return thread_siblings_list
def build_thread_siblings_dict(siblings_list):
siblings_dict = {}
for siblings_tuple in siblings_list:
for core in siblings_tuple:
siblings_dict[core] = siblings_tuple
return siblings_dict
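# Illustrative example of the mapping built above (core ids are hypothetical,
# not read from a real topology):
#   build_thread_siblings_dict([(0, 64), (1, 65)])
#   -> {0: (0, 64), 64: (0, 64), 1: (1, 65), 65: (1, 65)}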
def group_list_by_key(the_list, key):
sorted_list = sorted(the_list, key=key)
grouped = [tuple(group) for key, group in itertools.groupby(sorted_list, key=key)]
return grouped
def group_by_siblings(affinities):
siblings_list = get_thread_siblings_list()
siblings_dict = build_thread_siblings_dict(siblings_list)
siblings_key = lambda x: siblings_dict.get(x, (x,))
affinities = [tuple(group_list_by_key(affinity, key=siblings_key)) for affinity in affinities]
return affinities
def group_by_node(socket_affinities, node_affinities):
socket_node_assigned_cores = collections.defaultdict(list)
for socket, node_cores in zip(socket_affinities, node_affinities):
socket_node_assigned_cores[socket].extend(node_cores)
socket_node_assigned_cores = {key: tuple(sorted(set(value))) for key, value in socket_node_assigned_cores.items()}
node_grouping = collections.defaultdict(list)
for socket_cores, assigned_cores in socket_node_assigned_cores.items():
unassigned_cores = sorted(list(set(socket_cores) - set(assigned_cores)))
for assigned_core in assigned_cores:
node_grouping[assigned_core].append(assigned_core)
for assigned, unassigned in zip(itertools.cycle(assigned_cores), unassigned_cores):
node_grouping[assigned].append(unassigned)
node_grouping = {key: tuple(value) for key, value in node_grouping.items()}
grouped_affinities = [tuple(node_grouping[item] for item in node_affinity) for node_affinity in node_affinities]
return grouped_affinities
def ungroup_by_nodes(affinities, scope):
if scope == 'socket':
affinities = [list(itertools.chain(*zip(*affinity))) for affinity in affinities]
elif scope == 'node':
affinities = [[group[0] for group in affinity] for affinity in affinities]
return affinities
def ungroup_by_siblings(affinities, cores):
if cores == 'all_logical':
affinities = [list(itertools.chain(*affinity)) for affinity in affinities]
elif cores == 'single_logical':
affinities = [[group[0] for group in affinity] for affinity in affinities]
else:
raise RuntimeError('Unknown cores mode')
return affinities
def check_core_count(affinities, min_cores=1, max_cores=None):
for gpu_id, affinity in enumerate(affinities):
if len(affinity) < min_cores:
raise RuntimeError(
f'Number of available physical cores for GPU {gpu_id} is less '
                f'than the predefined minimum, min_cores={min_cores}, available '
f'physical cores: {affinity} (count={len(affinity)})'
)
if max_cores is not None:
affinities = [affinity[:max_cores] for affinity in affinities]
return affinities
def ungroup_all_and_check_count(affinities, scope, cores, min_cores=1, max_cores=None):
affinities = ungroup_by_nodes(affinities, scope)
affinities = check_core_count(affinities, min_cores, max_cores)
affinities = ungroup_by_siblings(affinities, cores)
return affinities
def check_affinities(affinities):
# sets of cores should be either identical or disjoint
for i, j in itertools.product(affinities, affinities):
if not set(i) == set(j) and not set(i).isdisjoint(set(j)):
raise RuntimeError(f'Sets of cores should be either identical or disjoint, ' f'but got {i} and {j}.')
def get_affinities(nproc_per_node, scope, exclude_unavailable_cores=True):
devices = [Device(i) for i in range(nproc_per_node)]
affinities = [dev.get_cpu_affinity(scope) for dev in devices]
if exclude_unavailable_cores:
available_cores = os.sched_getaffinity(0)
affinities = [sorted(list(set(affinity) & available_cores)) for affinity in affinities]
check_affinities(affinities)
return affinities
def get_grouped_affinities(nproc_per_node, exclude_unavailable_cores=True):
socket_affinities = get_affinities(nproc_per_node, 'socket', exclude_unavailable_cores)
node_affinities = get_affinities(nproc_per_node, 'node', exclude_unavailable_cores)
sibling_socket_affinities = group_by_siblings(socket_affinities)
sibling_node_affinities = group_by_siblings(node_affinities)
grouped_affinities = group_by_node(sibling_socket_affinities, sibling_node_affinities)
return grouped_affinities
def get_all(nproc_per_node, scope, cores, min_cores, max_cores):
"""
The process is assigned with all available physical CPU cores recommended by
pynvml for the GPU with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
affinities = get_affinities(nproc_per_node, scope)
affinities = group_by_siblings(affinities)
node_affinities = group_by_siblings(get_affinities(nproc_per_node, 'node'))
all_node_affinities = functools.reduce(operator.add, node_affinities)
affinities = [
tuple(sorted(affinity, key=lambda x: (0 if x in all_node_affinities else 1, x,),)) for affinity in affinities
]
affinities = check_core_count(affinities, min_cores, max_cores)
affinities = ungroup_by_siblings(affinities, cores)
return affinities
def get_single(nproc_per_node, scope, cores, min_cores=1, max_cores=1):
"""
The process is assigned with the first available physical CPU core from the
list of all physical CPU cores recommended by pynvml for the GPU with a
given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
ungrouped_affinities = ungroup_all_and_check_count(grouped_affinities, scope, cores, min_cores, max_cores)
return ungrouped_affinities
def get_single_unique(nproc_per_node, scope, cores, min_cores=1, max_cores=1):
"""
The process is assigned with a single unique available physical CPU core
from the list of all physical CPU cores recommended by pynvml for the GPU
with a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
affinities = []
assigned_groups = set()
for grouped_affinity in grouped_affinities:
for group in grouped_affinity:
if group not in assigned_groups:
affinities.append([group])
assigned_groups.add(group)
break
ungrouped_affinities = ungroup_all_and_check_count(affinities, scope, cores, min_cores, max_cores)
return ungrouped_affinities
def get_unique(
nproc_per_node, scope, cores, mode, min_cores, max_cores, balanced=True,
):
"""
The process is assigned with a unique subset of available physical CPU
cores from the list of all CPU cores recommended by pynvml for the GPU with
a given id.
Assignment automatically includes available hyperthreading siblings if
cores='all_logical'.
Args:
nproc_per_node: number of processes per node
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
mode: 'unique_contiguous' or 'unique_interleaved'
        balanced: assign an equal number of physical cores to each process
"""
grouped_affinities = get_grouped_affinities(nproc_per_node)
grouped_affinities_to_device_ids = collections.defaultdict(list)
for idx, grouped_affinity in enumerate(grouped_affinities):
grouped_affinities_to_device_ids[tuple(grouped_affinity)].append(idx)
# compute minimal number of physical cores per GPU across all GPUs and
# sockets, code assigns this number of cores per GPU if balanced == True
min_physical_cores_per_gpu = min(
[len(cores) // len(gpus) for cores, gpus in grouped_affinities_to_device_ids.items()]
)
grouped_unique_affinities = [None] * nproc_per_node
for (grouped_affinity, device_ids,) in grouped_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
if balanced:
cores_per_device = min_physical_cores_per_gpu
grouped_affinity = grouped_affinity[: devices_per_group * min_physical_cores_per_gpu]
else:
cores_per_device = len(grouped_affinity) // devices_per_group
for subgroup_id, device_id in enumerate(device_ids):
# In theory there should be no difference in performance between
# 'interleaved' and 'contiguous' pattern on Intel-based DGX-1,
# but 'contiguous' should be better for DGX A100 because on AMD
# Rome 4 consecutive cores are sharing L3 cache.
# TODO: code doesn't attempt to automatically detect layout of
# L3 cache, also external environment may already exclude some
# cores, this code makes no attempt to detect it and to align
# mapping to multiples of 4.
if mode == 'unique_interleaved':
unique_grouped_affinity = list(grouped_affinity[subgroup_id::devices_per_group])
elif mode == 'unique_contiguous':
unique_grouped_affinity = list(
grouped_affinity[subgroup_id * cores_per_device : (subgroup_id + 1) * cores_per_device]
)
else:
raise RuntimeError('Unknown set_unique mode')
grouped_unique_affinities[device_id] = unique_grouped_affinity
ungrouped_affinities = ungroup_all_and_check_count(grouped_unique_affinities, scope, cores, min_cores, max_cores)
return ungrouped_affinities
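# Illustrative sketch of the two slicing patterns used in get_unique (inputs
# are assumed, not taken from a real machine). With a grouped affinity of
# [c0, c1, ..., c7] shared by devices_per_group=2 and cores_per_device=4:
#   'unique_interleaved': grouped_affinity[0::2] -> [c0, c2, c4, c6] (device 0)
#                         grouped_affinity[1::2] -> [c1, c3, c5, c7] (device 1)
#   'unique_contiguous':  grouped_affinity[0:4]  -> [c0, c1, c2, c3] (device 0)
#                         grouped_affinity[4:8]  -> [c4, c5, c6, c7] (device 1)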
def set_affinity(
gpu_id,
nproc_per_node,
*,
mode='unique_contiguous',
scope='node',
cores='all_logical',
balanced=True,
min_cores=1,
max_cores=None,
):
"""
The process is assigned with a proper CPU affinity that matches CPU-GPU
hardware architecture on a given platform. Usually, setting proper affinity
improves and stabilizes the performance of deep learning training workloads.
This function assumes that the workload runs in multi-process single-device
mode (there are multiple training processes, and each process is running on
a single GPU). This is typical for multi-GPU data-parallel training
workloads (e.g., using `torch.nn.parallel.DistributedDataParallel`).
Available affinity modes:
* 'all' - the process is assigned with all available physical CPU cores
recommended by pynvml for the GPU with a given id.
* 'single' - the process is assigned with the first available
physical CPU core from the list of all physical CPU cores recommended by
pynvml for the GPU with a given id (multiple GPUs could be assigned with
the same CPU core).
* 'single_unique' - the process is assigned with a single unique
available physical CPU core from the list of all CPU cores recommended by
pynvml for the GPU with a given id.
* 'unique_interleaved' - the process is assigned with a unique subset of
available physical CPU cores from the list of all physical CPU cores
recommended by pynvml for the GPU with a given id, cores are assigned with
interleaved indexing pattern
* 'unique_contiguous' - (the default mode) the process is assigned with a
unique subset of available physical CPU cores from the list of all physical
CPU cores recommended by pynvml for the GPU with a given id, cores are
assigned with contiguous indexing pattern
Available "scope" modes:
* 'node' - sets the scope for pynvml affinity queries to NUMA node
* 'socket' - sets the scope for pynvml affinity queries to processor socket
Available "cores" modes:
* 'all_logical' - assigns the process with all logical cores associated with
a given corresponding physical core (i.e., automatically includes all
available hyperthreading siblings)
* 'single_logical' - assigns the process with only one logical core
associated with a given corresponding physical core (i.e., excludes
hyperthreading siblings)
'unique_contiguous' is the recommended mode for deep learning
training workloads on NVIDIA DGX machines.
Args:
gpu_id: integer index of a GPU, value from 0 to 'nproc_per_node' - 1
nproc_per_node: number of processes per node
mode: affinity mode
scope: scope for retrieving affinity from pynvml, 'node' or 'socket'
cores: 'all_logical' or 'single_logical'
balanced: assign an equal number of physical cores to each process,
affects only 'unique_interleaved' and
'unique_contiguous' affinity modes
min_cores: (default=1) the intended minimum number of physical cores per
process, code raises RuntimeError if the number of available cores
is less than 'min_cores'
        max_cores: (default=None) the intended maximum number of physical cores
per process, the list of assigned cores is trimmed to the first
'max_cores' cores if max_cores is not None
Returns a set of logical CPU cores on which the process is eligible to run.
WARNING: On DGX A100, only half of the CPU cores have direct access to GPUs.
set_affinity with scope='node' restricts execution only to the CPU cores
directly connected to GPUs. On DGX A100, it will limit the code to half of
the CPU cores and half of CPU memory bandwidth (which may be fine for many
DL models). Use scope='socket' to use all available DGX A100 CPU cores.
WARNING: Intel's OpenMP implementation resets affinity on the first call to
an OpenMP function after a fork. It's recommended to run with env variable:
`KMP_AFFINITY=disabled` if the affinity set by gpu_affinity should be
preserved after a fork (e.g. in PyTorch DataLoader workers).
Example:
import argparse
import os
import gpu_affinity
import torch
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--local_rank',
type=int,
default=os.getenv('LOCAL_RANK', 0),
)
args = parser.parse_args()
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(args.local_rank, nproc_per_node)
print(f'{args.local_rank}: core affinity: {affinity}')
if __name__ == "__main__":
main()
Launch the example with:
python -m torch.distributed.launch --nproc_per_node <#GPUs> example.py
"""
if gpu_id >= nproc_per_node:
msg = f'gpu_id={gpu_id} should be smaller than ' f'nproc_per_node={nproc_per_node}'
raise RuntimeError(msg)
pynvml.nvmlInit()
if mode == 'all':
affinity = get_all(nproc_per_node, scope, cores, min_cores, max_cores)
elif mode == 'single':
affinity = get_single(nproc_per_node, scope, cores)
elif mode == 'single_unique':
affinity = get_single_unique(nproc_per_node, scope, cores)
elif mode == 'unique_interleaved' or mode == 'unique_contiguous':
affinity = get_unique(nproc_per_node, scope, cores, mode, min_cores, max_cores, balanced,)
else:
raise RuntimeError('Unknown affinity mode')
os.sched_setaffinity(0, affinity[gpu_id])
set_affinity = os.sched_getaffinity(0)
return set_affinity
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/gpu_affinity.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import hydra
def numa_mapping(local_rank, devices, numa_cfg):
"""Sets the GPU affinity for the NUMA mapping for the current GPU passed as local_rank.
It sets the NUMA mapping following the parameters in numa_cfg.
Arguments:
local_rank: int, local_rank as it will be passed to PyTorch.
devices: int, number of GPUs per node, or nproc_per_node.
numa_cfg: OmegaConf, config to set the numa mapping parameters.
"""
enable = numa_cfg.get("enable")
mode = numa_cfg.get("mode")
scope = numa_cfg.get("scope")
cores = numa_cfg.get("cores")
balanced = numa_cfg.get("balanced")
min_cores = numa_cfg.get("min_cores")
max_cores = numa_cfg.get("max_cores")
if enable:
from nemo_launcher.collections.gpu_affinity import set_affinity
affinity = set_affinity(
gpu_id=int(local_rank),
nproc_per_node=devices,
mode=mode,
scope=scope,
cores=cores,
balanced=balanced,
min_cores=min_cores,
max_cores=max_cores,
)
print(f"Setting NUMA mapping (GPU Affinity) for rank {local_rank}: {affinity}")
else:
print("No NUMA mapping was enabled, performance might be affected.")
@hydra.main(config_path="conf", config_name="numa_mapping")
def main(cfg):
rank = int(os.environ.get("LOCAL_RANK"))
devices = int(os.environ.get("SLURM_NTASKS_PER_NODE")) # TODO: Check BCP, interactive
numa_mapping(
local_rank=rank, devices=devices, numa_cfg=cfg,
)
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/numa_mapping.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example usage:
python3 auto_blend.py \
model_type=<mt5/t5/gpt3> \
preprocessed_dir=<path/to/preprocessed_dir> \
blending_alpha=<blending_alpha>
"""
import math
import os
from collections import defaultdict
import hydra
@hydra.main(config_path="conf", config_name="auto_blend")
def generate_data_blend(cfg):
"""
Generate data blend as NeMo input `model.data.data_prefix` for binary dataset files
within the dataset folder based on the file sizes.
"""
model_type = cfg.get("model_type")
data_dir = cfg.get("preprocessed_dir")
alpha = cfg.get("blending_alpha")
data_files = os.listdir(data_dir)
split_size = defaultdict(int)
file_size = defaultdict(list)
for f in data_files:
if f.endswith(".bin"):
f_path = os.path.join(data_dir, f)
f_size = os.path.getsize(f_path)
if model_type == "mt5":
elements = f.split("_")
split = elements[0]
else:
                # use slicing rather than str.strip, which removes characters, not a suffix
                split = f_path[: -len(".bin")]
            split_size[split] += f_size
            file_size[split].append((f_path[: -len(".bin")], f_size))
split_ratio = {split: math.pow(split_size[split], alpha) for split in split_size}
total = sum(split_ratio.values())
split_ratio = {split: split_ratio[split] / total for split in split_ratio}
res = []
for split in file_size:
for prefix, size in file_size[split]:
res.extend([round(size / split_size[split] * split_ratio[split], 6), prefix])
print(str(res).replace(" ", ""))
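# Worked example of the blending math above (file sizes and alpha are
# hypothetical): with split "a" holding 100 bytes and split "b" holding 300
# bytes of .bin files and blending_alpha=0.5,
#   split_ratio = {a: 100**0.5, b: 300**0.5} ~= {a: 10.0, b: 17.32}
#   normalized  ~= {a: 0.366, b: 0.634}
# Each file then gets weight (file_size / split_size) * normalized_ratio, and
# the printed list alternates [weight, prefix, weight, prefix, ...].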
if __name__ == "__main__":
generate_data_blend()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/auto_blend.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import hydra
from nemo.utils.get_rank import is_global_rank_zero
from omegaconf import OmegaConf
@hydra.main(config_path="conf", config_name="hparams_override")
def hparams_override(cfg):
"""
    This script overrides hyper-parameters inside NeMo's `hparams.yaml` and will generate
a new yaml file called `hparams_override.yaml`. The new yaml file will be
fed into NeMo conversion scripts to convert training checkpoints to a .nemo
checkpoint.
"""
hparams_file = cfg.get("hparams_file")
if hparams_file is not None:
output_path = cfg.get("output_path")
hparams_override_file = os.path.join(output_path, "hparams_override.yaml")
vocab_file = cfg.get("vocab_file")
merge_file = cfg.get("merge_file")
tokenizer_model = cfg.get("tokenizer_model")
conf = OmegaConf.load(hparams_file)
if vocab_file is not None:
conf.cfg.tokenizer.vocab_file = vocab_file
if merge_file is not None:
conf.cfg.tokenizer.merge_file = merge_file
if tokenizer_model is not None:
conf.cfg.tokenizer.model = tokenizer_model
if "activations_checkpoint_granularity" in conf.cfg:
conf.cfg.activations_checkpoint_granularity = None
if "activations_checkpoint_method" in conf.cfg:
conf.cfg.activations_checkpoint_method = None
# if "sequence_parallel" in conf.cfg:
# conf.cfg.sequence_parallel = False
if conf.cfg.optim.name == "distributed_fused_adam":
conf.cfg.optim.name = "fused_adam"
if is_global_rank_zero():
with open(hparams_override_file, "w") as f:
OmegaConf.save(config=conf, f=f)
wait_time = 0
while not os.path.exists(hparams_override_file):
time.sleep(1)
wait_time += 1
if wait_time > 60:
raise TimeoutError('Timeout waiting for config file to be created.')
if __name__ == "__main__":
hparams_override()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/hparams_override.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import hydra
def _inject_model_parallel_rank(filepath, tensor_model_parallel_size=1, pipeline_model_parallel_size=1):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
tensor_model_parallel_rank = pipeline_model_parallel_rank = 0
if tensor_model_parallel_size > 1 or pipeline_model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
dirname = os.path.dirname(filepath)
basename = os.path.basename(filepath)
if pipeline_model_parallel_size is None or pipeline_model_parallel_size == 1:
filepath = f"{dirname}/mp_rank_{tensor_model_parallel_rank:02d}/{basename}"
else:
filepath = f"{dirname}/tp_rank_{tensor_model_parallel_rank:02d}_pp_rank_{pipeline_model_parallel_rank:03d}/{basename}"
return filepath
else:
return filepath
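# Illustrative behaviour (the checkpoint paths are hypothetical):
#   _inject_model_parallel_rank("ckpts/last.ckpt", 2, 1)
#     -> "ckpts/mp_rank_00/last.ckpt"
#   _inject_model_parallel_rank("ckpts/last.ckpt", 2, 4)
#     -> "ckpts/tp_rank_00_pp_rank_000/last.ckpt"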
@hydra.main(config_path="conf", config_name="checkpoint_search")
def checkpoint_search(cfg):
"""
Search in the checkpoint folder for the latest checkpoint or a regex name.
The checkpoint path are injected based on model parallelism.
"""
# Checkpoint search
checkpoint_folder = cfg.checkpoint_folder
checkpoint_name = cfg.checkpoint_name
tensor_model_parallel_size = cfg.tensor_model_parallel_size
pipeline_model_parallel_size = cfg.pipeline_model_parallel_size
if checkpoint_name == "latest":
checkpoints = os.path.join(checkpoint_folder, "*.ckpt")
checkpoints = _inject_model_parallel_rank(
checkpoints, tensor_model_parallel_size, pipeline_model_parallel_size
)
checkpoint_list = glob.glob(checkpoints)
latest_checkpoint = max(checkpoint_list, key=os.path.getctime)
checkpoint_name = os.path.basename(latest_checkpoint)
checkpoint = os.path.join(checkpoint_folder, checkpoint_name)
checkpoint = _inject_model_parallel_rank(checkpoint, tensor_model_parallel_size, pipeline_model_parallel_size)
checkpoint_list = glob.glob(checkpoint)
if len(checkpoint_list) > 1:
raise ValueError("Too many checkpoints fit the checkpoint name pattern in conversion config.")
if len(checkpoint_list) == 0:
raise ValueError("No checkpoint found with the checkpoint name pattern in conversion config.")
checkpoint_name = os.path.basename(checkpoint_list[0])
print(checkpoint_name)
if __name__ == "__main__":
checkpoint_search()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/checkpoint_search.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to `pause_and_prime_dns_connections` in BCP platform.
"""
import os
import socket
import sys
import time
def pause_and_prime_dns_connections() -> None:
if int(os.environ.get("GROUP_RANK")) > 0:
time.sleep(20)
prime_dns_connections()
elif int(os.environ.get("LOCAL_RANK")) != 0:
time.sleep(10)
def prime_dns_connections() -> None:
me = "worker" + os.environ.get("GROUP_RANK") + ":" + os.environ.get("RANK")
master_addr = os.environ.get("MASTER_ADDR")
master_port = int(os.environ.get("MASTER_PORT"))
print(f"SPDNS: {me} Connecting to {master_addr}:{master_port}")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (master_addr, master_port)
timeout = time.time() + 300
connected = False
while not connected:
try:
sock.connect(server_address)
connected = True
except Exception:
time.sleep(2)
if time.time() > timeout:
print(f"{me} couldnt connect to {master_addr}:{master_port} timed out! (300s)")
sys.exit(110)
print(f"SPDNS: {me} connected to {master_addr}:{master_port}")
sock.close()
if __name__ == "__main__":
pause_and_prime_dns_connections()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/pause_and_prime_dns_connections.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynvml
import sys
import hydra
global cuda_capability
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
cuda_capability, _ = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
pynvml.nvmlShutdown()
@hydra.main(version_base=None, config_path="conf", config_name="get_ln_sm_margin")
def get_ln_sm_margin(cfg):
"""
    Set the SM margin for the LayerNorm layer on H100. This overlaps the LN kernel with communication kernels.
"""
global cuda_capability
if cuda_capability == 9:
print(4)
else:
print(0)
@hydra.main(version_base=None, config_path="conf", config_name="get_ag_overlap")
def get_ag_overlap(cfg):
"""
Disable AG overlap with P2P ring-exchange at H100 BF16 training.
FIXME: Fix the bug and remove this conditional setting.
"""
global cuda_capability
fp8 = cfg.get("fp8")
if cuda_capability == 9:
if fp8:
print(1)
else:
print(0)
else:
print(1)
if __name__ == "__main__":
    if sys.argv[1] == "name=get_ln_sm_margin":
get_ln_sm_margin()
elif sys.argv[1] == "name=get_ag_overlap":
get_ag_overlap()
else:
raise ValueError("The provided conditional config function does not exist.")
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/conditional_cfgs.py |
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/export_scripts/__init__.py
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import configparser
import logging
import pathlib
import sys
import google.protobuf.json_format
import google.protobuf.text_format
import tritonclient.grpc
LOGGER = logging.getLogger(__name__)
def _get_model_parameters(config_ini):
"""
Read model parameters from FasterTransformer INI format.
Input:
        config_ini: configparser.ConfigParser configuration object
Returns:
list of model parameters
"""
excluded_section_names = ["ft_instance_hyperparameter", "structure"]
sections_names_with_model_parameters = [s for s in config_ini.sections() if s not in excluded_section_names]
if not sections_names_with_model_parameters:
LOGGER.error(
"Could not find section with model parameters in model config.ini while it is required to fill templates"
)
sys.exit(-1)
def _get_model_name(section_name_):
model_name = config_ini.get(section_name_, "model_name", fallback=None)
if model_name is None:
model_name = config_ini.get(section_name_, "_name_or_path", fallback="unknown")
return model_name
params_from_model_config = {
section_name: {"model_type": config_ini.get(section_name, "model_type", fallback="GPT"),}
for section_name in sections_names_with_model_parameters
}
# ensure that for all models it is obtained same parameters
parameters_from_all_sections = list(set(map(lambda x: tuple(x.items()), params_from_model_config.values())))[0]
if len(parameters_from_all_sections) != len(list(params_from_model_config.values())[0]):
LOGGER.error(
"Found no consistency between model parameters: %s (%d != %d)",
params_from_model_config,
len(parameters_from_all_sections),
len(list(params_from_model_config.values())[0]),
)
sys.exit(-1)
params_from_model_config = list(params_from_model_config.values())[0]
return params_from_model_config
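# Sketch of the config.ini layout this parser expects (section and key names
# follow the lookups above; the values are made up for illustration):
#   [gpt]
#   model_name = my_gpt_model
#   model_type = GPT
#   [structure]                   ; ignored
#   [ft_instance_hyperparameter]  ; ignored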
def _update_template(
config, model_name, default_model_filename, max_batch_size, parameters, just_update_parameters: bool = True
):
"""
Update config.pbtxt decoded from file.
Input:
config: Triton config
model_name: model name
default_model_filename: model file name
max_batch_size: maximum batch size for Triton configuration
        just_update_parameters: if True, merge the new parameters into the existing ones; if False, replace them entirely
Returns:
updated config
"""
config["name"] = model_name
config["default_model_filename"] = default_model_filename
config["max_batch_size"] = max_batch_size
parameters = {k: {"string_value": str(v)} for k, v in parameters.items()}
replace = not just_update_parameters
if replace:
config["parameters"] = parameters
else:
        # merge: new values overwrite matching keys, other existing entries are kept
config["parameters"] = {**config["parameters"], **parameters}
return config
def main():
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
parser = argparse.ArgumentParser(description="Generate Triton model config file")
parser.add_argument("--model-train-name", help="Name of trained model", required=True)
parser.add_argument("--template-path", help="Path to template of Triton model config file", required=True)
parser.add_argument("--config-path", help="Path to output Triton model config file", required=True)
parser.add_argument("--ft-checkpoint", help="Path to FasterTransformer checkpoint", required=True)
parser.add_argument("--max-batch-size", type=int, help="Max batch size of Triton batcher", required=True)
parser.add_argument("--pipeline-model-parallel-size", type=int, help="Pipeline model parallel size", required=True)
parser.add_argument("--tensor-model-parallel-size", type=int, help="Tensor model parallel size", required=True)
parser.add_argument(
"--data-type", choices=["fp32", "fp16", "bf16"], help="Data type of weights in runtime", required=True
)
parser.add_argument(
"--int8-mode", action="store_true", help="Enable int8 mode in FasterTransformer Triton backend"
)
parser.add_argument(
"--enable-custom-all-reduce",
action="store_true",
help="Enable custom all reduce ops in FasterTransformer Triton backend",
)
args = parser.parse_args()
ft_checkpoint_path = pathlib.Path(args.ft_checkpoint)
config_ini_path = ft_checkpoint_path / "config.ini"
config_ini = configparser.ConfigParser()
with config_ini_path.open("r") as config_file:
config_ini.read_file(config_file)
# parse template
template_path = pathlib.Path(args.template_path)
template_payload = template_path.read_text()
model_config_proto = google.protobuf.text_format.Parse(
template_payload, tritonclient.grpc.model_config_pb2.ModelConfig()
)
triton_model_config_template = google.protobuf.json_format.MessageToDict(
model_config_proto, preserving_proto_field_name=True
)
# update template
params_from_model_config = _get_model_parameters(config_ini)
parameters = {
**{
"data_type": args.data_type.lower(),
"pipeline_para_size": args.pipeline_model_parallel_size,
"tensor_para_size": args.tensor_model_parallel_size,
"model_checkpoint_path": ft_checkpoint_path.as_posix(),
"int8_mode": int(args.int8_mode),
"enable_custom_all_reduce": int(args.enable_custom_all_reduce),
},
**params_from_model_config,
}
model_name = args.model_train_name
updated_triton_model_config = _update_template(
triton_model_config_template, model_name, ft_checkpoint_path.name, args.max_batch_size, parameters
)
# store template
updated_triton_model_config = google.protobuf.json_format.ParseDict(
updated_triton_model_config, tritonclient.grpc.model_config_pb2.ModelConfig()
)
updated_triton_model_config_payload = google.protobuf.text_format.MessageToBytes(updated_triton_model_config)
config_path = pathlib.Path(args.config_path)
config_path.parent.mkdir(parents=True, exist_ok=True)
with config_path.open("wb") as config_file:
config_file.write(updated_triton_model_config_payload)
LOGGER.info("Config file successfully generated and written to: %s", config_path)
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/export_scripts/prepare_triton_model_config.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from lm_eval import tasks
try:
from nemo.utils.get_rank import is_global_rank_zero
except ModuleNotFoundError:
print("Importing NeMo module failed, checkout the NeMo submodule")
def parse_args(parser_main):
# parser = argparse.ArgumentParser()
parser = parser_main.add_argument_group(title="download-tasks")
parser.add_argument("--tasks", default="all_tasks")
parser.add_argument("--cache_dir", default="")
# return parser.parse_args()
return parser_main
def main():
parser = argparse.ArgumentParser()
args, unknown_args = parse_args(parser).parse_known_args()
if args.tasks == "all_tasks":
task_names = tasks.ALL_TASKS
else:
task_names = args.tasks.split(",")
if is_global_rank_zero():
print("***** Downloading tasks data...")
tasks.get_task_dict(task_names, args.cache_dir)
print("***** Tasks data downloaded.")
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/download.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import os
import random
import string
import subprocess
import sys
import time
from datetime import datetime
from typing import Union
from lm_eval import base, evaluator, models, tasks, utils
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
from omegaconf import OmegaConf
def parse_args(parser_main):
# parser = argparse.ArgumentParser()
parser = parser_main.add_argument_group(title="evaluate-tasks")
# Experiment
parser.add_argument(
"--name",
dest="experiment_name",
type=str,
default="",
help="A string identifier/name for the experiment to be run "
"- it will be appended to the output directory name, before the timestamp",
)
parser.add_argument(
"--comment",
type=str,
default="",
help="An optional comment/description of the experiment. "
"Will be included in the configuration JSON file and "
"potentially other output files.",
)
parser.add_argument(
"--no_timestamp",
action="store_true",
help="If set, a timestamp and random suffix will NOT be appended to the output directory name "
"(unless no `name` is specified)",
)
parser.add_argument("--tasks", default="all_tasks")
parser.add_argument("--cache_dir", default="")
parser.add_argument(
"--eval_seed", type=int, default=1234, help="Random seed used for python, numpy, [pytorch, and cuda.]",
)
parser.add_argument(
"--limit", type=int, default=None, help="If specified, will limit evaluation set to this number of samples",
)
# I/O
parser.add_argument(
"--output_path",
type=str,
default=".",
help="Path to output root directory. Must exist. An experiment directory containing metrics, "
"predictions, configuration etc will be created inside.",
)
parser.add_argument(
"--serialize_predictions",
action="store_true",
help="If set, the model's predictions (and other information) will be serialized to disk.",
)
# H/W configuration
parser.add_argument("--batch_size", type=int, default=1)
# Warning: cuda device is the only way it could work.
parser.add_argument("--device", type=str, default="cuda")
# Model
parser.add_argument("--model", required=True)
parser.add_argument(
"--nemo_model", type=str, default=None, required=False, help="Pass path to model's .nemo file",
)
parser.add_argument(
"--checkpoint_folder",
type=str,
default=None,
required=False,
help="If not using a .nemo file. Path to PTL checkpoints saved during training. Ex: "
"/raid/nemo_experiments/megatron_gpt/checkpoints",
)
parser.add_argument(
"--checkpoint_name",
type=str,
default=None,
required=False,
help="If not using a .nemo file. Name of checkpoint to be used. Ex: "
"megatron_gpt--val_loss=6.34-step=649-last.ckpt",
)
parser.add_argument(
"--tensor_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--pipeline_model_parallel_size", type=int, default=1, required=False,
)
parser.add_argument(
"--hparams_file",
type=str,
default=None,
required=False,
help="If not using a .nemo file. Path to config for restoring. It's created during training and may need to be modified during restore if restore environment is different than training. Ex: /raid/nemo_experiments/megatron_gpt/hparams.yaml",
)
parser.add_argument("--precision", default=16, help="PyTorch Lightning Trainer precision flag")
parser.add_argument("--vocab_file", default=None)
parser.add_argument("--merge_file", default=None)
parser.add_argument(
"--prompt_dataset_paths",
default=None,
help="Jsonl-format prompt dataset for evaluation. Multiple dataset can be split with ','",
)
parser.add_argument(
"--disable_special_tokens",
action="store_true",
help="Whether to disable virtual tokens in prompt model evaluation. This is equivalent to evaluate without prompt-/p-tuning.",
)
# Prompt
parser.add_argument("--provide_description", action="store_true")
parser.add_argument("--num_fewshot", type=int, default=0)
parser.add_argument(
"--filter_shots",
action="store_true",
help="Filter examples used as shots in the prompt, "
"e.g. exclude examples of the same type as the sample under evaluation.",
)
# HANS
parser.add_argument("--ratio_positive", type=float, default=None, help="Ratio of examples with a positive label")
parser.add_argument(
"--mix_mode",
type=str,
default="shuffle",
choices=["shuffle", "interleave_first", "interleave_last", "pos_first", "neg_first"],
help="How to mix (arrange order of) positive and negative shot examples in the prompt",
)
parser.add_argument(
"--interleave_width",
type=int,
default=1,
help="The number of consecutive examples with the same label, when `mix_mode` is interleave",
)
# Generation tasks
parser.add_argument("--generate-max-token", type=int, default=0, help="Max tokens to generate.")
# return parser.parse_args()
return parser_main
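# Example invocation (the model name, paths and task below are placeholders,
# not values shipped with the launcher):
#   python evaluate.py --model nemo-gpt3 --nemo_model /path/to/model.nemo \
#       --tasks lambada --batch_size 4 --output_path ./eval_results \
#       --tensor_model_parallel_size 1 --pipeline_model_parallel_size 1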
def can_write_output(lm: Union[base.CachingLM, base.LM], args: argparse.Namespace) -> bool:
"""Only 1 process should print and dump results, this function would only return True
for 1 of the processes that has full output
"""
if isinstance(lm, base.CachingLM):
return True
elif lm.can_access_output():
return True
else:
return False
def setup_output_dir(args, local_args=None, unknown_args=None):
"""Prepare experiment output directories and save configuration.
Will UPDATE args
Input:
args: arguments object from argparse. Contains all arguments
local_args: arguments object from argparse, containing only arguments recognized by the parser of this script.
If specified, will also write these local arguments to a separate JSON file.
unknown_args: arguments object from argparse, containing only arguments NOT recognized by the parser of this script.
If specified, will also write these arguments to the local arguments JSON file.
Returns:
config: configuration dictionary
"""
# Create output directory
initial_timestamp = datetime.now()
output_path = args.output_path
if not os.path.isdir(output_path):
raise IOError(
"Root directory '{}', where the directory of the experiment will be created, must exist".format(
output_path
)
)
output_path = os.path.join(output_path, args.experiment_name)
formatted_timestamp = initial_timestamp.strftime("%Y-%m-%d_%H-%M-%S")
args.initial_timestamp = formatted_timestamp
if (not args.no_timestamp) or (len(args.experiment_name) == 0):
random.seed(args.eval_seed)
rand_suffix = "".join(random.choices(string.ascii_letters + string.digits, k=3))
output_path += "_" + formatted_timestamp + "_" + rand_suffix
# random.seed(args.eval_seed)
args.output_path = output_path
args.pred_dir = os.path.join(output_path, "predictions")
utils.create_dirs([args.pred_dir])
# Add file logging besides stdout
# file_handler = logging.FileHandler(os.path.join(args.output_path, 'output.log'))
# logging.addHandler(file_handler)
logging.info("Command:\n{}".format(" ".join(sys.argv))) # command used to run program
# Save configuration as a (pretty) json file
config = args.__dict__
# # TODO: Raises an error, because some megatron arguments are non-serializable
# with open(os.path.join(output_path, 'full_configuration.json'), 'w') as fp:
# json.dump(config, fp, indent=4)
with open(os.path.join(output_path, "command.txt"), "w") as fp:
fp.write(" ".join(sys.argv)) # command used to run program
fp.write("\nUnknown args: {}".format(unknown_args))
try:
git_hash = subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], cwd=os.path.dirname(os.path.abspath(__file__))
).decode()
git_diff = subprocess.check_output(["git", "diff"], cwd=os.path.dirname(os.path.abspath(__file__))).decode()
with open(os.path.join(output_path, "git.txt"), "w") as fp:
fp.write("Git hash: {}\n".format(git_hash))
fp.write(git_diff)
logging.info("Git hash: {}".format(git_hash))
except Exception as x:
logging.error("git version not found")
# raise x
if local_args:
local_config = local_args.__dict__ # local configuration dictionary
# update local configuration with the actual values used in the global configuration
for opt in local_config:
if opt in config:
local_config[opt] = config[opt]
with open(os.path.join(output_path, "eval_configuration.json"), "w") as fp:
json.dump(local_config, fp, indent=4)
logging.info("Stored configuration file(s) in '{}'".format(output_path))
return args
def _inject_model_parallel_rank(filepath, tensor_model_parallel_size=1, pipeline_model_parallel_size=1):
"""
Injects tensor/pipeline model parallel ranks into the filepath.
Does nothing if not using model parallelism.
"""
tensor_model_parallel_rank = pipeline_model_parallel_rank = 0
if tensor_model_parallel_size > 1 or pipeline_model_parallel_size > 1:
# filepath needs to be updated to include mp_rank
dirname = os.path.dirname(filepath)
basename = os.path.basename(filepath)
if pipeline_model_parallel_size is None or pipeline_model_parallel_size == 1:
filepath = f"{dirname}/mp_rank_{tensor_model_parallel_rank:02d}/{basename}"
else:
filepath = f"{dirname}/tp_rank_{tensor_model_parallel_rank:02d}_pp_rank_{pipeline_model_parallel_rank:03d}/{basename}"
return filepath
else:
return filepath
def main():
total_start_time = time.time()
parser = argparse.ArgumentParser()
eval_args, unknown_args = parse_args(parser).parse_known_args()
args = eval_args
assert args is not None
if "nemo-gpt3" in args.model:
assert args.device == "cuda", "devices == 'cuda' are required to run nemo evaluations."
checkpoint_folder = args.checkpoint_folder
checkpoint_name = args.checkpoint_name
hparams_file = args.hparams_file
tensor_model_parallel_size = args.tensor_model_parallel_size
pipeline_model_parallel_size = args.pipeline_model_parallel_size
vocab_file = args.vocab_file
merge_file = args.merge_file
hparams_override_file = None
if args.nemo_model is None: # Not loading from .nemo checkpoint
# Checkpoint search
if checkpoint_name == "latest":
checkpoints = os.path.join(checkpoint_folder, "*.ckpt")
checkpoints = _inject_model_parallel_rank(
checkpoints, tensor_model_parallel_size, pipeline_model_parallel_size
)
checkpoint_list = glob.glob(checkpoints)
latest_checkpoint = max(checkpoint_list, key=os.path.getctime)
checkpoint_name = os.path.basename(latest_checkpoint)
checkpoint = os.path.join(checkpoint_folder, checkpoint_name)
checkpoint = _inject_model_parallel_rank(checkpoint, tensor_model_parallel_size, pipeline_model_parallel_size)
checkpoint_list = glob.glob(checkpoint)
if len(checkpoint_list) > 1:
raise ValueError("Too many checkpoints fit the checkpoint name pattern in conversion config.")
if len(checkpoint_list) == 0:
raise ValueError("No checkpoint found with the checkpoint name pattern in conversion config.")
args.checkpoint_name = os.path.basename(checkpoint_list[0])
# Create hparam override file for vocab ,merge, and etc.
if hparams_file is not None:
hparams_override_file = os.path.join(args.output_path, "hparams_override.yaml")
conf = OmegaConf.load(hparams_file)
if vocab_file is not None:
conf.cfg.tokenizer.vocab_file = vocab_file
if merge_file is not None:
conf.cfg.tokenizer.merge_file = merge_file
if "activations_checkpoint_granularity" in conf.cfg:
conf.cfg.activations_checkpoint_granularity = None
if "activations_checkpoint_method" in conf.cfg:
conf.cfg.activations_checkpoint_method = None
if "sequence_parallel" in conf.cfg:
conf.cfg.sequence_parallel = False
if is_global_rank_zero():
with open(hparams_override_file, "w") as f:
OmegaConf.save(config=conf, f=f)
wait_time = 0
while not os.path.exists(hparams_override_file):
time.sleep(1)
wait_time += 1
if wait_time > 60:
raise TimeoutError('Timeout waiting for config file to be created.')
args.hparams_file = hparams_override_file
lm = models.get_model(args.model)(args, batch_size=args.batch_size)
# Determine whether process is allowed to write to disk
# can_write_output() limits the processes allowed to enter clause
write_permission = can_write_output(lm, args)
if write_permission:
args = setup_output_dir(args, eval_args, unknown_args)
if args.limit:
logging.warning(
"At most {} samples will be used. --limit SHOULD ONLY BE USED FOR TESTING. "
"REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.".format(args.limit)
)
if args.filter_shots:
logging.info("Few-shot example shots will be filtered")
else:
logging.info("Few-shot example shots will NOT be filtered")
# prompt tasks, forcing prompt task
if "prompt" in args.tasks:
assert args.prompt_dataset_paths is not None, "Prompt models evaluation requires dataset provided from user."
prompt_dataset_paths = args.prompt_dataset_paths.split(",")
task_names = args.tasks.split(",")
task_dict = tasks.get_prompt_task_dict(
task_names,
model=lm.model,
dataset_paths=prompt_dataset_paths,
disable_special_tokens=args.disable_special_tokens,
)
else:
# regular tasks
if args.tasks == "all_tasks":
task_names = tasks.ALL_TASKS
else:
task_names = args.tasks.split(",")
task_dict = tasks.get_task_dict(task_names, args.cache_dir)
if args.serialize_predictions:
no_serialization = [name for name, task in task_dict.items() if not hasattr(task, "serialize_results")]
if len(
no_serialization
): # Only serialize for those that have implemented the method, instead of raising exception
logging.error(
f"Model outputs for {no_serialization} task(s) will not be dumped. Please check the implementation of {no_serialization} to "
f"make sure you have implemented serialize_results."
)
raise Exception(
f"args.serialize_predictions is set for dumping results, but tasks: {no_serialization} do not implement the serialize_results method"
)
utils.set_seed(args.eval_seed)
results = evaluator.evaluate(
lm,
task_dict,
args.provide_description,
args.num_fewshot,
args.limit,
filter_shots=args.filter_shots,
serialize_predictions=args.serialize_predictions,
ratio_positive=args.ratio_positive,
mix_mode=args.mix_mode,
interleave_width=args.interleave_width,
)
if write_permission:
summary = json.dumps(results["results"], indent=2)
logging.info("\n" + summary)
with open(os.path.join(args.output_path, "metrics.json"), mode="w") as fp:
fp.write(summary)
if "output" in results:
# TODO(GEO): maybe add a for loop over "taskX" in results['output'][taskX] to store each task separately
# Store predictions, prompts, labels etc per document as a (pretty) json file
predictions_filepath = os.path.join(args.pred_dir, args.experiment_name + "_predictions.json")
logging.info("Stored predictions in '{}'".format(predictions_filepath))
with open(predictions_filepath, mode="w") as fp:
json.dump(results, fp, indent=4)
# MAKE TABLE
from pytablewriter import LatexTableWriter, MarkdownTableWriter
md_writer = MarkdownTableWriter()
latex_writer = LatexTableWriter()
md_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
latex_writer.headers = ["Task", "Version", "Metric", "Value", "", "Stderr"]
values = []
for k, dic in results["results"].items():
version = results["versions"][k]
for m, v in dic.items():
if m.endswith("_stderr"):
continue
if m + "_stderr" in dic:
se = dic[m + "_stderr"]
values.append([k, version, m, "%.4f" % v, "±", "%.4f" % se])
else:
values.append([k, version, m, "%.4f" % v, "", ""])
k = ""
version = ""
md_writer.value_matrix = values
latex_writer.value_matrix = values
if hparams_override_file is not None:
os.rename(hparams_override_file, os.path.join(args.output_path, "hparams_override.yaml"))
logging.info(
f"{args.model}, limit: {args.limit}, provide_description: {args.provide_description}, num_fewshot: {args.num_fewshot}, batch_size: {args.batch_size}"
)
logging.info("\n" + md_writer.dumps())
total_time = time.time() - total_start_time
logging.info("Total runtime: {} hours, {} minutes, {} seconds".format(*utils.readable_time(total_time)))
logging.info("Evaluation complete!")
if __name__ == "__main__":
main()
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/evaluate.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
from collections.abc import Iterable
import numpy as np
import sacrebleu
import sklearn
# GEO: Why not numpy?
def mean(arr):
return sum(arr) / len(arr)
def pop_stddev(arr):
mu = mean(arr)
return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / len(arr))
def sample_stddev(arr):
mu = mean(arr)
return math.sqrt(sum([(x - mu) ** 2 for x in arr]) / (len(arr) - 1))
def mean_stderr(arr):
return sample_stddev(arr) / math.sqrt(len(arr))
def median(arr):
    # Sort a copy so the middle element is correct even when the input is not
    # already sorted (the original "GEO: Requires sorted array!" caveat).
    return sorted(arr)[len(arr) // 2]
def matthews_corrcoef(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
return sklearn.metrics.matthews_corrcoef(golds, preds)
def f1_score(items):
unzipped_list = list(zip(*items))
golds = unzipped_list[0]
preds = unzipped_list[1]
fscore = sklearn.metrics.f1_score(golds, preds)
return np.max(fscore)
def acc_all(items):
# Only count as correct if all answers are labeled correctly for each question
question_scoring_dict = {}
preds = list(zip(*items))[0]
docs = list(zip(*items))[1]
for doc, pred in zip(docs, preds):
question_id = doc["idx"]["question"]
if question_id not in question_scoring_dict:
question_scoring_dict[question_id] = []
gold_label = doc["label"] == 1
question_scoring_dict[question_id].append(gold_label == pred)
acc = np.mean([int(all(x)) for x in question_scoring_dict.values()])
return acc
def acc_all_stderr(items):
# Only count as correct if all answers are labeled correctly for each question
question_scoring_dict = {}
preds = list(zip(*items))[0]
docs = list(zip(*items))[1]
for doc, pred in zip(docs, preds):
question_id = doc["idx"]["question"]
if question_id not in question_scoring_dict:
question_scoring_dict[question_id] = []
gold_label = doc["label"] == 1
question_scoring_dict[question_id].append(gold_label == pred)
acc = mean_stderr([int(all(x)) for x in question_scoring_dict.values()])
return acc
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
"""Compute max metric between prediction and each ground truth."""
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def perplexity(items):
return math.exp(-mean(items))
def weighted_mean(items):
a, b = zip(*items)
return sum(a) / sum(b)
def weighted_perplexity(items):
return math.exp(-weighted_mean(items))
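# Illustrative note (not part of the original module): `perplexity` expects a list of
# per-example log-likelihoods, while the weighted variants expect (value, weight) pairs,
# e.g. (total_loglikelihood, num_words) per document as produced by perplexity-style tasks.
# >>> perplexity([-2.0, -4.0])                      # exp(-mean) = exp(3.0)
# 20.085536923187668
# >>> weighted_perplexity([(-6.0, 3), (-2.0, 1)])   # exp(-(-8.0) / 4) = exp(2.0)
# 7.38905609893065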
def bleu(items):
"""The Bilingual Evaluation Understudy Score, or BLEU for short, is a metric
for evaluating a generated sentence to a reference sentence. It counts matching
n-grams in the candidate translation to n-grams in the reference text, where
1-gram or unigram would be each token and a bigram comparison would be each
word pair. The comparison is made regardless of word order
Source: https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
Paper: https://www.aclweb.org/anthology/P02-1040/
Higher is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_bleu(preds, refs).score
def chrf(items):
"""chrF++ is a tool for automatic evaluation of machine translation output
based on character n-gram precision and recall enhanced with word n-grams.
Source: https://github.com/m-popovic/chrF
Paper: https://www.aclweb.org/anthology/W15-3049.pdf
    Higher is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_chrf(preds, refs).score
def ter(items):
"""Translation Error Rate is an error metric for machine translation that
measures the number of edits required to change a system output into one
of the references
Source: http://www.cs.umd.edu/~snover/tercom/
Paper: http://mt-archive.info/AMTA-2006-Snover.pdf
Lower is better
"""
refs = list(zip(*items))[0]
preds = list(zip(*items))[1]
refs, preds = _sacreformat(refs, preds)
return sacrebleu.corpus_ter(preds, refs).score
def is_non_str_iterable(obj):
return isinstance(obj, Iterable) and not isinstance(obj, str)
def _sacreformat(refs, preds):
"""Format refs and preds for sacrebleu corpus calculation. It is very particular"""
# Sacrebleu expects (List[str], List[List[str])
# e.g. sacrebleu.corpus_bleu([pred_t], [[ref1_stream], [ref2_stream], ...])
# Note [ref1_stream] is the first reference for each pred.
# So lists are size N and (M, N) for N preds and M possible refs for each pred
    # This is a different order of dimensions than I would expect
# We expect refs to be List[str] or List[List[str]], the outer list corresponding to preds
# Must become List[List[str]] with the inner list corresponding to preds
if not is_non_str_iterable(refs):
refs = list(refs)
if not is_non_str_iterable(refs[0]):
refs = [[ref] for ref in refs]
refs = list(zip(*refs))
    # Note the number of refs in each ref list must match the number of preds
# We expect preds to be List[str] or List[List[str]]. Must become List[str]
if not is_non_str_iterable(preds):
preds = list(preds)
if is_non_str_iterable(preds[0]):
assert len(preds[0]) == 1, f"Pred must be a str, was {preds[0]}"
preds = [pred[0] for pred in preds]
return refs, preds
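# Illustrative example (not part of the original module): the metric wrappers above take
# a list of (reference, prediction) pairs per document and reshape them for sacrebleu,
# which wants one reference *stream* per possible reference, each aligned with the preds.
# >>> _sacreformat(["ref one", "ref two"], ["pred one", "pred two"])
# ([('ref one', 'ref two')], ['pred one', 'pred two'])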
## stderr stuff
class _bootstrap_internal:
def __init__(self, f, n):
self.f = f
self.n = n
def __call__(self, v):
i, xs = v
rnd = random.Random()
rnd.seed(i)
res = []
for _ in range(self.n):
res.append(self.f(rnd.choices(xs, k=len(xs))))
return res
def bootstrap_stderr(f, xs, iters, workers=4):
import multiprocessing as mp
pool = mp.Pool(workers)
    # This gives a biased estimate of the stderr (i.e. with the mean, it gives something
    # equivalent to the stderr calculated without Bessel's correction in the stddev).
    # Unfortunately, I haven't been able to figure out what the right correction is
    # to make the bootstrap unbiased - I considered multiplying by sqrt(n/(n-1)) but
    # that would be ad-hoc and I can't prove that it would actually be an unbiased estimator.
    # Thankfully, it shouldn't matter much because our samples are usually pretty big anyway.
res = []
chunk_size = min(1000, iters)
from tqdm import tqdm
print("bootstrapping for stddev:", f.__name__)
for bootstrap in tqdm(
pool.imap(_bootstrap_internal(f, chunk_size), [(i, xs) for i in range(iters // chunk_size)]),
total=iters // chunk_size,
):
# sample w replacement
res.extend(bootstrap)
pool.close()
return sample_stddev(res)
def stderr_for_metric(metric, bootstrap_iters):
bootstrappable = [
median,
matthews_corrcoef,
f1_score,
perplexity,
bleu,
chrf,
ter,
]
if metric in bootstrappable:
return lambda x: bootstrap_stderr(metric, x, iters=bootstrap_iters)
stderr = {mean: mean_stderr, acc_all: acc_all_stderr}
return stderr.get(metric, None)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/metrics.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import os
import random
import re
import sys
import traceback
import numpy as np
from nemo.utils import logging
class ExitCodeError(Exception):
pass
def sh(x):
if os.system(x):
raise ExitCodeError()
def simple_parse_args_string(args_string):
"""
Parses something like
args1=val1,arg2=val2
Into a dictionary
"""
args_string = args_string.strip()
if not args_string:
return {}
arg_list = args_string.split(",")
args_dict = {}
for arg in arg_list:
k, v = arg.split("=", 1)
args_dict[k] = v
return args_dict
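# Illustrative example (not part of the original module):
# >>> simple_parse_args_string("arg1=val1,arg2=val2")
# {'arg1': 'val1', 'arg2': 'val2'}
# All values stay strings; callers are expected to cast them as needed.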
def join_iters(iters):
for iter in iters:
yield from iter
def chunks(iter, n):
arr = []
for x in iter:
arr.append(x)
if len(arr) == n:
yield arr
arr = []
if arr:
yield arr
def group(arr, fn):
res = collections.defaultdict(list)
for ob in arr:
res[fn(ob)].append(ob)
return list(res.values())
def general_detokenize(string):
string = string.replace(" n't", "n't")
string = string.replace(" )", ")")
string = string.replace("( ", "(")
string = string.replace("\" ", "\"")
string = string.replace(" \"", "\"")
string = re.sub(r" (['.,])", r"\1", string)
return string
def get_rolling_token_windows(token_list, prefix_token, max_seq_len, context_len):
"""
- context_len allows for a rolling window context, allowing each prediction window to potentially
condition on some context
:param token_list: list
List of tokens to be PREDICTED
:param max_seq_len: int
max_seq_len of model (or max_seq_len we want to use)
:param context_len: int
Amount of desired token context for prediction. Needs to be at least 1.
:param prefix_token: token
Dummy token like <eos> so the first token has something to condition on
:return: generator
Generator of tuples
(input_tokens, pred_tokens)
Note: Score only the last len(pred_tokens) logits of the LM
"""
assert 1 <= context_len <= max_seq_len
if not token_list:
return
# +1 offset, going from input->preds
pred_len = max_seq_len - context_len + 1
predicted = 0
# Special handling for first window: predict all tokens
first_seq_len = min(max_seq_len, len(token_list))
yield ([prefix_token] + token_list[: first_seq_len - 1], token_list[:first_seq_len])
predicted += first_seq_len
while predicted < len(token_list):
window_pred_len = min(len(token_list) - predicted, pred_len)
window_end = predicted + window_pred_len
yield (
token_list[window_end - max_seq_len - 1 : window_end - 1],
token_list[window_end - window_pred_len : window_end],
)
predicted += window_pred_len
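# Illustrative example (not part of the original module), mirroring the docstring above:
# >>> list(get_rolling_token_windows(list(range(10)), prefix_token=-1, max_seq_len=4, context_len=1))
# [([-1, 0, 1, 2], [0, 1, 2, 3]), ([3, 4, 5, 6], [4, 5, 6, 7]), ([5, 6, 7, 8], [8, 9])]
# Every token is predicted exactly once across the windows; the last window keeps a
# full-sized input even though only the final two tokens are scored.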
def make_disjoint_window(pair):
"""Takes output from get_rolling_token_windows and makes the context not overlap with the continuation"""
a, b = pair
return a[: -(len(b) - 1)], b
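# Illustrative example (not part of the original module): drop the overlap between the
# context and the continuation produced by get_rolling_token_windows.
# >>> make_disjoint_window(([3, 4, 5, 6], [4, 5, 6, 7]))
# ([3], [4, 5, 6, 7])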
class Reorderer:
def __init__(self, arr, fn):
self.size = len(arr)
arr = list(enumerate(arr))
arr = group(arr, lambda x: fn(x[1]))
arr = [([y[0] for y in x], x[0][1]) for x in arr]
arr.sort(key=lambda x: fn(x[1]))
self.arr = arr
def __len__(self):
return self.size
def get_reordered(self):
return [x[1] for x in self.arr]
def get_original(self, newarr):
res = [None] * self.size
cov = [False] * self.size
for (inds, _), v in zip(self.arr, newarr):
for ind in inds:
res[ind] = v
cov[ind] = True
assert all(cov)
return res
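# Illustrative example (not part of the original class): Reorderer deduplicates items that
# map to the same key, lets the caller run work once per unique key, then scatters the
# results back to the original positions.
# >>> ro = Reorderer(["b", "a", "b"], fn=lambda x: x)
# >>> ro.get_reordered()
# ['a', 'b']
# >>> ro.get_original(["A", "B"])
# ['B', 'A', 'B']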
def readable_time(time_difference):
"""Convert a float measuring time difference in seconds into a tuple of (hours, minutes, seconds)"""
hours = time_difference // 3600
minutes = (time_difference // 60) % 60
seconds = time_difference % 60
return hours, minutes, seconds
class Obj(object):
def __init__(self, dict_):
self.__dict__.update(dict_)
def dict2obj(d):
"""Convert dict `d` into an object ("struct")"""
return json.loads(json.dumps(d), object_hook=Obj)
def load_config(args):
"""
Returns a dictionary with the full experiment configuration settings.
If a json file is specified with `--config`, its contents will overwrite the defaults or other arguments as
extracted by argparse.
"""
config = args.__dict__ # configuration dictionary
if args.config_filepath is not None:
logging.info("Reading configuration ...")
try: # dictionary containing the entire configuration settings in a hierarchical fashion
with open(args.config_filepath) as cnfg:
json_config = json.load(cnfg)
config.update(json_config)
except:
logging.critical("Failed to load configuration file. Check JSON syntax and verify that files exist")
traceback.print_exc()
sys.exit(1)
return config
def create_dirs(dirs):
"""
Input:
dirs: a list of directories to create, in case these directories are not found
Returns:
exit_code: 0 if success, -1 if failure
"""
try:
for dir_ in dirs:
if not os.path.exists(dir_):
os.makedirs(dir_, exist_ok=True)
return 0
except Exception as err:
print("Creating directories error: {0}".format(err))
exit(-1)
def set_seed(seed):
"""the seed state is shared across the entire program, regardless of module
(confirmed for Python random, but most likely true for the others too). Numpy is likely not thread safe."""
random.seed(seed)
np.random.seed(seed)
# torch.manual_seed(seed)
# if args.n_gpu > 0:
# torch.cuda.manual_seed_all(seed)
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import itertools
import random
import time
import lm_eval.metrics
from nemo.utils import logging
def evaluate(
lm,
task_dict,
provide_description,
num_fewshot,
limit,
bootstrap_iters=100000,
filter_shots=True,
serialize_predictions=False,
**kwargs,
):
# TODO: completely refactor this entire function to not be a huge mess, ideally breaking it down into smaller pieces
# GEO TODO: I have the impression that a lot of data content is duplicated in many structures
# (task, task_docs, reqs, requests, request_origin). Converting everything to HF dataset objects may be a good alternative
task_dict_items = [
(name, task) for name, task in task_dict.items() if (task.has_validation_docs() or task.has_test_docs())
]
results = collections.defaultdict(dict)
versions = collections.defaultdict(dict)
requests = collections.defaultdict(list)
requests_origin = collections.defaultdict(list)
# if we ever run into issues where the eval tasks don't fit in memory and we can't afford a machine with bigger memory,
# we can always modify this plumbing to support that, but i didn't want to include it just yet because overengineering is bad
    # (or we could make it write the requests to disk and then read them back out again - probably using an sqlite db because of all the moving parts we have)
# TODO: we need unit tests & sanity checks or something to ensure that the return of `validation_docs` is stable
docs = {}
# get lists of each type of request
for task_name, task in task_dict_items:
versions[task_name] = task.VERSION
# default to test doc, fall back to val doc if validation unavailable
# TODO: the test-fallback-to-val system isn't final, we should revisit it at some point
if task.has_test_docs():
task_doc_func = task.test_docs
elif task.has_validation_docs():
task_doc_func = task.validation_docs
else:
raise RuntimeError(
f"Task {task_name} has neither test docs nor validation docs, please verify data is properly configured for this task."
)
# deterministically shuffle docs and chop off the first `limit` because sometimes docs are in some kind of order
task_docs = list(task_doc_func())
task_docs = list(
zip(range(len(task_docs)), task_docs)
) # use original sample order as ID for evaluation samples
rnd = random.Random()
rnd.seed(42)
rnd.shuffle(task_docs)
logging.info("Found {} {} documents ...".format(len(task_docs), task_name))
logging.info("Building requests for '{}' ...".format(task_name))
# GEO: Maybe reqs = map(lambda x: task.construct_requests(x, task.fewshot_context(x)), itertools.islice(task_docs, 0, limit))
for doc_id, doc in itertools.islice(task_docs, 0, limit):
# NOTE: shot_ids and doc_ids are not global within the entire dataset, they are valid and unique within their respective sets:
# i.e. for shot_ids usually this is the training set, for doc_ids usually this is the validation or test set.
# The user is supposed to know which sets are used to draw shots from and which to evaluate on.
shot_ids, ctx = task.fewshot_context(
doc=doc,
provide_description=provide_description,
num_fewshot=num_fewshot,
rnd=rnd,
filter_shot_examples=filter_shots,
**kwargs,
)
if isinstance(doc, dict):
doc["doc_id"] = doc_id
doc["shot_ids"] = shot_ids
docs[(task_name, doc_id)] = doc
reqs = task.construct_requests(doc, ctx) # GEO: this is a tuple, like (ll_true, ll_false)
if not isinstance(reqs, (list, tuple)):
reqs = [reqs]
for i, req in enumerate(reqs):
requests[req.type].append(
req
) # key(s) are 'loglikelihood', etc. Each is associated with a list of Request objects, which contain (context_str, candidate_str)
# i: index in requests for a single task instance. Each doc has as many requests as multiple choice questions.
# doc_id: unique id that we can get back to a doc using `docs`. Just an index corresponding to the order of app. in `task_docs`
# GEO: TODO: does it really need the `doc`? is this list necessary?
requests_origin[req.type].append((i, task_name, doc, doc_id)) # key(s) are 'loglikelihood', etc.
# all responses for each (task, doc)
process_res_queue = collections.defaultdict(list) # GEO: not a Queue though...
# execute each type of request
for reqtype, reqs in requests.items():
        # TODO: right now, this code runs multiple separate LM requests for multiple Requests differing
        # only in index. We could implement some kind of caching, but that would be more of a bandaid
        # solution. We could also implement some kind of autogrouping here; they should end up next to each other.
        # reqs is a list of request objects, as many as the samples * (num. possible answers)
logging.info("Running {} {} requests ...".format(len(reqs), reqtype))
start_time = time.time()
resps = getattr(lm, reqtype)(
[req.args for req in reqs]
) # GEO: call to model for processing. (Maybe can be replaced by batching function)
logging.info("Done in {:.3f} s".format(time.time() - start_time))
if lm.can_access_output():
resps = [
x if req.index is None else x[req.index] for x, req in zip(resps, reqs)
] # list of loglikelihoods (floats)
else:
resps = [None] * len(reqs)
logging.debug("Putting results in a queue for metric calculation ...")
for resp, (i, task_name, doc, doc_id) in zip(resps, requests_origin[reqtype]):
process_res_queue[(task_name, doc_id)].append(
(i, resp)
) # depending on task, for each (task, doc_id) can contain e.g. [(0, loglikelihood0), (1, loglikelihood1)]
vals = collections.defaultdict(list)
serialized_output = collections.defaultdict(list)
# unpack results and sort back in order and return control to Task
if lm.can_access_output():
logging.debug("Calculating individual metrics ...")
for (task_name, doc_id), responses in process_res_queue.items():
responses.sort(key=lambda x: x[0]) # this sorts by class of answer, i.e. 0, 1, ...
responses = [x[1] for x in responses] # calculated loglikelihood for each class
task = task_dict[task_name]
doc = docs[(task_name, doc_id)]
metrics = task.process_results(doc, responses)
for metric, value in metrics.items():
vals[(task_name, metric)].append(value)
if hasattr(task, "serialize_results") and serialize_predictions:
output = task.serialize_results(doc, responses)
output["metrics"] = metrics
serialized_output[task_name].append(output)
# aggregate results
logging.info("Aggregating metrics ...")
for (task_name, metric), items in vals.items():
task = task_dict[task_name]
results[task_name][metric] = task.aggregation()[metric](items)
stderr = lm_eval.metrics.stderr_for_metric(task.aggregation()[metric], bootstrap_iters=bootstrap_iters)
if stderr is not None:
results[task_name][metric + "_stderr"] = stderr(items)
return_dict = {"results": results, "versions": versions}
# NOTE(GEO): consider returning only the IDs of samples and corresponding predictions.
# All other information can be looked up in post-processing based on ID. This will reduce storage and I/O operations.
if serialize_predictions:
return_dict["output"] = serialized_output
return return_dict
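# Illustrative sketch (not part of the original module) of how this function is typically
# driven from the harness entry point; argument values and the task name are hypothetical.
#
#   results = evaluate(
#       lm,                                   # an lm_eval.base.LM implementation
#       {"lambada": some_lambada_task},       # task_dict
#       provide_description=False,
#       num_fewshot=0,
#       limit=None,
#   )
#   results["results"]   # per-task metric dicts, e.g. {"lambada": {"ppl": ..., "acc": ...}}
#   results["versions"]  # per-task version numbers, e.g. {"lambada": 0}
#   # results["output"] is present only when serialize_predictions=True.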
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/evaluator.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import enum
import hashlib
import json
import os
import re
from functools import partial
import numpy as np
import spacy
from lm_eval.metrics import mean, weighted_mean, weighted_perplexity
from sqlitedict import SqliteDict
def _SPACY_NLP(*args, **kwargs):
    # Lazily load the spaCy English pipeline on first use, then rebind the module-level
    # name to the loaded pipeline so later calls go straight to spaCy without reloading.
    global _SPACY_NLP
    nlp = spacy.load("en_core_web_sm")
    _SPACY_NLP = nlp
    return nlp(*args, **kwargs)
class LM(abc.ABC):
def __init__(self):
self.cache_hook = CacheHook(None)
@abc.abstractmethod
def loglikelihood(self, requests):
"""Compute log-likelihood of generating a continuation from a context.
        Downstream tasks should attempt to use loglikelihood instead of other
LM calls whenever possible.
:param requests: list
A list of pairs (context, continuation)
context: str
Context string. Implementations of LM must be able to handle an
empty context string.
continuation: str
The continuation over which log likelihood will be calculated. If
there is a word boundary, the space should be in the continuation.
For example, context="hello" continuation=" world" is correct.
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
@abc.abstractmethod
def loglikelihood_rolling(self, requests):
"""Compute full log-likelihood of a string, with no truncation, for perplexity computation
- We will use the full max context length of the model.
- For inputs that exceed the max context length, we divide the tokenized string into chunks of up to
the max context length.
- IMPORTANT: Each document's loglikelihood/perplexity is computed *separately*, unlike other implementations
which may simply concatenate multiple documents together.
- IMPORTANT: We maximize the amount of context for each prediction. Specifically, for inputs that we break into
            multiple chunks, the last input will still have a full-sized context.
Example:
Input tokens: [ 0 1 2 3 4 5 6 7 8 9 ]
Prefix: EOT
Max context length: 4
Resulting input/prediction pairs:
INPUT: EOT 0 1 2
PRED: 0 1 2 3
INPUT: 3 4 5 6
PRED: 4 5 6 7
INPUT: 5 6 7 8
PRED: 8 9
Observe that:
1. Each token is predicted exactly once
2. For the last pair, we provide the full context, but only score the last two tokens
:param requests: list
A list of strings
string: str
                String for which we are computing per-token loglikelihood
:return: list
A list of pairs (logprob, isgreedy)
logprob: float
The log probability of `continuation`
isgreedy:
Whether `continuation` would be generated by greedy sampling from `context`
"""
pass
# TODO: Add an optional max length
@abc.abstractmethod
def greedy_until(self, requests):
"""Generate greedily until a stopping sequence
:param requests: list
A list of pairs (context, until)
context: str
Context string
until: [str]
The string sequences to generate until. These string sequences
may each span across multiple tokens, or may be part of one token.
:return: list
A list of strings continuation
continuation: str
The generated continuation.
"""
pass
@classmethod
def create_from_arg_string(cls, arg_string, *args, **kwargs):
"""Constructor method, in case models need additional arguments
e.g. OpenAI API engine, paths for loading, other params
:param arg_string: str
Left up to individual model class to handle
"""
return cls()
def set_cache_hook(self, cache_hook):
self.cache_hook = cache_hook
@abc.abstractmethod
def can_access_output(self):
"""
Megatron model may use pipeline parallelism. In this case only the last GPU in a pipeline has access to the actual outputs.
We need to check for this and only do metrics computation on processes that can actually access results.
"""
pass
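# Illustrative note (not part of the original module), with hypothetical values: a concrete
# LM implementation receives the request argument tuples and returns one result per request.
#
#   lm.loglikelihood([("Question: what is 2+2?\nAnswer:", " 4")])
#   # -> e.g. [(-1.23, True, ...)]  # logprob of " 4", whether greedy decoding yields it,
#   #                               # plus any extra per-token info a subclass chooses to return
#
# Note that the leading space belongs to the continuation (" 4"), per the docstring above.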
class ResultPreprocessing(enum.Enum):
NONE = enum.auto()
FIRST_SENTENCE = enum.auto()
class Task(abc.ABC):
"""A task represents an entire benchmark including its dataset, problems,
answers, and evaluation methods. See BoolQ for a simple example implementation
A `doc` can be any python object which represents one instance of evaluation.
This is usually a dictionary e.g.
{"question": ..., "answer": ...} or
{"question": ..., question, answer)
"""
RESULT_PREPROCESSING = ResultPreprocessing.NONE
def __init__(self):
self.download()
self._training_docs = None
self._fewshot_docs = None
def download(self):
"""Downloads the task dataset if necessary"""
pass
@abc.abstractmethod
def has_training_docs(self):
"""Whether the task has a training set"""
pass
@abc.abstractmethod
def has_validation_docs(self):
"""Whether the task has a validation set"""
pass
@abc.abstractmethod
def has_test_docs(self):
"""Whether the task has a test set"""
pass
def training_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def validation_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def test_docs(self):
"""
:return: Iterable[obj]
            An iterable of any object that doc_to_text can handle
"""
return []
def sample_examples(self, examples, k, rnd, **kwargs):
"""Sample k examples out of the iterable `examples`, using provided random number generator.
:param examples: iterable of tuples (doc_id, doc): iterable of shot examples
:param k: number of examples to be included in the prompt
:param rnd: initialized random number generator object, e.g. rnd = random.Random(1337)
:return: iterable of tuples (doc_id, doc): iterable of k shot examples
"""
return rnd.sample(examples, k)
def fewshot_examples(self, k, rnd, filter_func=None, **kwargs):
"""
Draw k samples from the training dataset using random generator `rnd`.
If `filter_func` is provided, it will be used to filter examples/documents (keep or exclude from sampling)
:param k: number of examples to be included in the prompt
:param rnd: initialized random number generator object, e.g. rnd = random.Random(1337)
        :param filter_func: function taking an iterable and returning a potentially smaller, filtered iterable
:return: iterable of tuples (doc_id, doc): iterable of shot examples
"""
if self._training_docs is None:
self._training_docs = list(self.training_docs())
self._examples = list(
zip(range(len(self._training_docs)), self._training_docs)
) # NOTE: compute each time if necessary to save memory
if filter_func is not None:
examples = filter_func(self._examples)
else:
examples = self._examples
return self.sample_examples(examples, k, rnd, **kwargs)
@abc.abstractmethod
def doc_to_text(self, doc):
pass
@abc.abstractmethod
def doc_to_target(self, doc):
pass
@abc.abstractmethod
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
Each document has as many requests as loglikelihoods to be calculated (multiple choice questions).
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
pass
def preprocess_results(self, mode, results):
"""Preprocesses results based on some preprocessing mode.
:param mode:
The preprocessing mode, an enum value from ResultPreprocessing.
:param results:
The results of the requests created in construct_requests.
"""
if mode not in ResultPreprocessing:
raise ValueError(
f"Invalid mode, expected type {ResultPreprocessing.__name__} but got {type(mode).__name__}"
)
if mode is ResultPreprocessing.NONE:
preprocessed = results
elif mode is ResultPreprocessing.FIRST_SENTENCE:
preprocessed = []
for result in results:
if result:
spacy_doc = _SPACY_NLP(result)
preprocessed.append(str(next(spacy_doc.sents)))
else:
preprocessed.append(result)
else:
raise RuntimeError(f"Unimplemented mode: {mode}")
return preprocessed
def compute_doc_metrics(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document.
Results are initially preprocessed based on the value of the class attribute
`RESULT_PREPROCESSING` before being passed to `process_results`.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
results = self.preprocess_results(self.RESULT_PREPROCESSING, results)
return self.process_results(doc, results)
@abc.abstractmethod
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
pass
@abc.abstractmethod
def aggregation(self):
"""
:returns: {str: [metric_score] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metric scores
"""
pass
@abc.abstractmethod
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
pass
def fewshot_description(self):
return ""
def filter_shots(self, shot_examples, doc):
"""
Selectively keep only part of all possible shot examples,
potentially based on characteristics of the document under examination.
:param shot_examples: iterable of tuples (doc_id, doc)): samples to be used as shots
:param doc: sample, document under examination
:return: iterable of tuples (doc_id, doc): filtered iterable of shot examples
"""
raise (
NotImplementedError(
"`filter_shots` must be implemented in child Task in order to use `filter_shot_examples=True`"
)
)
def fewshot_context(self, doc, num_fewshot, provide_description, rnd, filter_shot_examples=False, **kwargs):
"""Construct and format full prompt string for a given sample, optionally including description and shot examples
:param doc: document object corresponding to the sample under examination
:param num_fewshot: number of examples to be included in the prompt
:param provide_description: (bool), whether to prepend natural language description
:param rnd: initialized random number generator object, e.g. rnd = random.Random(1337)
:param filter_shot_examples: If True, will make sure to exclude certain samples from the prompt context, based
on member `filter_shots` function
        :return: (shot_ids, context_str): tuple of (iterable of shot example IDs, string corresponding to context/prompt)
"""
raw_description = self.fewshot_description()
description = (raw_description + "\n===\n\n") if provide_description and raw_description else ""
if num_fewshot == 0:
labeled_examples = ""
shot_ids = []
else:
# for sets with no training docs, draw from other set *but ensure no overlap with current doc*
if self.has_training_docs():
if filter_shot_examples:
fewshotex = self.fewshot_examples(
k=num_fewshot, rnd=rnd, filter_func=partial(self.filter_shots, doc=doc), **kwargs,
)
else:
fewshotex = self.fewshot_examples(k=num_fewshot, rnd=rnd, **kwargs)
else:
if self._fewshot_docs is None:
self._fewshot_docs = list(
self.validation_docs() if self.has_validation_docs() else self.test_docs()
)
self._fewshot_docs = list(zip(range(len(self._fewshot_docs)), self._fewshot_docs))
if filter_shot_examples:
fewshotex = self.filter_shots(self._fewshot_docs, doc)
else:
fewshotex = self._fewshot_docs
fewshotex = self.sample_examples(fewshotex, num_fewshot + 1, rnd, **kwargs)
# get rid of the doc that's the one we're evaluating, if it's in the fewshot
# works because dictionary-like objects support equality operation in Python
fewshotex = [x for x in fewshotex if x != doc][:num_fewshot]
shot_ids, shot_docs = zip(*fewshotex)
labeled_examples = (
"\n\n".join([self.doc_to_text(doc) + self.doc_to_target(doc) for doc in shot_docs]) + "\n\n"
)
example = self.doc_to_text(doc) # the document of interest, main part of the prompt
prompt_str = description + labeled_examples + example # the formatted prompt string
return shot_ids, prompt_str
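# Illustrative sketch (not part of the original class) of the prompt layout built above,
# with hypothetical task text and num_fewshot=2:
#
#   <description>\n===\n\n        (only if provide_description is set and the description is non-empty)
#   <shot 1 text><shot 1 target>\n\n
#   <shot 2 text><shot 2 target>\n\n
#   <doc_to_text(doc)>            (the example actually being scored)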
class MultipleChoiceTask(Task):
def doc_to_target(self, doc):
return " " + doc["choices"][doc["gold"]]
def construct_requests(self, doc, ctx):
lls = [rf.loglikelihood(ctx, " {}".format(choice))[0] for choice in doc["choices"]] + [ # get likelihoods
rf.loglikelihood(ctx, " {}".format(choice))[2] for choice in doc["choices"] # get tokens
]
return lls
def process_results(self, doc, results):
gold = doc["gold"]
num_choices = len(doc["choices"])
logprobs = results[:num_choices]
choice_tokens = results[num_choices:]
assert len(logprobs) == len(choice_tokens)
normed_logprobs = [lp / len(x) for lp, x in zip(logprobs, choice_tokens)]
acc = 1.0 if np.argmax(logprobs) == gold else 0.0
acc_norm = 1.0 if np.argmax(normed_logprobs) == gold else 0
# NOTE(zhunliu): the previous normed setting is not ideal, norm by char
# completion_len = np.array([float(len(i)) for i in doc["choices"]])
# acc_norm = 1. if np.argmax(results / completion_len) == gold else 0.
return {
"acc": acc,
"acc_norm": acc_norm,
}
def serialize_results(self, doc, results):
num_choices = len(doc["choices"])
logprobs = results[:num_choices]
choice_tokens = results[num_choices:]
assert len(logprobs) == len(choice_tokens)
pred = np.argmax(logprobs)
normed_logprobs = [lp / len(x) for lp, x in zip(logprobs, choice_tokens)]
pred_norm = np.argmax(normed_logprobs)
model_choice = doc["choices"][pred]
model_choice_norm = doc["choices"][pred_norm]
gold_choice = doc["choices"][doc["gold"]]
return {
"format": self.doc_to_text(doc) + " {choices}",
"model_choice": model_choice,
"model_choice_norm": model_choice_norm,
"gold_choice": gold_choice,
"choices": dict(zip(doc["choices"], results[:num_choices])),
}
def higher_is_better(self):
return {
"acc": True,
"acc_norm": True,
}
def aggregation(self):
return {
"acc": mean,
"acc_norm": mean,
}
def get_answer_ctx(self):
"""Return the answer-prompting string for the question.
        Most QA tasks have the format of "Question: xxx\nAnswer: "
In this case the answer-prompting string is "Answer: "
"""
raise NotImplementedError
class PerplexityTask(Task, abc.ABC):
def has_training_docs(self):
return False
def fewshot_description(self):
return ""
def fewshot_examples(self, k, rnd):
assert k == 0
return []
def fewshot_context(self, doc, num_fewshot, provide_description, rnd, **kwargs):
assert num_fewshot == 0
assert not provide_description
return ([], "")
def higher_is_better(self):
return {
"word_perplexity": False,
"byte_perplexity": False,
"bits_per_byte": False,
}
def doc_to_text(self, doc):
return ""
def doc_to_target(self, doc):
return doc
def construct_requests(self, doc, ctx):
assert not ctx
req = rf.loglikelihood_rolling(self.doc_to_target(doc))
return req
def process_results(self, doc, results):
(loglikelihood,) = results
words = self.count_words(doc)
bytes = self.count_bytes(doc)
return {
"word_perplexity": (loglikelihood, words),
"byte_perplexity": (loglikelihood, bytes),
"bits_per_byte": (-loglikelihood, self.count_bytes(doc)),
}
def aggregation(self):
return {
"word_perplexity": weighted_perplexity,
"byte_perplexity": weighted_perplexity,
"bits_per_byte": weighted_mean,
}
def count_bytes(self, doc):
return len(doc.encode("utf-8"))
def count_words(self, doc):
"""Downstream tasks with custom word boundaries should override this!"""
return len(re.split(r"\s+", doc))
req_ret_lens = {
"loglikelihood": 4,
"greedy_until": None,
"loglikelihood_rolling": None,
}
def hash_args(attr, args):
dat = json.dumps([attr] + list(args))
return hashlib.sha256(dat.encode("utf-8")).hexdigest()
class CacheHook:
def __init__(self, cachinglm):
if cachinglm is None:
self.dbdict = None
return
self.dbdict = cachinglm.dbdict
def add_partial(self, attr, req, res):
if self.dbdict is None:
return
hsh = hash_args(attr, req)
self.dbdict[hsh] = res
class CachingLM:
def __init__(self, lm, cache_db):
self.lm = lm
self.cache_db = cache_db
if os.path.dirname(cache_db):
os.makedirs(os.path.dirname(cache_db), exist_ok=True)
self.dbdict = SqliteDict(cache_db, autocommit=True)
# add hook to lm
lm.set_cache_hook(self.get_cache_hook())
def __getattr__(self, attr):
def fn(requests):
res = []
remaining_reqs = []
# figure out which ones are cached and which ones are new
for req in requests:
hsh = hash_args(attr, req)
if hsh in self.dbdict:
ob = self.dbdict[hsh]
assert ob is not None
res.append(ob)
else:
res.append(None)
remaining_reqs.append(req)
# actually run the LM
rem_res = getattr(self.lm, attr)(remaining_reqs)
# stick the new ones back into the list and also cache any of the new ones
resptr = 0
for req, r in zip(remaining_reqs, rem_res):
while res[resptr] is not None:
resptr += 1
res[resptr] = r
# caching
hsh = hash_args(attr, req)
self.dbdict[hsh] = r
self.dbdict.commit()
return res
return fn
def get_cache_hook(self):
return CacheHook(self)
class Request:
def __init__(self, type, args, index=None):
if type not in req_ret_lens.keys():
raise NotImplementedError("The request type {} is not implemented!".format(type))
self.type = type
self.args = args
self.index = index
def __iter__(self):
if req_ret_lens[self.type] is None:
raise IndexError("This request type does not return multiple arguments!")
i = 0
for i in range(req_ret_lens[self.type]):
yield Request(self.type, self.args, i)
def __getitem__(self, i):
if req_ret_lens[self.type] is None:
raise IndexError("This request type does not return multiple arguments!")
return Request(self.type, self.args, i)
def __eq__(self, other):
return self.type == other.type and self.args == other.args and self.index == other.index
def __repr__(self):
return f"Req_{self.type}{self.args}[{self.index}]\n"
class RequestFactory:
def __getattr__(self, attr):
def fn(*args):
return Request(attr, args)
return fn
rf = RequestFactory()
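# Illustrative example (not part of the original module): RequestFactory turns attribute
# access into Request construction, and indexing selects one of the multiple values a
# "loglikelihood" call returns (req_ret_lens["loglikelihood"] == 4). The prompt text here
# is hypothetical.
# >>> req = rf.loglikelihood("Question: what is 2+2?\nAnswer:", " 4")
# >>> req.type, req.index
# ('loglikelihood', None)
# >>> req[0].index, req[1].index
# (0, 1)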
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/base.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from best_download import download_file
from lm_eval.base import Task, rf
from lm_eval.metrics import mean, perplexity
from lm_eval.utils import sh
class LAMBADA(Task):
VERSION = 0
def __init__(self, cache_dir=""):
self.cache_dir = cache_dir
super().__init__()
def download(self):
path = (
self.cache_dir
if self.cache_dir
else os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "data")
)
path = os.path.join(path, "lambada")
sh("mkdir -p " + path)
try:
if not os.path.exists(path + "/lambada_test.jsonl"):
download_file(
"https://openaipublic.blob.core.windows.net/gpt-2/data/lambada_test.jsonl",
local_file=path + "/lambada_test.jsonl",
expected_checksum="4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226",
)
except:
            # fallback - for some reason best_download doesn't work all the time here
            sh(
                "wget https://openaipublic.blob.core.windows.net/gpt-2/data/lambada_test.jsonl -O "
                + path
                + "/lambada_test.jsonl"
            )
            sh(
                'echo "4aa8d02cd17c719165fc8a7887fddd641f43fcafa4b1c806ca8abc31fabdb226 '
                + path
                + '/lambada_test.jsonl" | sha256sum --check'
            )
self.cache_dir = path
def has_training_docs(self):
return False
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def training_docs(self):
pass
def validation_docs(self):
path = self.cache_dir
# with open("data/lambada/lambada_test.jsonl") as fh:
with open(path + "/lambada_test.jsonl") as fh:
for line in fh:
yield json.loads(line)
def test_docs(self):
pass
def preprocess(self, text):
text = text.replace("“", '"')
text = text.replace("”", '"')
text = text.replace("’", "'")
text = text.replace("‘", "'")
return text
def doc_to_text(self, doc):
return "\n" + self.preprocess(doc["text"].rsplit(" ", 1)[0]).strip()
def doc_to_target(self, doc):
return " " + self.preprocess(doc["text"].rsplit(" ", 1)[1])
def fewshot_description(self):
# TODO: figure out description
return ""
def construct_requests(self, doc, ctx):
ll, is_greedy, greedy_toks, cont_toks = rf.loglikelihood(ctx, self.doc_to_target(doc))
return ll, is_greedy, greedy_toks, cont_toks
def process_results(self, doc, results):
ll, is_greedy, *_ = results
return {"ppl": ll, "acc": int(is_greedy)}
def serialize_results(self, doc, results):
*_, greedy_toks, cont_toks = results
return {
"prompt": self.doc_to_text(doc),
"gold_answer": [x.replace("Ġ", " ") for x in cont_toks],
"model_answer": [x.replace("Ġ", " ") for x in greedy_toks],
}
def aggregation(self):
return {"ppl": perplexity, "acc": mean}
def higher_is_better(self):
return {"ppl": False, "acc": True}
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/lambada.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from lm_eval.base import rf
from ..metrics import mean
from .common import HFTask
"""
This evaluation of Winogrande uses partial evaluation as described by
Trinh & Le in Simple Method for Commonsense Reasoning (2018).
Reference: https://arxiv.org/abs/1806.02847
"""
class Winogrande(HFTask):
VERSION = 0
DATASET_PATH = "winogrande"
DATASET_NAME = "winogrande_xl"
answer_to_num = {"1": 0, "2": 1}
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def has_test_docs(self):
return False
def doc_to_text(self, doc):
return self.partial_context(doc, doc["option" + doc["answer"]])
def fewshot_description(self):
# TODO: redo description
return "Winograd schema sentence including a either a ___ blank with a missing word, making the pronoun ambiguous, or the same with the word filled in."
@classmethod
def partial_context(cls, doc, option):
# Substitute the pronoun in the sentence with the specified option
# and ignore everything after.
pronoun_loc = doc["sentence"].index("_")
return doc["sentence"][:pronoun_loc] + option
def doc_to_target(self, doc):
return self.partial_target(doc)
@classmethod
def partial_target(cls, doc):
# The target is everything after the document specified pronoun.
pronoun_loc = doc["sentence"].index("_") + 1
return " " + doc["sentence"][pronoun_loc:].strip()
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
target = self.partial_target(doc)
lls = []
for option in [doc["option1"], doc["option2"]]:
partial_ctx = self.partial_context(doc, option)
full_ctx = self.append_context(ctx, partial_ctx)
# lls.append(rf.loglikelihood(full_ctx, target)[0])
ll, is_preds, model_toks, gold_toks = rf.loglikelihood(full_ctx, target)
lls.extend([ll, is_preds, model_toks, gold_toks])
return lls
@classmethod
def append_context(cls, ctx, partial_ctx):
ctx = ctx.split("\n\n") # Each fewshot context is on its own new line.
ctx.pop() # Remove the correct context put in by `doc_to_text`.
return "\n\n".join([*ctx, partial_ctx]) if ctx else partial_ctx
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
num_choices = 2
lls = results[::4]
is_preds = results[1::4]
model_toks = results[2::4]
gold_toks = results[3::4]
assert len(lls) == len(is_preds) == len(model_toks) == len(gold_toks) == num_choices, doc
return {
# "acc": np.argmax(results) == self.answer_to_num[doc["answer"]]
"acc": int(np.argmax(lls) == self.answer_to_num[doc["answer"]])
}
def serialize_results(self, doc, results):
num_choices = 2
lls = results[::4]
is_preds = results[1::4]
model_toks = results[2::4]
gold_toks = results[3::4]
assert len(lls) == len(is_preds) == len(model_toks) == len(gold_toks) == num_choices, doc
return {
"sentence": doc["sentence"],
"option1": doc["option1"],
"option2": doc["option2"],
"model_choice": int(np.argmax(lls)) + 1,
"gold_choice": self.answer_to_num[doc["answer"]] + 1,
}
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
return {"acc": mean}
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
return {"acc": True}
| NeMo-Megatron-Launcher-master | launcher_scripts/nemo_launcher/collections/eval_harness/lm_eval/tasks/winogrande.py |