repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
DeepSpeed | DeepSpeed-master/tests/unit/inference/test_model_profiling.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import time
import pytest
import torch
import deepspeed
from transformers import pipeline
from unit.common import DistributedTest
from deepspeed.accelerator import get_accelerator
@pytest.mark.inference
@pytest.mark.parametrize("use_cuda_events", [True, False])
@pytest.mark.parametrize("enable_cuda_graph", [True, False])
class TestModelProfiling(DistributedTest):
world_size = 1
def test(self, enable_cuda_graph, use_cuda_events):
task = "fill-mask"
model = "bert-base-cased"
dtype = torch.float16
query = "I am a [MASK] model"
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "1"))
pipe = pipeline(task, model, framework="pt", device=get_accelerator().device_name(local_rank))
pipe.model = deepspeed.init_inference(pipe.model,
dtype=dtype,
mp_size=world_size,
replace_with_kernel_inject=True,
enable_cuda_graph=enable_cuda_graph)
pipe.model.profile_model_time(use_cuda_events=use_cuda_events)
e2e_times = []
model_times = []
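# Each iteration measures end-to-end latency with perf_counter_ns around the pipeline
# call and collects the timings reported by profile_model_time; the final loop checks
# that the reported model time never exceeds the measured end-to-end time.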
for _ in range(10):
get_accelerator().synchronize()
start = time.perf_counter_ns()
r = pipe(query)
get_accelerator().synchronize()
end = time.perf_counter_ns()
e2e_times.append((end - start) / 1e6) # convert ns to ms
model_times.extend(pipe.model.model_times())
for e2e_t, model_t in zip(e2e_times, model_times):
assert e2e_t >= model_t
| 1,813 | 31.981818 | 102 | py |
DeepSpeed | DeepSpeed-master/tests/unit/inference/test_inference.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import time
import torch
import pytest
import itertools
import deepspeed
from deepspeed.git_version_info import torch_info
from unit.common import DistributedTest
from packaging import version as pkg_version
from deepspeed.ops.op_builder import OpBuilder
from transformers import pipeline, AutoTokenizer
from transformers.models.t5.modeling_t5 import T5Block
from transformers.models.roberta.modeling_roberta import RobertaLayer
from huggingface_hub import HfApi
from deepspeed.model_implementations import DeepSpeedTransformerInference
from torch import nn
from deepspeed.accelerator import get_accelerator
rocm_version = OpBuilder.installed_rocm_version()
if rocm_version != (0, 0):
pytest.skip("skip inference tests on rocm for now", allow_module_level=True)
_bert_models = [
"bert-base-cased",
"bert-base-uncased",
"bert-large-cased",
"bert-large-uncased",
"bert-base-multilingual-cased",
"bert-base-multilingual-uncased",
"deepset/minilm-uncased-squad2",
"cross-encoder/ms-marco-MiniLM-L-12-v2",
"dslim/bert-base-NER",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"distilbert-base-cased-distilled-squad",
]
_roberta_models = [
"roberta-large",
"roberta-base",
"deepset/roberta-base-squad2",
"j-hartmann/emotion-english-distilroberta-base",
"Jean-Baptiste/roberta-large-ner-english",
]
_gpt_models = [
"gpt2",
"distilgpt2",
"Norod78/hebrew-bad_wiki-gpt_neo-tiny",
"EleutherAI/gpt-j-6b",
"EleutherAI/pythia-70m-deduped",
"bigscience/bloom-560m",
]
_opt_models = [
"facebook/opt-125m", # 125m, 1.7B, ..., 175B variants have the same model architecture.
"facebook/opt-350m", # 350m applies layer norm after the attention layer, which differs from the other variants.
]
_test_models = set(_bert_models + _roberta_models + _gpt_models + _opt_models)
_test_tasks = [
"fill-mask", "question-answering", "text-classification", "token-classification", "text-generation",
"text2text-generation", "summarization", "translation"
]
# Get a list of all models and mapping from task to supported models
_hf_models = HfApi().list_models()
_hf_model_names = [m.modelId for m in _hf_models]
_hf_task_to_models = {task: [m.modelId for m in _hf_models if m.pipeline_tag == task] for task in _test_tasks}
# Get all combinations of task:model to test
_model_w_tasks = [(m, t) for m, t in itertools.product(*[_test_models, _test_tasks]) if m in _hf_task_to_models[t]]
# Assign to pytest variables for testing
pytest.model_w_tasks = _model_w_tasks
pytest.mt_names = [f"{m}-{t}" for m, t in pytest.model_w_tasks]
@pytest.fixture(scope="module", autouse=True)
def verify_models():
# Verify all test models are registered in HF
_test_models_not_found = [m for m in _test_models if m not in _hf_model_names]
if _test_models_not_found:
pytest.fail(f"Model(s) not found in HuggingFace: {_test_models_not_found}")
# Verify all models are assigned to at least one task
_models_to_be_tested = set(m for m, t in _model_w_tasks)
_missing_task_models = _test_models.difference(_models_to_be_tested)
if _missing_task_models:
pytest.fail(f"Model(s) do not have an assigned task: {_missing_task_models}")
# Fixture to add skips for certain configurations
@pytest.fixture()
def invalid_test(model_w_task, dtype, enable_cuda_graph, enable_triton):
model, task = model_w_task
msg = ""
if enable_cuda_graph and (torch_info["cuda_version"] == "0.0"):
msg = "CUDA not detected, cannot use CUDA Graph"
elif enable_cuda_graph and pkg_version.parse(torch.__version__) < pkg_version.parse("1.10"):
msg = "CUDA Graph is only available in torch versions >= 1.10"
elif "gpt-j-6b" in model:
if dtype != torch.half:
msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
elif enable_cuda_graph:
msg = f"Not enough GPU memory to run {model} with CUDA Graph enabled"
elif "gpt-neox-20b" in model: # TODO: remove this when neox issues resolved
msg = "Skipping gpt-neox-20b for now"
elif ("gpt-neox-20b" in model) and (dtype != torch.half):
msg = f"Not enough GPU memory to run {model} with dtype {dtype}"
elif ("bloom" in model) and (dtype != torch.half):
msg = f"Bloom models only support half precision, cannot use dtype {dtype}"
elif ("bert" not in model.lower()) and enable_cuda_graph:
msg = "Non-bert/roberta models do not support CUDA Graph"
elif enable_triton and not (dtype in [torch.half]):
msg = "Triton is for fp16"
elif enable_triton and not deepspeed.HAS_TRITON:
msg = "triton needs to be installed for the test"
elif ("bert" not in model.lower()) and enable_triton:
msg = "Triton kernels do not support non-bert/roberta models yet"
return msg
""" Fixtures for inference config """
@pytest.fixture(params=pytest.model_w_tasks, ids=pytest.mt_names)
def model_w_task(request):
return request.param
@pytest.fixture(params=[torch.float, torch.half], ids=["fp32", "fp16"])
def dtype(request):
return request.param
@pytest.fixture(params=[True, False], ids=["CG", "noCG"])
def enable_cuda_graph(request):
return request.param
@pytest.fixture(params=[True, False], ids=["Triton", "noTriton"])
def enable_triton(request):
return request.param
""" Fixtures for running query """
@pytest.fixture
def query(model_w_task):
model, task = model_w_task
angle_bracket_mask_models = ["roberta", "camembert", "esm", "ibert", "luke", "mpnet", "yoso"]
if task == "fill-mask":
if any(map(lambda x: x in model, angle_bracket_mask_models)):
return "Hello I'm a <mask> model."
else:
return "Hello I'm a [MASK] model."
elif task == "question-answering":
return {
"question": "What's my name?",
"context": "My name is Clara and I live in Berkeley",
}
elif task == "text-classification":
return "DeepSpeed is the greatest"
elif task == "token-classification":
return "My name is jean-baptiste and I live in montreal."
elif task == "text-generation":
return "DeepSpeed is the greatest"
elif task == "text2text-generation":
return "Is this review positive or negative? Review: this is the best cast iron skillet you will ever buy"
elif task == "translation" or task == "summarization":
return "Hello, my dog is cute"
else:
raise NotImplementedError(f'query for task "{task}" is not implemented')
@pytest.fixture
def inf_kwargs(model_w_task):
model, task = model_w_task
if task == "text-generation":
if model == "EleutherAI/gpt-j-6b":
# This model on V100 is hitting memory problems that limit the number of output tokens
return {"do_sample": False, "max_length": 12}
return {"do_sample": False, "max_length": 20}
else:
return {}
""" Assertion fixture for verifying model outputs """
def fill_mask_assert(x, y):
return set(res["token_str"] for res in x) == set(res["token_str"] for res in y)
def question_answering_assert(x, y):
return x["answer"] == y["answer"]
def text_classification_assert(x, y):
return set(res["label"] for res in x) == set(res["label"] for res in y)
def token_classification_assert(x, y):
return set(ent["word"] for ent in x) == set(ent["word"] for ent in y)
def text_generation_assert(x, y):
return set(res["generated_text"] for res in x) == set(res["generated_text"] for res in y)
def text2text_generation_assert(x, y):
return set(res["generated_text"] for res in x) == set(res["generated_text"] for res in y)
def translation_assert(x, y):
return set(res["translation_text"] for res in x) == set(res["translation_text"] for res in y)
def summarization_assert(x, y):
return set(res["summary_text"] for res in x) == set(res["summary_text"] for res in y)
@pytest.fixture
def assert_fn(model_w_task):
model, task = model_w_task
assert_fn_dict = {
"fill-mask": fill_mask_assert,
"question-answering": question_answering_assert,
"text-classification": text_classification_assert,
"token-classification": token_classification_assert,
"text-generation": text_generation_assert,
"text2text-generation": text2text_generation_assert,
"translation": translation_assert,
"summarization": summarization_assert
}
assert_fn = assert_fn_dict.get(task, None)
if assert_fn is None:
raise NotImplementedError(f'assert_fn for task "{task}" is not implemented')
return assert_fn
# Used to verify DeepSpeed kernel injection worked with a model
def check_injection(model):
def verify_injection(module):
for child in module.children():
if isinstance(child, nn.ModuleList):
assert isinstance(child[0], DeepSpeedTransformerInference),\
"DeepSpeed-Inference Transformer kernels have not been injected in the model"
break
else:
verify_injection(child)
verify_injection(model)
@pytest.mark.inference
class TestModelTask(DistributedTest):
world_size = 1
def test(
self,
model_w_task,
dtype,
enable_cuda_graph,
enable_triton,
query,
inf_kwargs,
assert_fn,
invalid_test,
perf_meas=True,
):
if invalid_test:
pytest.skip(invalid_test)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# Load the model on CPU first to avoid OOM for large models @fp32
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
if dtype == torch.half:
pipe.model.half()
# Switch device to GPU after converting to half
device = torch.device(get_accelerator().device_name(local_rank))
pipe.device = device
pipe.model.to(device)
# Warm-up queries for perf measurement
#for i in range(10):
# _ = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
start = time.time()
bs_output = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
bs_time = time.time() - start
args = {
'mp_size': 1,
'dtype': dtype,
'replace_with_kernel_inject': True,
'enable_cuda_graph': enable_cuda_graph,
'use_triton': enable_triton,
'triton_autotune': False,
}
if pipe.tokenizer.model_max_length < deepspeed.ops.transformer.inference.config.DeepSpeedInferenceConfig(
).max_out_tokens:
args.update({'max_out_tokens': pipe.tokenizer.model_max_length})
pipe.model = deepspeed.init_inference(pipe.model, **args)
check_injection(pipe.model)
# Warm-up queries for perf measurement
#for i in range(10):
# _ = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
start = time.time()
ds_output = pipe(query, **inf_kwargs)
get_accelerator().synchronize()
ds_time = time.time() - start
if perf_meas:
print(
f"model={model}, task={task}, dtype={dtype}, cuda_graph={enable_cuda_graph}, triton={enable_triton}, bs_time={bs_time}, ds_time={ds_time}"
)
# facebook/opt* and some bigscience/bloom* models do not match the
# baseline exactly; adding an exception for them for now
if ("opt" in model) or ("bloom" in model):
bs_output = pipe(query, **inf_kwargs)
# These performance tests are only measuring the time for a single
# inference request, we just want to check that performance isn't terrible
#assert ds_time <= (bs_time * 1.1)
assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize("model_w_task", [("EleutherAI/gpt-neo-1.3B", "text-generation"),
("EleutherAI/gpt-neox-20b", "text-generation"),
("bigscience/bloom-3b", "text-generation"),
("EleutherAI/gpt-j-6b", "text-generation")],
ids=["gpt-neo", "gpt-neox", "bloom", "gpt-j"])
class TestMPSize(DistributedTest):
world_size = 2
def test(
self,
model_w_task,
dtype,
query,
inf_kwargs,
assert_fn,
invalid_test,
):
if invalid_test:
pytest.skip(invalid_test)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# We have to load these large models on CPU with pipeline because not
# enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model,
mp_size=self.world_size,
dtype=dtype,
replace_with_kernel_inject=True)
check_injection(pipe.model)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize("model_w_task", [("tiiuae/falcon-7b", "text-generation")], ids=["falcon"])
class TestAutoTP(DistributedTest):
world_size = 1
def test(
self,
model_w_task,
query,
inf_kwargs,
assert_fn,
):
# TODO: enable this test for H100 tests
pytest.skip("Not enough GPU memory for this on V100 runners")
model, task = model_w_task
dtype = torch.bfloat16
local_rank = int(os.getenv("LOCAL_RANK", "0"))
# We have to load these large models on CPU with pipeline because not
# enough GPU memory
tokenizer = AutoTokenizer.from_pretrained(model, use_fast=True)
pipe = pipeline(task,
model=model,
tokenizer=tokenizer,
torch_dtype=dtype,
trust_remote_code=True,
device=torch.device("cpu"),
framework="pt")
#bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model, mp_size=self.world_size, replace_with_kernel_inject=False)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
#print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
#assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize(
"model_w_task, injection_policy",
[
(("google/t5-v1_1-small", "text2text-generation"), {
T5Block: ('SelfAttention.o', 'EncDecAttention.o', 'DenseReluDense.wo')
}),
(("roberta-large", "fill-mask"), {
RobertaLayer: ('output.dense')
}),
],
ids=["t5", "roberta"],
)
@pytest.mark.parametrize("dtype", [torch.float], ids=["fp32"])
@pytest.mark.parametrize("enable_cuda_graph", [False], ids=["noCG"])
class TestInjectionPolicy(DistributedTest):
world_size = [1, 2]
def test(
self,
model_w_task,
injection_policy,
query,
inf_kwargs,
assert_fn,
invalid_test,
dtype,
enable_cuda_graph,
):
if invalid_test:
pytest.skip(invalid_test)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "2"))
# We have to load these large models on CPU with pipeline because not
# enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model,
mp_size=world_size,
dtype=dtype,
injection_policy=injection_policy)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
@pytest.mark.seq_inference
@pytest.mark.parametrize(
"model_w_task",
[
("Helsinki-NLP/opus-mt-en-de", "translation"),
],
ids=[
"marian",
],
)
@pytest.mark.parametrize("dtype", [torch.float16], ids=["fp16"])
@pytest.mark.parametrize("enable_cuda_graph", [False], ids=["noCG"])
class TestAutoTensorParallelism(DistributedTest):
world_size = [2]
def test(
self,
model_w_task,
query,
inf_kwargs,
assert_fn,
invalid_test,
dtype,
enable_cuda_graph,
):
if invalid_test:
pytest.skip(invalid_test)
model, task = model_w_task
local_rank = int(os.getenv("LOCAL_RANK", "0"))
world_size = int(os.getenv("WORLD_SIZE", "2"))
# We have to load these large models on CPU with pipeline because not
# enough GPU memory
pipe = pipeline(task, model=model, device=torch.device("cpu"), framework="pt")
bs_output = pipe(query, **inf_kwargs)
pipe.model = deepspeed.init_inference(pipe.model, mp_size=world_size, dtype=dtype)
# Switch device to GPU so that input tensors are not on CPU
pipe.device = torch.device(get_accelerator().device_name(local_rank))
ds_output = pipe(query, **inf_kwargs)
print(local_rank, "baseline", bs_output)
print(local_rank, "deepspeed", ds_output)
assert assert_fn(bs_output, ds_output)
@pytest.mark.nightly
@pytest.mark.parametrize(
"model_family, model_name",
(
["gpt2", "EleutherAI/gpt-neo-2.7B"],
["gpt2", "EleutherAI/gpt-j-6b"],
["gpt2", "gpt2-xl"],
),
)
@pytest.mark.parametrize("task", ["lambada_standard"])
class TestLMCorrectness(DistributedTest):
world_size = 1
def test(self, model_family, model_name, task):
# imports here to avoid import errors when pytest collects tests
import lm_eval
import lm_eval.models
import lm_eval.tasks
import lm_eval.evaluator
local_rank = os.getenv("LOCAL_RANK", "0")
device = torch.device(get_accelerator().device_name(local_rank))
dtype = torch.float
task_dict = lm_eval.tasks.get_task_dict([task])
if 'gpt-j-6b' in model_name:
dtype = torch.half
lm = lm_eval.models.get_model(model_family).create_from_arg_string(f"pretrained={model_name}",
{"device": "cpu"})
setattr(lm, model_family, getattr(lm, model_family).half().to(device))
lm._device = device
else:
lm = lm_eval.models.get_model(model_family).create_from_arg_string(
f"pretrained={model_name}", {"device": get_accelerator().device_name()})
get_accelerator().synchronize()
start = time.time()
bs_output = lm_eval.evaluator.evaluate(lm=lm, task_dict=task_dict)
get_accelerator().synchronize()
bs_time = time.time() - start
ds_model = deepspeed.init_inference(
getattr(lm, model_family),
mp_size=1,
dtype=dtype,
replace_with_kernel_inject=True,
enable_cuda_graph=False,
)
check_injection(ds_model)
setattr(lm, model_family, ds_model)
get_accelerator().synchronize()
start = time.time()
ds_output = lm_eval.evaluator.evaluate(lm=lm, task_dict=task_dict)
get_accelerator().synchronize()
ds_time = time.time() - start
ppl_diff = abs(bs_output["results"][task]["ppl"] - ds_output["results"][task]["ppl"])
#assert ds_time <= bs_time
assert ppl_diff < 0.01
| 20,794 | 34.668954 | 154 | py |
DeepSpeed | DeepSpeed-master/tests/unit/inference/test_checkpoint_sharding.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pytest
import torch
import deepspeed
from deepspeed.model_implementations import DeepSpeedTransformerInference
from unit.common import DistributedTest, DistributedFixture
from transformers import AutoConfig, AutoModelForCausalLM
import deepspeed.comm as dist
from huggingface_hub import snapshot_download
from transformers.utils import is_offline_mode
def check_dtype(model, expected_dtype):
def find_dtype(module):
for child in module.children():
if isinstance(child, DeepSpeedTransformerInference):
return child.attention.attn_qkvw.dtype
else:
found_dtype = find_dtype(child)
if found_dtype:
return found_dtype
found_dtype = find_dtype(model)
assert found_dtype, "Did not find DeepSpeedTransformerInference in model"
assert (found_dtype == expected_dtype), f"Expected transformer dtype {expected_dtype}, but found {found_dtype}"
@pytest.fixture(
params=["bigscience/bloom-560m", "EleutherAI/gpt-j-6B", "EleutherAI/gpt-neo-125M", "facebook/opt-125m"])
def model_name(request):
return request.param
@pytest.fixture(params=[torch.float16, torch.int8], ids=["fp16", "int8"])
def dtype(request):
return request.param
class save_shard(DistributedFixture):
world_size = 2
def run(self, model_name, class_tmpdir):
# Only write a checkpoint if one does not exist
if not os.path.isdir(os.path.join(class_tmpdir, model_name)):
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": True,
"dtype": torch.float16,
"enable_cuda_graph": False,
"tensor_parallel": {
"tp_size": world_size
},
"save_mp_checkpoint_path": os.path.join(str(class_tmpdir), model_name),
}
# Load model and save sharded checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
model = deepspeed.init_inference(model, config=inf_config)
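# Passing save_mp_checkpoint_path makes init_inference write the tensor-parallel shards
# (plus a ds_inference_config.json manifest) under class_tmpdir/model_name, which
# TestCheckpointShard below reloads through the "checkpoint" config entry.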
@pytest.mark.seq_inference
class TestCheckpointShard(DistributedTest):
world_size = 2
def test(self, model_name, dtype, class_tmpdir, save_shard):
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": True,
"dtype": dtype,
"enable_cuda_graph": False,
"tensor_parallel": {
"tp_size": world_size
},
"checkpoint": os.path.join(class_tmpdir, model_name, "ds_inference_config.json"),
}
# Load model on meta tensors
model_config = AutoConfig.from_pretrained(model_name)
# Note that we use half precision to load initially, even for int8
with deepspeed.OnDevice(dtype=torch.float16, device="meta"):
model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16)
model = model.eval()
model = deepspeed.init_inference(model, config=inf_config)
check_dtype(model, dtype)
@pytest.mark.seq_inference
class TestCheckpointShardinAutoTP(DistributedTest):
world_size = 2
def test(self, model_name, class_tmpdir):
def write_checkpoints_json(model_name, class_tmpdir):
import json
from pathlib import Path
local_rank = int(os.getenv("LOCAL_RANK", "0"))
if local_rank == 0:
# download only on first process
cached_repo_dir = snapshot_download(
model_name,
local_files_only=is_offline_mode(),
cache_dir=os.getenv("TRANSFORMERS_CACHE", None),
ignore_patterns=["*.safetensors", "*.msgpack", "*.h5"],
)
file_list = [str(entry) for entry in Path(cached_repo_dir).rglob("*.[bp][it][n]") if entry.is_file()]
data = {"type": "ds_model", "checkpoints": file_list, "version": 1.0}
os.makedirs(os.path.join(class_tmpdir, model_name), exist_ok=True)
json.dump(data, open(os.path.join(class_tmpdir, model_name, "ds_inference_config.json"), "w"))
dist.barrier()
world_size = int(os.getenv("WORLD_SIZE", "1"))
inf_config = {
"replace_with_kernel_inject": False,
"tensor_parallel": {
"tp_size": world_size
},
"checkpoint": os.path.join(class_tmpdir, model_name, "ds_inference_config.json"),
}
write_checkpoints_json(model_name, class_tmpdir)
# Load model on meta tensors
model_config = AutoConfig.from_pretrained(model_name)
# Note that we use half precision to load initially, even for int8
with deepspeed.OnDevice(dtype=torch.bfloat16, device="meta"):
model = AutoModelForCausalLM.from_config(model_config, torch_dtype=torch.bfloat16)
model = model.eval()
model = deepspeed.init_inference(model, config=inf_config)
| 5,200 | 37.525926 | 117 | py |
DeepSpeed | DeepSpeed-master/tests/unit/pipe/test_pipe_module.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import copy
import torch
import torch.nn as nn
import deepspeed.comm as dist
import pytest
import deepspeed
from deepspeed.pipe import PipelineModule
from deepspeed.utils import RepeatingLoader
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
HIDDEN_DIM = 32
LAYERS = 8
@pytest.fixture
def sequential_model():
model = torch.nn.Sequential(
*[nn.Linear(HIDDEN_DIM, HIDDEN_DIM) for _ in range(LAYERS)],
nn.Linear(HIDDEN_DIM, 1),
)
return model
@pytest.fixture
def simple_config():
config_dict = {
"train_batch_size": 2,
"train_micro_batch_size_per_gpu": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.001,
"betas": [0.9, 0.999],
"eps": 1e-8,
"weight_decay": 3e-7
}
},
"pipeline": {
"activation_checkpoint_interval": 1
}
}
return config_dict
@pytest.fixture
def batch_input():
return torch.randn(1, HIDDEN_DIM)
class TestPipeModuleSequential(DistributedTest):
world_size = 2
@pytest.mark.parametrize("activation_checkpoints", [False, True])
def test(self, sequential_model, simple_config, batch_input, activation_checkpoints):
base_model = copy.deepcopy(sequential_model)
base_input = batch_input.clone().detach()
base_output = base_model(base_input)
base_params = sum(p.numel() for p in base_model.parameters())
pipe_model = copy.deepcopy(sequential_model)
pipe_model = PipelineModule(layers=pipe_model, num_stages=2)
# Ensure all parameters are accounted for.
my_params = sum(p.numel() for p in pipe_model.parameters())
total_pipe_params = torch.LongTensor([my_params]).to(get_accelerator().device_name())
dist.all_reduce(total_pipe_params)
total_pipe_params = total_pipe_params.item()
assert total_pipe_params == base_params
pipe_model, _, _, _ = deepspeed.initialize(config=simple_config,
model=pipe_model,
model_parameters=[p for p in pipe_model.parameters()])
if activation_checkpoints:
deepspeed.checkpointing.configure(None,
deepspeed_config=pipe_model.config,
partition_activations=True,
contiguous_checkpointing=True,
num_checkpoints=9)
if pipe_model.is_first_stage or pipe_model.is_last_stage:
pipe_input = base_input.clone().detach().to(get_accelerator().device_name())
# label 0 is meaningless
dataset = [(pipe_input, 0)]
loader = RepeatingLoader(dataset)
data_iter = iter(loader)
else:
data_iter = None
pipe_output = pipe_model.eval_batch(data_iter=data_iter)
base_output = base_output.to('cpu')
pipe_output = pipe_output.to('cpu')
assert torch.allclose(base_output, pipe_output, atol=1e-4)
| 3,377 | 30.277778 | 105 | py |
DeepSpeed | DeepSpeed-master/tests/unit/autotuning/test_autotuning.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import pytest
from unit.simple_model import create_config_from_dict
from deepspeed.launcher import runner as dsrun
from deepspeed.autotuning.autotuner import Autotuner
from deepspeed.autotuning.scheduler import ResourceManager
RUN_OPTION = 'run'
TUNE_OPTION = 'tune'
def test_command_line():
'''Validate handling of command line arguments'''
for opt in [RUN_OPTION, TUNE_OPTION]:
dsrun.parse_args(args=f"--num_nodes 1 --num_gpus 1 --autotuning {opt} foo.py".split())
for error_opts in [
"--autotuning --num_nodes 1 --num_gpus 1 foo.py".split(),
"--autotuning test --num_nodes 1 -- num_gpus 1 foo.py".split(), "--autotuning".split()
]:
with pytest.raises(SystemExit):
dsrun.parse_args(args=error_opts)
@pytest.mark.parametrize("arg_mappings",
[
None,
{
},
{
"train_micro_batch_size_per_gpu": "--per_device_train_batch_size"
},
{
"train_micro_batch_size_per_gpu": "--per_device_train_batch_size",
"gradient_accumulation_steps": "--gradient_accumulation_steps"
},
{
"train_batch_size": "-tbs"
}
]) # yapf: disable
def test_resource_manager_arg_mappings(arg_mappings):
rm = ResourceManager(args=None,
hosts="worker-0, worker-1",
num_gpus_per_node=4,
results_dir=None,
exps_dir=None,
arg_mappings=arg_mappings)
if arg_mappings is not None:
for k, v in arg_mappings.items():
assert k.strip() in rm.arg_mappings.keys()
assert arg_mappings[k.strip()].strip() == rm.arg_mappings[k.strip()]
@pytest.mark.parametrize("active_resources",
[
{"worker-0": [0, 1, 2, 3]},
{"worker-0": [0, 1, 2, 3], "worker-1": [0, 1, 2, 3]},
{"worker-0": [0], "worker-1": [0, 1, 2], "worker-2": [0, 1, 2]},
{"worker-0": [0, 1], "worker-2": [4, 5]}
]
) # yapf: disable
def test_autotuner_resources(tmpdir, active_resources):
config_dict = {"autotuning": {"enabled": True, "exps_dir": os.path.join(tmpdir, 'exps_dir'), "arg_mappings": {}}}
config_path = create_config_from_dict(tmpdir, config_dict)
args = dsrun.parse_args(args=f'--autotuning {TUNE_OPTION} foo.py --deepspeed_config {config_path}'.split())
tuner = Autotuner(args=args, active_resources=active_resources)
expected_num_nodes = len(list(active_resources.keys()))
assert expected_num_nodes == tuner.exp_num_nodes
expected_num_gpus = min([len(v) for v in active_resources.values()])
assert expected_num_gpus == tuner.exp_num_gpus
| 3,248 | 40.126582 | 117 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/sparse_attention/test_sparse_attention.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# DeepSpeed note, some parts of code taken & adapted from commit c368a9fd1b2c9dee4cc94de9a6bb0be3d447be41
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_softmax.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/test_matmul.py
# https://github.com/ptillet/torch-blocksparse/blob/master/tests/utils
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import SparseAttnBuilder
from unit.util import skip_on_arch, skip_on_cuda
if not deepspeed.ops.__compatible_ops__[SparseAttnBuilder.NAME]:
pytest.skip("sparse attention op is not compatible on this system", allow_module_level=True)
def dense_to_sparse(w, mask, block):
"""Converts dense matrix with explicit zeros to sparse matrix
"""
Z = w.size(0)
ret = torch.empty((Z, mask.sum(), block, block), dtype=w.dtype, device=w.device)
nnz = mask.nonzero()
h, i, j = nnz[:, 0], nnz[:, 1], nnz[:, 2]
for zz in range(Z):
for idx, (hh, ii, jj) in enumerate(zip(h, i, j)):
ret[zz, idx, :, :] = w[zz, hh, ii * block:(ii + 1) * block, jj * block:(jj + 1) * block]
return ret
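# For a mask with nnz = mask.sum() non-zero blocks, the result has shape
# (Z, nnz, block, block); blocks are gathered for each batch element in the order
# returned by mask.nonzero().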
def sparse_to_dense(w, mask, block, zero=0):
"""Converts sparse matrix to dense matrix with explicit zeros
"""
maskedw = w.clone()
for bz, wz in enumerate(range(0, w.size(0))):
for bh, wh in enumerate(range(0, w.size(1))):
for bi, wi in enumerate(range(0, w.size(2), block)):
for bj, wj in enumerate(range(0, w.size(3), block)):
if mask[bh, bi, bj] == 0:
maskedw[wz, wh, wi:wi + block, wj:wj + block] = zero
#maskedw[wz, wh, wi : wi+block, wj : wj+block] *= mask[bh, bi, bj]
return maskedw
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def make_layout(rho, shape):
probs = torch.Tensor([rho, 1 - rho])
generator = torch.distributions.categorical.Categorical(probs)
layout = generator.sample(shape)
return layout
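# Categorical([rho, 1 - rho]) samples index 0 with probability rho and index 1 with
# probability 1 - rho, so each block is kept (layout == 1) with probability 1 - rho
# and rho controls the expected sparsity of the generated layout.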
def run_softmax_reference(x, scale, dx, kp_mask, attn_mask, layout, block):
x = sparse_to_dense(x, layout, block, zero=float('-inf'))
x.retain_grad()
if kp_mask is not None:
bcattn_mask = attn_mask[None, None, :, :] + torch.zeros_like(x)
x[bcattn_mask == 0] = float('-inf')
y = torch.softmax(x * scale + kp_mask[:, None, None, :], -1)
else:
y = torch.softmax(x * scale, -1)
y.backward(dx)
dx = x.grad.clone()
dx = dense_to_sparse(dx, layout, block)
y = dense_to_sparse(y, layout, block)
return y, dx
def run_softmax_sparse(x, scale, dx, kp_mask, attn_mask, layout, block):
from deepspeed.ops.sparse_attention.softmax import Softmax
sparse_softmax = Softmax(layout, block, bench=False)
dx = dense_to_sparse(dx, layout, block)
x = dense_to_sparse(x, layout, block)
x.retain_grad()
y = sparse_softmax(x,
scale=scale,
key_padding_mask=kp_mask,
key_padding_mask_mode='add',
attn_mask=attn_mask,
attn_mask_mode='mul')
y.backward(dx)
dx = x.grad.clone()
x.grad.zero_()
return x, dx
def init_softmax_inputs(Z, H, M, N, scale, rho, block, dtype, dense_x=True, layout=None):
if layout is None:
layout = make_layout(rho, (H, M // block, N // block))
if dense_x:
x = torch.rand((Z, H, M, N), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
else:
x = torch.rand((Z, layout.sum(), block, block),
dtype=dtype,
requires_grad=True,
device=get_accelerator().device_name())
dx = torch.rand_like(x)
bool_attn_mask = torch.randint(low=0,
high=2,
size=(N, N),
dtype=torch.bool,
requires_grad=False,
device=get_accelerator().device_name())
fp_attn_mask = bool_attn_mask.type(dtype)
kp_mask = torch.randint(low=0,
high=2,
size=(Z, N),
dtype=dtype,
requires_grad=False,
device=get_accelerator().device_name())
kp_mask[kp_mask == 1.] = float('-inf')
return layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask
@pytest.mark.parametrize("block", [16, 32])
@pytest.mark.parametrize("width", [256, 576])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_softmax(block, width, dtype):
valid_cuda_versions = [101, 102, 110, 111]
skip_on_arch(min_arch=7)
skip_on_cuda(valid_cuda=valid_cuda_versions)
Z = 2
H = 4
scale = 0.4
rho = 0.4
M = N = width
layout, x, dx, bool_attn_mask, fp_attn_mask, kp_mask = init_softmax_inputs(Z,
H,
M,
N,
scale,
rho,
block,
dtype,
layout=None)
ref_y, ref_dx = run_softmax_reference(x, scale, dx, kp_mask, bool_attn_mask, layout, block)
st_y, st_dx = run_softmax_sparse(x, scale, dx, kp_mask, fp_attn_mask, layout, block)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
def run_matmul_reference(x, w, mode, trans_a, trans_b, layout, block, dy):
x = sparse_to_dense(x, layout, block) if mode == 'dsd' else x
w = sparse_to_dense(w, layout, block) if mode == 'dds' else w
x.retain_grad()
w.retain_grad()
xx = x.transpose(2, 3) if trans_a else x
ww = w.transpose(2, 3) if trans_b else w
y = torch.matmul(xx, ww)
y = sparse_to_dense(y, layout, block) if mode == 'sdd' else y
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
w.grad.zero_()
y = dense_to_sparse(y, layout, block) if mode == 'sdd' else y
dx = dense_to_sparse(dx, layout, block) if mode == 'dsd' else dx
dw = dense_to_sparse(dw, layout, block) if mode == 'dds' else dw
return y, dx, dw
def run_matmul_sparse(x, w, mode, trans_a, trans_b, layout, block, dy):
from deepspeed.ops.sparse_attention.matmul import MatMul
x = dense_to_sparse(x, layout, block) if mode == 'dsd' else x
w = dense_to_sparse(w, layout, block) if mode == 'dds' else w
dy = dense_to_sparse(dy, layout, block) if mode == 'sdd' else dy
op = MatMul(layout, block, mode, trans_a=trans_a, trans_b=trans_b)
x.retain_grad()
w.retain_grad()
y = op(x, w)
y.backward(dy)
dx = x.grad.clone()
dw = w.grad.clone()
x.grad.zero_()
return y, dx, dw
def init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout):
torch.manual_seed(1)
AS0 = K if trans_a else M
AS1 = M if trans_a else K
BS0 = N if trans_b else K
BS1 = K if trans_b else N
shape = {'sdd': (M, N), 'dsd': (AS0, AS1), 'dds': (BS0, BS1)}[mode]
x = torch.rand((Z, H, AS0, AS1), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
w = torch.rand((Z, H, BS0, BS1), dtype=dtype, requires_grad=True, device=get_accelerator().device_name())
dy = torch.rand((Z, H, M, N), dtype=dtype, device=get_accelerator().device_name())
if layout is None:
layout = make_layout(rho, (H, shape[0] // block, shape[1] // block))
else:
assert list(layout.shape) == [H, shape[0] // block, shape[1] // block]
x.retain_grad()
w.retain_grad()
return x, w, dy, shape, layout
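# The mode string follows the conversions above: 'sdd' means the output is sparse
# (both inputs dense), 'dsd' means operand x is sparse, and 'dds' means operand w is
# sparse; trans_a/trans_b transpose the corresponding dense view before the matmul.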
testdata = [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['sdd', 'dds']\
for trans_a in [False]\
for trans_b in [False, True]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float16]\
for mode in ['dsd']\
for trans_a in [False, True]\
for trans_b in [False]\
] + [
(16, dtype, mode, trans_a, trans_b)\
for dtype in [torch.float32]\
for mode in ['sdd', 'dsd', 'dds']\
for trans_a in [False]\
for trans_b in [False]\
] + [
(block, torch.float16, mode, False, False)\
for block in [16, 32, 64]\
for mode in ['sdd', 'dsd', 'dds']\
]
@pytest.mark.parametrize("block, dtype, mode, trans_a, trans_b", testdata)
def test_matmul(block, dtype, mode, trans_a, trans_b):
valid_cuda_versions = [101, 102, 110, 111]
skip_on_arch(min_arch=7)
skip_on_cuda(valid_cuda=valid_cuda_versions)
Z = 3
H = 2
M = 128
N = 256
K = 192
rho = 0.5
x, w, dy, shape, layout = init_matmul_inputs(Z, H, M, N, K, rho, mode, trans_a, trans_b, block, dtype, layout=None)
ref_y, ref_dx, ref_dw = run_matmul_reference(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
st_y, st_dx, st_dw = run_matmul_sparse(x.clone(), w.clone(), mode, trans_a, trans_b, layout, block, dy)
assert allclose(ref_y, st_y)
assert allclose(ref_dx, st_dx)
assert allclose(ref_dw, st_dw)
| 9,912 | 38.337302 | 119 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/aio/test_aio.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import os
import filecmp
import torch
import deepspeed
import deepspeed.comm as dist
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
from unit.common import DistributedTest
KILO_BYTE = 1024
BLOCK_SIZE = KILO_BYTE
QUEUE_DEPTH = 2
IO_SIZE = 4 * BLOCK_SIZE
IO_PARALLEL = 2
if not deepspeed.ops.__compatible_ops__[AsyncIOBuilder.NAME]:
pytest.skip('Skip tests since async-io is not compatible', allow_module_level=True)
def _skip_for_invalid_environment(use_cuda_device=True, use_cuda_pinned_tensor=True):
if not get_accelerator().is_available():
if use_cuda_device:
pytest.skip("GPU tensors only supported in CUDA environments.")
if use_cuda_pinned_tensor:
pytest.skip("CUDA-pinned tensors only supported in CUDA environments.")
def _get_local_rank():
if get_accelerator().is_available():
return dist.get_rank()
return 0
def _do_ref_write(tmpdir, index=0):
file_suffix = f'{_get_local_rank()}_{index}'
ref_file = os.path.join(tmpdir, f'_py_random_{file_suffix}.pt')
ref_buffer = os.urandom(IO_SIZE)
with open(ref_file, 'wb') as f:
f.write(ref_buffer)
return ref_file, ref_buffer
def _get_test_write_file(tmpdir, index):
file_suffix = f'{_get_local_rank()}_{index}'
return os.path.join(tmpdir, f'_aio_write_random_{file_suffix}.pt')
def _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffer, index=0):
test_file = _get_test_write_file(tmpdir, index)
test_buffer = get_accelerator().ByteTensor(list(ref_buffer))
return test_file, test_buffer
def _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, aio_handle=None, index=0):
test_file = _get_test_write_file(tmpdir, index)
if aio_handle is None:
test_buffer = get_accelerator().pin_memory(torch.ByteTensor(list(ref_buffer)))
else:
tmp_buffer = torch.ByteTensor(list(ref_buffer))
test_buffer = aio_handle.new_cpu_locked_tensor(len(ref_buffer), tmp_buffer)
test_buffer.data.copy_(tmp_buffer)
return test_file, test_buffer
def _validate_handle_state(handle, single_submit, overlap_events):
assert handle.get_single_submit() == single_submit
assert handle.get_overlap_events() == overlap_events
assert handle.get_thread_count() == IO_PARALLEL
assert handle.get_block_size() == BLOCK_SIZE
assert handle.get_queue_depth() == QUEUE_DEPTH
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("single_submit", [True, False])
@pytest.mark.parametrize("overlap_events", [True, False])
class TestRead(DistributedTest):
world_size = 1
reuse_dist_env = True
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
def test_parallel_read(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events):
_skip_for_invalid_environment(use_cuda_device=False, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if use_cuda_pinned_tensor:
aio_buffer = get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
else:
aio_buffer = h.new_cpu_locked_tensor(IO_SIZE, torch.empty(0, dtype=torch.uint8))
_validate_handle_state(h, single_submit, overlap_events)
ref_file, _ = _do_ref_write(tmpdir)
read_status = h.sync_pread(aio_buffer, ref_file)
assert read_status == 1
with open(ref_file, 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffer.tolist()
if not use_cuda_pinned_tensor:
h.free_cpu_locked_tensor(aio_buffer)
@pytest.mark.parametrize("cuda_device", [True, False])
def test_async_read(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
use_cpu_locked_tensor = False
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if cuda_device:
aio_buffer = torch.empty(IO_SIZE, dtype=torch.uint8, device=get_accelerator().device_name())
elif use_cuda_pinned_tensor:
aio_buffer = get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
else:
aio_buffer = h.new_cpu_locked_tensor(IO_SIZE, torch.empty(0, dtype=torch.uint8))
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
ref_file, _ = _do_ref_write(tmpdir)
read_status = h.async_pread(aio_buffer, ref_file)
assert read_status == 0
wait_status = h.wait()
assert wait_status == 1
with open(ref_file, 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffer.tolist()
if use_cpu_locked_tensor:
h.free_cpu_locked_tensor(aio_buffer)
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("single_submit", [True, False])
@pytest.mark.parametrize("overlap_events", [True, False])
class TestWrite(DistributedTest):
world_size = 1
reuse_dist_env = True
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
def test_parallel_write(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events):
_skip_for_invalid_environment(use_cuda_device=False, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_file, ref_buffer = _do_ref_write(tmpdir)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
if use_cuda_pinned_tensor:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer)
else:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, h)
_validate_handle_state(h, single_submit, overlap_events)
write_status = h.sync_pwrite(aio_buffer, aio_file)
assert write_status == 1
if not use_cuda_pinned_tensor:
h.free_cpu_locked_tensor(aio_buffer)
assert os.path.isfile(aio_file)
filecmp.clear_cache()
assert filecmp.cmp(ref_file, aio_file, shallow=False)
@pytest.mark.parametrize("cuda_device", [True, False])
def test_async_write(self, tmpdir, use_cuda_pinned_tensor, single_submit, overlap_events, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_file, ref_buffer = _do_ref_write(tmpdir)
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
use_cpu_locked_tensor = False
if cuda_device:
aio_file, aio_buffer = _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffer)
elif use_cuda_pinned_tensor:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer)
else:
aio_file, aio_buffer = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffer, h)
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
write_status = h.async_pwrite(aio_buffer, aio_file)
assert write_status == 0
wait_status = h.wait()
assert wait_status == 1
if use_cpu_locked_tensor:
h.free_cpu_locked_tensor(aio_buffer)
assert os.path.isfile(aio_file)
filecmp.clear_cache()
assert filecmp.cmp(ref_file, aio_file, shallow=False)
@pytest.mark.sequential
@pytest.mark.parametrize("use_cuda_pinned_tensor", [True, False])
@pytest.mark.parametrize("cuda_device", [True, False])
class TestAsyncQueue(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
@pytest.mark.parametrize("async_queue", [2, 3])
def test_read(self, tmpdir, async_queue, use_cuda_pinned_tensor, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_files = []
for i in range(async_queue):
f, _ = _do_ref_write(tmpdir, i)
ref_files.append(f)
single_submit = True
overlap_events = True
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
use_cpu_locked_tensor = False
if cuda_device:
aio_buffers = [
torch.empty(IO_SIZE, dtype=torch.uint8, device=get_accelerator().device_name())
for _ in range(async_queue)
]
elif use_cuda_pinned_tensor:
aio_buffers = [
get_accelerator().pin_memory(torch.empty(IO_SIZE, dtype=torch.uint8, device='cpu'))
for _ in range(async_queue)
]
else:
tmp_tensor = torch.empty(0, dtype=torch.uint8)
aio_buffers = [h.new_cpu_locked_tensor(IO_SIZE, tmp_tensor) for _ in range(async_queue)]
use_cpu_locked_tensor = True
_validate_handle_state(h, single_submit, overlap_events)
for i in range(async_queue):
read_status = h.async_pread(aio_buffers[i], ref_files[i])
assert read_status == 0
wait_status = h.wait()
assert wait_status == async_queue
for i in range(async_queue):
with open(ref_files[i], 'rb') as f:
ref_buffer = list(f.read())
assert ref_buffer == aio_buffers[i].tolist()
if use_cpu_locked_tensor:
for t in aio_buffers:
h.free_cpu_locked_tensor(t)
@pytest.mark.parametrize("async_queue", [2, 3])
def test_write(self, tmpdir, use_cuda_pinned_tensor, async_queue, cuda_device):
_skip_for_invalid_environment(use_cuda_device=cuda_device, use_cuda_pinned_tensor=use_cuda_pinned_tensor)
ref_files = []
ref_buffers = []
for i in range(async_queue):
f, buf = _do_ref_write(tmpdir, i)
ref_files.append(f)
ref_buffers.append(buf)
single_submit = True
overlap_events = True
h = AsyncIOBuilder().load().aio_handle(BLOCK_SIZE, QUEUE_DEPTH, single_submit, overlap_events, IO_PARALLEL)
aio_files = []
aio_buffers = []
for i in range(async_queue):
if cuda_device:
f, buf = _get_test_write_file_and_cuda_buffer(tmpdir, ref_buffers[i], i)
elif use_cuda_pinned_tensor:
f, buf = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffers[i], None, i)
else:
f, buf = _get_test_write_file_and_cpu_buffer(tmpdir, ref_buffers[i], h, i)
aio_files.append(f)
aio_buffers.append(buf)
use_cpu_locked_tensor = not (cuda_device or use_cuda_pinned_tensor)
_validate_handle_state(h, single_submit, overlap_events)
for i in range(async_queue):
read_status = h.async_pwrite(aio_buffers[i], aio_files[i])
assert read_status == 0
wait_status = h.wait()
assert wait_status == async_queue
if use_cpu_locked_tensor:
for t in aio_buffers:
h.free_cpu_locked_tensor(t)
for i in range(async_queue):
assert os.path.isfile(aio_files[i])
filecmp.clear_cache()
assert filecmp.cmp(ref_files[i], aio_files[i], shallow=False)
| 11,978 | 36.551724 | 115 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/adam/test_cpu_adam.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import numpy as np
import pytest
from cpuinfo import get_cpu_info
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.op_builder import CPUAdamBuilder
from unit.common import DistributedTest
if not deepspeed.ops.__compatible_ops__[CPUAdamBuilder.NAME]:
pytest.skip("cpu-adam is not compatible", allow_module_level=True)
pytest.cpu_vendor = get_cpu_info()["vendor_id_raw"].lower()
def check_equal(first, second, atol=1e-2, verbose=False):
x = first.detach().numpy()
y = second.detach().numpy()
print("ATOL", atol)
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="param-update mismatch!", atol=atol)
def _compare_optimizers(model_size, param1, optimizer1, param2, optimizer2):
for i in range(10):
param1.grad = torch.randn(model_size, device=param1.device).to(param1.dtype)
param2.grad = param1.grad.clone().detach().to(device=param2.device, dtype=param2.dtype)
optimizer1.step()
optimizer2.step()
tolerance = param1.float().norm().detach().numpy() * 1e-2
check_equal(param1.float().norm(), param2.float().cpu().norm(), atol=tolerance, verbose=True)
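# Both optimizers take 10 steps with identical gradients; the parameter norms are then
# compared with an absolute tolerance of 1% of the reference parameter norm.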
@pytest.mark.parametrize('dtype', [torch.half, torch.float], ids=["fp16", "fp32"])
@pytest.mark.parametrize('model_size',
[
(64),
(22),
#(55),
(128),
(1024),
(1048576),
]) # yapf: disable
class TestCPUAdam(DistributedTest):
world_size = 1
reuse_dist_env = True
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
@pytest.mark.skipif(not get_accelerator().is_available(), reason="only supported in CUDA environments.")
def test_fused_adam_equal(self, dtype, model_size):
if ("amd" in pytest.cpu_vendor) and (dtype == torch.half):
pytest.skip("cpu-adam with half precision not supported on AMD CPUs")
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_data = torch.randn(model_size, device='cpu').to(dtype)
cpu_param = torch.nn.Parameter(cpu_data)
cuda_param = torch.nn.Parameter(cpu_data.to(get_accelerator().device_name()))
# tolerance = cpu_param.float().norm().detach().numpy() * 1e-2
# check_equal(cpu_param.float().norm(),
# cuda_param.float().cpu().norm(),
# atol=tolerance,
# verbose=True)
cpu_optimizer = DeepSpeedCPUAdam([cpu_param])
cuda_optimizer = FusedAdam([cuda_param])
_compare_optimizers(model_size=model_size,
param1=cpu_param,
optimizer1=cpu_optimizer,
param2=cuda_param,
optimizer2=cuda_optimizer)
def test_torch_adamw_equal(self, dtype, model_size):
if get_accelerator().is_available():
if ("amd" in pytest.cpu_vendor) and (dtype == torch.half):
pytest.skip("cpu-adam with half precision not supported on AMD CPUs")
ref_param_device = get_accelerator().device_name()
else:
if dtype == torch.half:
pytest.skip("torch.optim.AdamW with half precision only supported in CUDA environments.")
ref_param_device = 'cpu'
from deepspeed.ops.adam import DeepSpeedCPUAdam
cpu_data = torch.randn(model_size, device='cpu').to(dtype)
cpu_param = torch.nn.Parameter(cpu_data)
ref_param = torch.nn.Parameter(cpu_data.to(ref_param_device))
cpu_optimizer = DeepSpeedCPUAdam([cpu_param])
ref_optimizer = torch.optim.AdamW([ref_param])
_compare_optimizers(model_size=model_size,
param1=cpu_param,
optimizer1=cpu_optimizer,
param2=ref_param,
optimizer2=ref_optimizer)
class TestCPUAdamGPUError(DistributedTest):
def test_cpu_adam_gpu_error(self):
model_size = 64
from deepspeed.ops.adam import DeepSpeedCPUAdam
device = get_accelerator().device_name(0) # 'cuda:0' or 'xpu:0'
param = torch.nn.Parameter(torch.randn(model_size, device=device))
optimizer = DeepSpeedCPUAdam([param])
param.grad = torch.randn(model_size, device=device)
with pytest.raises(AssertionError):
optimizer.step()
| 4,900 | 37.289063 | 108 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/adam/test_adamw.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.ops.adam import FusedAdam
from deepspeed.ops.adam import DeepSpeedCPUAdam
from unit.common import DistributedTest
from unit.simple_model import SimpleModel
# yapf: disable
# optimizer, zero_offload, torch_adam, adam_w_mode, resulting_optimizer
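# Reading the table: torch_adam=True selects the plain torch optimizer (AdamW or Adam,
# depending on the optimizer type and adam_w_mode); otherwise zero_offload=True selects
# DeepSpeedCPUAdam and zero_offload=False selects FusedAdam. The second element of each
# resulting tuple is the adam_w_mode expected on the DeepSpeed optimizer (None means it
# is not checked, as for the torch optimizers).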
adam_configs = [["AdamW", False, False, False, (FusedAdam, True)],
["AdamW", False, True, False, (torch.optim.AdamW, None)],
["AdamW", True, False, False, (DeepSpeedCPUAdam, True)],
["AdamW", True, True, False, (torch.optim.AdamW, None)],
["AdamW", False, False, True, (FusedAdam, True)],
["AdamW", False, True, True, (torch.optim.AdamW, None)],
["AdamW", True, False, True, (DeepSpeedCPUAdam, True)],
["AdamW", True, True, True, (torch.optim.AdamW, None)],
["Adam", False, False, False, (FusedAdam, False)],
["Adam", False, True, False, (torch.optim.Adam, None)],
["Adam", True, False, False, (DeepSpeedCPUAdam, False)],
["Adam", True, True, False, (torch.optim.Adam, None)],
["Adam", False, False, True, (FusedAdam, True)],
["Adam", False, True, True, (torch.optim.AdamW, None)],
["Adam", True, False, True, (DeepSpeedCPUAdam, True)],
["Adam", True, True, True, (torch.optim.AdamW, None)]]
@pytest.mark.parametrize(
'optimizer, zero_offload, torch_adam, adam_w_mode, resulting_optimizer',
adam_configs)
class TestAdamConfigs(DistributedTest):
world_size = 1
reuse_dist_env = True
def test(self,
optimizer,
zero_offload,
torch_adam,
adam_w_mode,
resulting_optimizer):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": optimizer,
"params": {
"lr": 0.00015,
"torch_adam": torch_adam,
"adam_w_mode": adam_w_mode
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
},
"zero_optimization": {
"stage": 2,
"cpu_offload": zero_offload
}
}
model = SimpleModel(10)
model, _, _, _ = deepspeed.initialize(config=config_dict,
model=model,
model_parameters=model.parameters())
# get base optimizer under zero
ds_optimizer = model.optimizer.optimizer
opt_class, adam_w_mode = resulting_optimizer
assert isinstance(ds_optimizer, opt_class)
if adam_w_mode in [True, False]:
assert ds_optimizer.adam_w_mode == adam_w_mode
| 3,044 | 38.545455 | 82 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/quantizer/test_fake_quantization.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops import op_builder
quantizer_cuda_module = None
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (2e-2, 5e-3), torch.float16: (2e-2, 5e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def quantize_dequantize_ref(inputs, bit, num_groups=1):
# quantize
q_range = 2**bit
input_flat = inputs.float().reshape(num_groups, -1).contiguous()
input_flat = torch.nan_to_num(input_flat, nan=0.0)
input_min = input_flat.amin(-1, keepdim=True)
input_max = input_flat.amax(-1, keepdim=True)
scale = q_range / (2 * torch.max(input_min.abs(), input_max.abs() + 1e-5))
input_flat = (input_flat * scale).round().clamp(-q_range // 2, q_range // 2 - 1)
# dequantize
dequant_flat = torch.t(input_flat.to(torch.int8)) / scale.view(-1).to(torch.float16)
return torch.t(dequant_flat).reshape(inputs.shape)
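# Worked example (illustrative numbers, not taken from the tests): with bit=8 the
# quantization range q_range is 256. For a group whose values span [-0.5, 1.0],
# max(|min|, |max| + 1e-5) is about 1.0, so scale ~= 256 / 2 = 128. A value of 0.25
# maps to round(0.25 * 128) = 32, is clamped to [-128, 127], and dequantizes back to
# 32 / 128 = 0.25.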
def run_quant_dequant(inputs, groups, bits):
global quantizer_cuda_module
if quantizer_cuda_module is None:
quantizer_cuda_module = op_builder.QuantizerBuilder().load()
return quantizer_cuda_module.ds_quantize_fp16(inputs, groups, bits)
@pytest.mark.inference_ops
@pytest.mark.parametrize("tensor_shape", [(16, 4096), (128, 256)])
# Test with two tensor shapes as (16, 4096) and (128, 256).
@pytest.mark.parametrize("groups", [1, 16])
# Test with number of quant groups as 1 and 16.
# Note that we have an explicit boundary for groups as (((size / groups) - 1) / 4096 + 1) <= MAX_REG.
def test_fake_quant_dequant(tensor_shape, groups):
input_tensor = torch.rand((tensor_shape), dtype=torch.float16).to(get_accelerator().device_name())
# 8-bit quantization.
ref_input_8bit = input_tensor.clone().detach()
ds_input_8bit = input_tensor.clone().detach()
ref_out_8bit = quantize_dequantize_ref(ref_input_8bit, 8, groups)
# run_quant_dequant will do quantize then dequantize, and return the dequantized value.
ds_out_8bit = run_quant_dequant(ds_input_8bit, groups, 8)
assert (allclose(ds_out_8bit, ref_out_8bit))
# 4-bit quantization.
ref_input_4bit = input_tensor.clone().detach()
ds_input_4bit = input_tensor.clone().detach()
ref_out_4bit = quantize_dequantize_ref(ref_input_4bit, 4, groups)
ds_out_4bit = run_quant_dequant(ds_input_4bit, groups, 4)
assert (allclose(ds_out_4bit, ref_out_4bit))
| 2,549 | 37.059701 | 102 | py |
DeepSpeed | DeepSpeed-master/tests/unit/ops/quantizer/test_quantize.py |
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
from deepspeed.ops import op_builder
from deepspeed.accelerator import get_accelerator
inference_module = None
def run_quantize_ds(activations, num_groups, q_bits, is_symmetric_quant):
global inference_module
if inference_module is None:
inference_module = op_builder.QuantizerBuilder().load()
return inference_module.quantize(activations, num_groups, q_bits,
inference_module.Symmetric if is_symmetric_quant else inference_module.Asymmetric)
def run_dequantize_ds(activations, params, num_groups, q_bits, is_symmetric_quant):
global inference_module
if inference_module is None:
inference_module = op_builder.QuantizerBuilder().load()
return inference_module.dequantize(
activations,
params,
num_groups,
q_bits,
inference_module.Symmetric if is_symmetric_quant else inference_module.Asymmetric,
)
def get_q_props(q_bits):
q_range = 2**q_bits
q_min = -(2**(q_bits - 1))
q_max = (2**(q_bits - 1) - 1)
q_min = torch.IntTensor([q_min]).to(device=get_accelerator().device_name())
q_max = torch.IntTensor([q_max]).to(device=get_accelerator().device_name())
return q_range, q_max, q_min
def get_scale_zero_point(q_bits, is_symmetric_quant, max, min, absmax, scales=None, zero_points=None):
q_range, q_max, q_min = get_q_props(q_bits)
if is_symmetric_quant:
scale = torch.empty_like(absmax)
for i, x in enumerate(absmax):
scale[i] = torch.ones_like(x) if x == 0 else q_range / (2 * x)
zero_point = torch.zeros(scale.shape, dtype=torch.float32, device=get_accelerator().device_name())
else:
scale = torch.empty_like(max)
for i, x in enumerate(max):
scale[i] = torch.ones_like(x) if max[i] == min[i] else q_range / (max[i] - min[i])
zero_point = q_min - (min * scale)
return scale, zero_point
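# Illustrative asymmetric case (hypothetical values): with q_bits=8, min=-1.0 and max=3.0,
# scale = 256 / (3.0 - (-1.0)) = 64 and zero_point = -128 - (-1.0 * 64) = -64, so 0.0 maps to
# the integer -64 and 3.0 maps to 192 - 64 = 128, which is clamped to q_max=127 downstream.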
def int4x2to2xint4(int4X2tensor):
high = int4X2tensor >> 4
low = (int4X2tensor << 4) >> 4
return torch.stack((high, low), dim=-1).flatten()
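# The arithmetic shifts above sign-extend each nibble: e.g. a packed int8 value of 0x7F
# (high nibble 0x7, low nibble 0xF) unpacks to (7, -1), since (0x7F << 4) wraps to -16 in
# int8 and an arithmetic right shift by 4 then yields -1.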
def run_float_quantize(q_bits, is_symmetric_quant, activations_ref, num_groups):
# Reference implementation
# https://pytorch.org/docs/stable/quantization-support.html
activations_ref = activations_ref.reshape(num_groups, -1).to(dtype=torch.float32)
max_abs_activations_ref = torch.amax(torch.abs(activations_ref), dim=-1).view(num_groups, -1)
max_activations_ref = torch.amax(activations_ref, dim=-1).view(num_groups, -1)
min_activations_ref = torch.amin(activations_ref, dim=-1).view(num_groups, -1)
_, q_max, q_min = get_q_props(q_bits)
scale, zero_point = get_scale_zero_point(q_bits, is_symmetric_quant, max_activations_ref, min_activations_ref,
max_abs_activations_ref)
data_f = activations_ref * scale
if not is_symmetric_quant:
data_f = data_f + zero_point
data_i32 = torch.round(data_f).to(dtype=torch.int32)
data_i32 = torch.minimum(torch.maximum(data_i32, q_min.expand_as(data_i32)), q_max.expand_as(data_i32))
data_i8 = data_i32.to(dtype=torch.int8)
scales = (1.0 / scale).reshape(-1, 1)
offsets = zero_point.reshape(-1, 1)
params = torch.cat((scales, offsets), dim=-1)
return data_i8, params
def run_float_dequantize(q_bits, is_symmetric_quant, data_i8, params, num_groups):
data_f = data_i8.reshape(num_groups, -1).to(dtype=torch.float32)
scales = params[:, 0].reshape(-1, 1)
offsets = params[:, 1].reshape(-1, 1)
if not is_symmetric_quant:
data_f = data_f - offsets
else:
assert offsets.allclose(torch.zeros_like(offsets))
data_f = data_f * scales
return data_f
@pytest.mark.inference_ops
@pytest.mark.parametrize("num_groups", [1, 13, 512])
@pytest.mark.parametrize("num_elems", [8, 16, 32, 64, 128, 256, 4096, 8192, 12288, 16384])
@pytest.mark.parametrize("is_symmetric_quant", [True, False])
@pytest.mark.parametrize("q_bits", [4, 8])
@pytest.mark.parametrize("directed_case", ["all_zeros", None])
def test_float_quantize(num_elems, num_groups, is_symmetric_quant, q_bits, directed_case):
# fix seed
torch.manual_seed(num_elems)
if directed_case == "all_zeros":
activations_ds = torch.zeros((num_groups, num_elems),
dtype=torch.float16,
device=get_accelerator().device_name())
else:
activations_ds = torch.randn((num_groups, num_elems),
dtype=torch.float16,
device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
ref_out_tensor, ref_params = run_float_quantize(q_bits, is_symmetric_quant, activations_ref, num_groups)
ref_dequantized_tensor = run_float_dequantize(q_bits, is_symmetric_quant, ref_out_tensor, ref_params, num_groups)
# we need to convert the tensor to float64 to avoid overflow
ref_quantization_error = torch.sum(torch.abs((activations_ref - ref_dequantized_tensor).to(torch.float64)))
ds_out_tensor, ds_out_params = run_quantize_ds(activations_ds, num_groups, q_bits, is_symmetric_quant)
ds_dequantized_tensor = run_dequantize_ds(ds_out_tensor, ds_out_params, num_groups, q_bits, is_symmetric_quant)
assert torch.all(torch.isfinite(ds_dequantized_tensor))
ds_quantization_error = torch.sum(torch.abs((activations_ds - ds_dequantized_tensor).to(torch.float64)))
assert (ds_quantization_error <= ref_quantization_error * 1.05)
| 5,713 | 36.84106 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/adagrad/test_cpu_adagrad.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import numpy as np
import pytest
import deepspeed
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import CPUAdagradBuilder
from unit.common import DistributedTest
if not deepspeed.ops.__compatible_ops__[CPUAdagradBuilder.NAME]:
pytest.skip("cpu-adagrad is not compatible", allow_module_level=True)
def check_equal(first, second, atol=1e-2, verbose=False):
x = first.detach().numpy()
y = second.detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="param-update mismatch!", atol=atol)
class TestCPUAdagrad(DistributedTest):
world_size = 1
requires_cuda_env = False
if not get_accelerator().is_available():
init_distributed = False
set_dist_env = False
def test_cpu_adagrad_opt(self, model_size=64):
device = 'cpu'
rng_state = torch.get_rng_state()
param = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn(model_size, device=device))
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
rng_state = torch.get_rng_state()
param.grad = torch.randn(model_size, device=device)
torch.set_rng_state(rng_state)
param1.grad = torch.randn(model_size, device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
def test_cpu_adagrad_opt_sparse_embedding(self, model_size=32, vocabulary_size=64, dim=16):
device = 'cpu'
rng_state = torch.get_rng_state()
def gen_sparse_grad(vocabulary_size, dim, num_indices, dtype, device):
i = torch.randint(vocabulary_size, size=(1, num_indices), dtype=torch.int64, device=device)
v = torch.randn(num_indices, dim, dtype=dtype, device=device)
t = torch.sparse_coo_tensor(i, v, (vocabulary_size, dim), device=device)
t = t.coalesce()
new_i = (t.indices().view(-1, 1).repeat(1, dim) * dim + torch.tensor(range(dim))).flatten().unsqueeze(0)
new_v = t.values().flatten()
new_t = torch.sparse_coo_tensor(new_i, new_v, (vocabulary_size * dim, ), device=device)
new_t = new_t.coalesce()
new_t.requires_grad = False
return new_t
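        # gen_sparse_grad above re-indexes a (vocabulary_size, dim) sparse COO gradient into a
        # flat (vocabulary_size * dim,) sparse tensor so it lines up with the 1-D parameter that
        # DeepSpeedCPUAdagrad and torch.optim.Adagrad both update below.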
voc_size = vocabulary_size
dim = dim
num_indices = int(model_size // dim)
dtype = torch.float32
param = torch.nn.Parameter(torch.randn((voc_size * dim, ), dtype=dtype, device=device), requires_grad=True)
torch.set_rng_state(rng_state)
param1 = torch.nn.Parameter(torch.randn((voc_size * dim, ), dtype=dtype, device=device), requires_grad=True)
torch.set_rng_state(rng_state)
optimizer = DeepSpeedCPUAdagrad([param])
optimizer1 = torch.optim.Adagrad([param1])
for i in range(10):
torch.set_rng_state(rng_state)
param.grad = gen_sparse_grad(voc_size, dim, num_indices, dtype=dtype, device=device)
torch.set_rng_state(rng_state)
param1.grad = gen_sparse_grad(voc_size, dim, num_indices, dtype=dtype, device=device)
optimizer.step()
optimizer1.step()
check_equal(param, param1, atol=1e-2, verbose=True)
class TestCPUAdagradGPUError(DistributedTest):
def test_cpu_adagrad_gpu_error(self):
model_size = 64
device = get_accelerator().device_name(0) # 'cuda:0' or 'xpu:0'
param = torch.nn.Parameter(torch.randn(model_size, device=device))
optimizer = DeepSpeedCPUAdagrad([param])
param.grad = torch.randn(model_size, device=device)
with pytest.raises(AssertionError):
optimizer.step()
| 4,156 | 37.137615 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/spatial/test_nhwc_bias_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
from deepspeed.ops.transformer.inference.bias_add import nhwc_bias_add
from deepspeed.accelerator import get_accelerator
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-3, 5e-4), torch.float16: (3e-2, 2e-3), torch.int8: (1, 1)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def ref_bias_add(activations, bias):
return activations + bias.reshape(1, -1, 1, 1)
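# The bias is broadcast over the channel dimension of the logically-NCHW activation (which is
# stored channels_last / NHWC in the tests below), so each of the `channels` bias values is added
# to every spatial position of its channel, matching what the fused NHWC kernel computes.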
channels_list = [192, 384, 320, 576, 640, 768, 960, 1152, 1280, 1536, 1600, 1920, 2240, 2560]
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add(activations.clone().detach(), bias)
ds_vals = nhwc_bias_add(activations, bias)
assert allclose(ds_vals, ref_vals)
def ref_bias_add_add(activations, bias, other):
return (activations + bias.reshape(1, -1, 1, 1)) + other
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
other = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add_add(activations.clone().detach(), bias, other)
ds_vals = nhwc_bias_add(activations, bias, other=other)
assert allclose(ds_vals, ref_vals)
def ref_bias_add_bias_add(activations, bias, other, other_bias):
return (activations + bias.reshape(1, -1, 1, 1)) + (other + other_bias.reshape(1, -1, 1, 1))
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2, 10])
@pytest.mark.parametrize("image_size", [16, 32, 64])
@pytest.mark.parametrize("channels", channels_list)
def test_bias_add_bias_add(batch, image_size, channels):
activations = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
other = torch.randn((batch, channels, image_size, image_size),
dtype=torch.float16,
device=get_accelerator().device_name()).to(memory_format=torch.channels_last)
bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
other_bias = torch.randn((channels), dtype=torch.float16, device=get_accelerator().device_name())
ref_vals = ref_bias_add_bias_add(activations.clone().detach(), bias, other, other_bias)
ds_vals = nhwc_bias_add(activations, bias, other=other, other_bias=other_bias)
assert allclose(ds_vals, ref_vals)
| 3,671 | 41.697674 | 107 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/accelerators/test_accelerator_backward.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import torch
import pytest
import random
import copy
import os
from torch import nn
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from deepspeed.accelerator import get_accelerator
from unit.modeling import BertConfig, BertLayerNorm, BertEncoder as BertEncoderPostln
from unit.modelingpreln import BertEncoder as BertEncoderPreln
from unit.common import DistributedTest, is_rocm_pytorch
#if not deepspeed.ops.__installed_ops__['transformer']:
#pytest.skip(
# "transformer kernels are temporarily disabled because of unexplained failures",
# allow_module_level=True)
def check_equal(first, second, atol=1e-2, verbose=False):
diction_x = {}
diction_y = {}
if verbose:
for i, (x, y) in enumerate(zip(first, second)):
print(x[1], y[1])
for i, (x, y) in enumerate(zip(first, second)):
k = 0
while (diction_x.get((k, x[1])) is not None):
k = k + 1
diction_x[k, x[1]] = x[0]
k = 0
while (diction_y.get((k, y[1])) is not None):
k = k + 1
diction_y[k, y[1]] = y[0]
if verbose:
print()
for i, (x, y) in enumerate(zip(diction_x, diction_y)):
print(x, y)
for i, (x, y) in enumerate(zip(diction_x, diction_y)):
if (x[0] == 1): continue
if verbose:
print("checking ", x[1], ":")
y = diction_y[x[0], x[1]]
x = diction_x[x[0], x[1]]
if verbose:
print(((x == float('inf')).nonzero(as_tuple=True)[0]))
print(((y == float('inf')).nonzero(as_tuple=True)[0]))
x = x.cpu().detach().numpy()
y = y.cpu().detach().numpy()
avgx = np.sum(abs(x), dtype=float)
countx = x.shape[0]
for i in range(len(x.shape) - 1):
countx *= x.shape[i + 1]
avgx = np.sum(avgx)
tolerance = 1
if avgx != float('inf') and avgx != -float('inf'):
avgx = avgx / countx
tolerance = avgx * atol
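        # The tolerance is relative: atol is scaled by the mean absolute value of the baseline
        # gradient so large- and small-magnitude tensors are compared on an equal footing.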
if verbose:
print("tolerance is ", tolerance)
x = x.flatten()
y = y.flatten()
print("x = {}".format(x))
print("y = {}".format(y))
if any(x == float('inf')) or any(x == -float('inf')):
print("found infinity in x")
if any(y == float('inf')) or any(y == -float('inf')):
print("found infinity in y")
print(np.linalg.norm(x.astype('float64')))
print(np.linalg.norm(y.astype('float64')))
print('-' * 80)
#toler = np.linalg.norm(x.astype('float64')) * 0.0005
np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i), atol=tolerance)
def zero_grad(variables):
for variable in variables:
variable.grad.zero_()
device = torch.device(get_accelerator().device_name())
kwargs_fp32 = {'dtype': torch.float, 'device': device, 'requires_grad': True}
kwargs_fp16 = {'dtype': torch.half, 'device': device, 'requires_grad': True}
class DSEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(DSEncoder, self).__init__()
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList([
copy.deepcopy(DeepSpeedTransformerLayer(config, weights, biases)) for _ in range(config.num_hidden_layers)
])
self.grads = []
self.pre_or_post = config.pre_layer_norm
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
raise NotImplementedError("`checkpoint` is not defined below")
#l = 0
#num_layers = len(self.layer)
#chunk_length = math.ceil(math.sqrt(num_layers))
#while l < num_layers:
# hidden_states = checkpoint.checkpoint(
# custom(
# l, # noqa: F821
# l + chunk_length),
# hidden_states,
# attention_mask * 1)
# l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask, grads=self.grads)
hidden_states.register_hook(lambda x, self=self: self.grads.append([x, "hidden_state"]))
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers or checkpoint_activations:
if (self.pre_or_post):
hidden_states = self.FinalLayerNorm(hidden_states)
all_encoder_layers.append(hidden_states)
return all_encoder_layers
def get_grads(self):
return self.grads
def create_models(ds_config):
bert_config = BertConfig(vocab_size_or_config_json_file=119547,
hidden_size=ds_config.hidden_size,
num_hidden_layers=ds_config.num_hidden_layers,
num_attention_heads=ds_config.heads,
intermediate_size=ds_config.intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=ds_config.hidden_dropout_ratio,
attention_probs_dropout_prob=ds_config.attn_dropout_ratio,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=ds_config.initializer_range)
weights = []
biases = []
for i in range(4):
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.hidden_size)))
weights[i].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[4].data.fill_(1.0)
weights.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size, ds_config.hidden_size)))
weights[5].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.intermediate_size)))
weights[6].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[7].data.fill_(1.0)
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[0].data.zero_()
for i in range(4):
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[i + 1].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size)))
biases[5].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[6].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[7].data.zero_()
if (ds_config.pre_layer_norm):
bert_encoder = BertEncoderPreln(bert_config, weights, biases)
else:
bert_encoder = BertEncoderPostln(bert_config, weights, biases)
ds_encoder = DSEncoder(ds_config, weights, biases)
if ds_config.fp16:
bert_encoder.half()
ds_encoder.half()
bert_encoder.to(get_accelerator().device_name())
ds_encoder.to(get_accelerator().device_name())
return bert_encoder, ds_encoder
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def run_backward(ds_config, seq_len, atol=1e-2, verbose=False):
set_seed(123)
bert_encoder, ds_encoder = create_models(ds_config)
# prepare test data
kwargs = kwargs_fp16 if ds_config.fp16 else kwargs_fp32
hidden_states = torch.randn(ds_config.batch_size, seq_len, ds_config.hidden_size, **kwargs)
input_mask = torch.randn(ds_config.batch_size, 1, 1, seq_len, **kwargs)
Y = torch.randn(ds_config.batch_size, seq_len, ds_config.hidden_size, **kwargs)
# run baseline
base_results = bert_encoder(hidden_states,
input_mask,
output_all_encoded_layers=False,
checkpoint_activations=False)
loss = (Y - base_results[0]).pow(2).sum() / 64
loss.backward()
base_grads = bert_encoder.get_grads()
# run ds
ds_results = ds_encoder(hidden_states, input_mask, output_all_encoded_layers=False, checkpoint_activations=False)
loss = (Y - ds_results[0]).pow(2).sum() / 64
loss.backward()
ds_grads = ds_encoder.get_grads()
# check grads
check_equal(base_grads, ds_grads, atol=atol, verbose=verbose)
# NOTE: Keep these different params as they have helped find divergence in behavior between AMD and NVIDIA.
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16, atol',
[
(64,160,128,2,24,False,True, 0.2),
(64,1600,128,2,4,False,True, 0.2),
(8,1600,128,25,3,True,True, 0.05),
(8,160,128,2,3,True,True, 0.1),
(8,1600,128,2,3,True,True, 0.05),
]) # yapf: disable
class TestCUDABackward(DistributedTest):
world_size = 1
if is_rocm_pytorch():
#This is to flush denorms in forward pass. Please refer to https://github.com/pytorch/pytorch/blob/main/docs/source/notes/numerical_accuracy.rst#reduced-precision-fp16-and-bf16-gemms-and-convolutions-on-amd-instinct-mi200-devices
os.environ['ROCBLAS_INTERNAL_FP16_ALT_IMPL'] = '1'
def test_backward(self, is_preln, use_fp16, batch_size, hidden_size, seq_len, heads, num_layers, atol):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and (use_fp16 is True or is_preln is False):
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_backward(ds_config, seq_len, atol=atol, verbose=True)
| 10,934 | 37.914591 | 237 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/accelerators/test_accelerator_forward.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import numpy as np
import torch
import pytest
import random
import copy
from torch import nn
from unit.modelingpreln import BertEncoder as BertEncoderPreln
from unit.modeling import BertLayerNorm, BertConfig, BertEncoder as BertEncoderPostln
from deepspeed import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
from deepspeed.accelerator import get_accelerator
from unit.common import DistributedTest
def check_equal(first, second, atol=1e-2, verbose=False):
if verbose:
print()
for i, (x, y) in enumerate(zip(first, second)):
x = x[0].cpu().detach().numpy()
y = y[0].cpu().detach().numpy()
if verbose:
print("x = {}".format(x.flatten()))
print("y = {}".format(y.flatten()))
print('-' * 80)
np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i), atol=atol)
def zero_grad(variables):
for variable in variables:
variable.grad.zero_()
device = torch.device(get_accelerator().device_name())
kwargs_fp32 = {'dtype': torch.float, 'device': device, 'requires_grad': True}
kwargs_fp16 = {'dtype': torch.half, 'device': device, 'requires_grad': True}
class DSEncoder(nn.Module):
def __init__(self, config, weights, biases):
super(DSEncoder, self).__init__()
self.FinalLayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.layer = nn.ModuleList([
copy.deepcopy(DeepSpeedTransformerLayer(config, weights, biases)) for _ in range(config.num_hidden_layers)
])
self.grads = []
self.pre_or_post = config.pre_layer_norm
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, checkpoint_activations=False):
all_encoder_layers = []
def custom(start, end):
def custom_forward(*inputs):
layers = self.layer[start:end]
x_ = inputs[0]
for layer in layers:
x_ = layer(x_, inputs[1])
return x_
return custom_forward
if checkpoint_activations:
raise NotImplementedError("`checkpoint` below is not defined")
#l = 0
#num_layers = len(self.layer)
#chunk_length = math.ceil(math.sqrt(num_layers))
#while l < num_layers:
# hidden_states = checkpoint.checkpoint(
# custom(
# l, # noqa: F821
# l + chunk_length),
# hidden_states,
# attention_mask * 1)
# l += chunk_length
# decoder layers
else:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers or checkpoint_activations:
if (self.pre_or_post):
hidden_states = self.FinalLayerNorm(hidden_states)
all_encoder_layers.append(hidden_states)
return all_encoder_layers
def create_models(ds_config):
bert_config = BertConfig(vocab_size_or_config_json_file=119547,
hidden_size=ds_config.hidden_size,
num_hidden_layers=ds_config.num_hidden_layers,
num_attention_heads=ds_config.heads,
batch_size=ds_config.batch_size,
intermediate_size=ds_config.intermediate_size,
hidden_act="gelu",
hidden_dropout_prob=ds_config.hidden_dropout_ratio,
attention_probs_dropout_prob=ds_config.attn_dropout_ratio,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=ds_config.initializer_range,
fp16=ds_config.fp16)
weights = []
biases = []
for i in range(4):
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.hidden_size)))
weights[i].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[4].data.fill_(1.0)
weights.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size, ds_config.hidden_size)))
weights[5].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size, ds_config.intermediate_size)))
weights[6].data.normal_(mean=0.0, std=ds_config.initializer_range)
weights.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
weights[7].data.fill_(1.0)
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[0].data.zero_()
for i in range(4):
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[i + 1].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.intermediate_size)))
biases[5].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[6].data.zero_()
biases.append(nn.Parameter(torch.Tensor(ds_config.hidden_size)))
biases[7].data.zero_()
if (ds_config.pre_layer_norm):
bert_encoder = BertEncoderPreln(bert_config, weights, biases)
else:
bert_encoder = BertEncoderPostln(bert_config, weights, biases)
ds_encoder = DSEncoder(ds_config, weights, biases)
if ds_config.fp16:
bert_encoder.half()
ds_encoder.half()
bert_encoder.to(get_accelerator().device_name())
ds_encoder.to(get_accelerator().device_name())
return bert_encoder, ds_encoder
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def run_forward(ds_config, seq_len, atol=1e-2, verbose=False, test_bsz=None):
set_seed(123)
bert_encoder, ds_encoder = create_models(ds_config)
bsz = ds_config.batch_size if test_bsz is None else test_bsz
# prepare test data
kwargs = kwargs_fp16 if ds_config.fp16 else kwargs_fp32
hidden_states = torch.randn(bsz, seq_len, ds_config.hidden_size, **kwargs)
input_mask = torch.randn(bsz, 1, 1, seq_len, **kwargs)
# run baseline
base_results = bert_encoder(hidden_states,
input_mask,
output_all_encoded_layers=False,
checkpoint_activations=False)
# run ds
ds_results = ds_encoder(hidden_states, input_mask, output_all_encoded_layers=False, checkpoint_activations=False)
# check forward evaluation
check_equal(base_results, ds_results, atol=atol, verbose=verbose)
# FP16 test cases can only run on devices that support FP16.
@pytest.mark.sequential
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
(64,160,128,2,24,False,True),
#(8,2048,2048,32,1,True,True),
(8,160,128,2,3,True,True),
(8,160,128,2,3,False,True),
(8,1600,128,2,3,True,True),
(8,1600,128,25,3,True,True),
(8,1600,128,25,3,False,True),
(8,256,52,4,3,True,True),
(3,1024,51,16,3,True,False),
(3,1024,54,16,3,True,True),
(8,1024,381,16,3,True,False),
(8,1024,384,16,3,True,True),
(8,1024,384,16,3,True,True),
(8,1024,119,16,3,True,False),
(8,1024,120,16,3,True,True),
(8,1024,509,16,3,True,False),
(8,1024,512,16,3,True,True),
(64,1024,56,16,3,False,False),
(64,1024,53,16,3,False,True),
(64,1024,24,16,3,False,False),
(64,1024,21,16,3,False,True),
(8,1024,384,16,3,False,False),
(8,1024,384,16,3,False,True),
(8,1024,512,16,3,False,False),
(8,1024,511,16,3,False,True),
(8,1536,128,24,3,False,False),
(8,1536,128,24,3,False,True),
(8,2048,128,32,3,False,False),
(8,2048,128,32,3,False,True),
(8,2560,128,40,3,False,False),
(8,2560,128,40,3,False,True),
(8,128,128,2,3,True,False),
(8,128,128,2,3,True,True),
(8,4096,128,64,3,True,True),
(8,8192,128,64,3,False,True),
(1,256,2048,32,3,True,True),
]) # yapf: disable
class TestCUDAForward(DistributedTest):
world_size = 1
reuse_dist_env = True
def test_forward(self, batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_forward(ds_config, seq_len, atol=3e-2)
@pytest.mark.parametrize('batch_size, small_bsz, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
(8,3,1024,512,16,3,True,False),
(8,7,1024,512,16,3,True,True),
(8,3,1024,512,16,3,False,False),
(8,7,1024,512,16,3,False,True),
]) # yapf: disable
class TestCUDAForwardSmallBatchSize(DistributedTest):
world_size = 1
def test_forward_with_small_bsz(self, batch_size, small_bsz, hidden_size, seq_len, heads, num_layers, is_preln,
use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
run_forward(ds_config, seq_len, atol=3e-2, test_bsz=small_bsz)
@pytest.mark.parametrize('batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16',
[
#(64,1024,128,16,3,True,False),
#(64,1024,128,16,3,True,True),
#(64,1024,128,16,3,False,False),
#(64,1024,128,16,3,False,True),
]) # yapf: disable
class TestCUDAForwardStochastic(DistributedTest):
world_size = 1
def test_forward_stochastic(self, batch_size, hidden_size, seq_len, heads, num_layers, is_preln, use_fp16):
# Only run fp16 test cases on devices with FP16 capability.
if not get_accelerator().is_fp16_supported() and use_fp16 is True:
return
ds_config = DeepSpeedTransformerConfig()
ds_config.layer_id = None
ds_config.batch_size = batch_size
ds_config.hidden_size = hidden_size
ds_config.intermediate_size = 4 * hidden_size
ds_config.heads = heads
ds_config.attn_dropout_ratio = 0.0
ds_config.hidden_dropout_ratio = 0.0
ds_config.num_hidden_layers = num_layers
ds_config.pre_layer_norm = is_preln
ds_config.initializer_range = 0.02
ds_config.fp16 = use_fp16
ds_config.stochastic_mode = True
run_forward(ds_config, seq_len, atol=7e-2)
| 12,953 | 40.652733 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_moe_res_matmult.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def run_moe_res_matmul_reference(residual, coef1, coef2, output):
return residual * coef1 + output * coef2
def run_moe_res_matmul_ds(residual, coef, output):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
coef_t = coef.transpose(-1, -2).contiguous()
return inference_module.moe_res_matmul(residual, coef_t, output)
@pytest.mark.inference_ops
@pytest.mark.parametrize("hidden_dim", [16, 64])
@pytest.mark.parametrize("c", [1, 4])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_moe_residual_matmul(hidden_dim, c, dtype):
residual_ds = torch.randn((c, hidden_dim * c, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff1 = torch.randn((1, 1, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff2 = torch.randn((1, 1, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
out_ds = torch.randn((c, hidden_dim * c, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
coeff_ds = torch.cat((coeff1, coeff2), dim=-1)
residual_ref = residual_ds.clone().detach()
coeff_ref = coeff_ds.clone().detach()
out_ref = out_ds.clone().detach()
ds_out = run_moe_res_matmul_ds(residual_ds, coeff_ds, out_ds)
ref_out = run_moe_res_matmul_reference(residual_ref, coeff1, coeff2, out_ref)
assert (allclose(ds_out, ref_out))
| 1,885 | 37.489796 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_bias_geglu.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
from deepspeed.accelerator import get_accelerator
from deepspeed.utils.types import ActivationFuncType
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_geglu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
# Explicitly using the default GeLU
activations = activations + bias.reshape(1, 1, -1)
hidden_states, gate = activations.chunk(2, dim=-1)
return hidden_states * torch.nn.functional.gelu(gate.to(torch.float32)).to(activations.dtype)
def run_bias_geglu_ds(activation, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_GELU)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_geglu(batch, sequence, channels, dtype):
activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name())
bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name())
ds_out = run_bias_geglu_ds(activation, bias)
ref_out = run_bias_geglu_reference(activation, bias)
assert (allclose(ds_out, ref_out))
def run_gated_silu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
    # Explicitly using the default SiLU
activations = activations + bias.reshape(1, 1, -1)
hidden_states, gate = activations.chunk(2, dim=-1)
return hidden_states * torch.nn.functional.silu(gate.to(torch.float32)).to(activations.dtype)
def run_gated_silu_ds(activation, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.gated_activation(activation, bias, ActivationFuncType.GATED_SILU)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_gated_silu(batch, sequence, channels, dtype):
activation = torch.randn((batch, sequence, channels * 2), dtype=dtype, device=get_accelerator().device_name())
bias = torch.randn((channels * 2), dtype=dtype, device=get_accelerator().device_name())
ds_out = run_gated_silu_ds(activation, bias)
ref_out = run_gated_silu_reference(activation, bias)
assert (allclose(ds_out, ref_out))
| 3,078 | 38.987013 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_layer_norm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
try:
import triton # noqa: F401
from deepspeed.ops.transformer.inference.triton import (
layer_norm,
layer_norm_residual,
)
except ImportError:
print("triton import failed")
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def ref_implementation(vals, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
return torch.nn.functional.layer_norm(vals_f, (channels, ), weight=gamma_f, bias=beta_f, eps=epsilon).to(dtype)
def ds_implementation(vals, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.layer_norm(vals, gamma, beta, epsilon)
def ds_triton_implementation(vals, gamma, beta, epsilon):
return layer_norm(vals, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
@pytest.mark.parametrize("use_triton_ops", [False, True])
def test_layer_norm(batch, seq_len, channels, dtype, use_triton_ops):
if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
ref_output = ref_implementation(vals, gamma, beta, epsilon, channels, dtype)
if use_triton_ops:
new_output = ds_triton_implementation(vals, gamma, beta, epsilon)
        if dtype != torch.float16:  # only fp16 is supported by the triton kernel
return
else:
new_output = ds_implementation(vals, gamma, beta, epsilon)
if not allclose(new_output, ref_output):
#print(new_output - ref_output)
assert allclose(new_output, ref_output)
def residual_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
bias_f = bias.to(torch.float32).reshape(1, 1, -1)
res_f = res.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
return torch.nn.functional.layer_norm(vals_f + bias_f + res_f, (channels, ),
weight=gamma_f,
bias=beta_f,
eps=epsilon).to(dtype)
def residual_ds_implementation(vals, bias, res, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module._layer_norm_residual(vals, bias, res, gamma, beta, epsilon)
def residual_ds_triton_implementation(vals, bias, res, gamma, beta, epsilon):
return layer_norm_residual(vals, bias, res, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
@pytest.mark.parametrize("use_triton_ops", [False, True])
def test_layer_norm_residual(batch, seq_len, channels, dtype, use_triton_ops):
if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
bias = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
if use_triton_ops:
new_output = residual_ds_triton_implementation(vals, bias, residual, gamma, beta, epsilon)
        if dtype != torch.float16:  # only fp16 is supported by the triton kernel
return
else:
new_output = residual_ds_implementation(vals, bias, residual, gamma, beta, epsilon)
ref_output = residual_ref_implementation(vals, bias, residual, gamma, beta, epsilon, channels, dtype)
    if not allclose(new_output, ref_output):
        print((new_output - ref_output).abs().max())
    assert allclose(new_output, ref_output)
def residual_store_ref_implementation(vals, bias, res, gamma, beta, epsilon, channels, dtype):
vals_f = vals.to(torch.float32)
bias_f = bias.to(torch.float32).reshape(1, 1, -1)
res_f = res.to(torch.float32)
gamma_f = gamma.to(torch.float32)
beta_f = beta.to(torch.float32)
res_output = vals_f + bias_f + res_f
norm_output = torch.nn.functional.layer_norm(res_output, (channels, ), weight=gamma_f, bias=beta_f,
eps=epsilon).to(dtype)
return norm_output, res_output.to(dtype)
def residual_store_ds_implementation(vals, bias, res, gamma, beta, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.layer_norm_residual_store_pre_ln_res(vals, bias, res, gamma, beta, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_layer_norm_residual_store_pre_ln_res(batch, seq_len, channels, dtype):
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=get_accelerator().current_device_name())
bias = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name())
beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name())
epsilon = 1e-5
# Need to run the reference first since there's an in-place component to ours
ref_norm_output, norm_res_output = residual_store_ref_implementation(vals, bias, residual, gamma, beta, epsilon,
channels, dtype)
ds_norm_output, ds_res_output = residual_store_ds_implementation(vals, bias, residual, gamma, beta, epsilon)
assert allclose(ds_res_output, norm_res_output)
assert allclose(ds_norm_output, ref_norm_output)
@pytest.mark.inference_ops
@pytest.mark.parametrize("M", [4])
@pytest.mark.parametrize("N", [4])
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("residual", [True, False])
@pytest.mark.parametrize("input_bias", [True, False])
def test_triton_layer_norm(M, N, dtype, residual, input_bias, eps=1e-5, device='cuda'):
if not deepspeed.HAS_TRITON:
pytest.skip("triton has to be installed for the test")
torch.manual_seed(0)
# create data
x_shape = (M, N)
w_shape = (x_shape[-1], )
weight = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=False)
bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=False)
x_bias = torch.rand(w_shape, dtype=dtype, device='cuda', requires_grad=False)
x = -2.3 + 0.5 * torch.randn(x_shape, dtype=dtype, device='cuda')
dy = .1 * torch.randn_like(x)
if residual:
res = torch.rand(x_shape, dtype=dtype, device='cuda', requires_grad=False)
else:
res = torch.zeros(x_shape, dtype=dtype, device='cuda', requires_grad=False)
x.requires_grad_(True)
# forward pass
if residual or input_bias:
y_tri = layer_norm_residual(x, x_bias if input_bias else None, res, weight, bias, eps)
else:
y_tri = layer_norm(x, weight, bias, eps)
y_ref = torch.nn.functional.layer_norm(x + res + (x_bias if input_bias else 0), w_shape, weight, bias,
eps).to(dtype)
# compare
#print(f"y_tri={y_tri}, y_ref={y_ref}")
triton.testing.assert_almost_equal(y_tri, y_ref)
| 8,934 | 43.232673 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/inference_test_utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.accelerator import get_accelerator
TOLERANCES = None
def get_tolerances():
global TOLERANCES
if TOLERANCES is None:
TOLERANCES = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}
if get_accelerator().is_bf16_supported():
# Note: BF16 tolerance is higher than FP16 because of the lower precision (7 (+1) bits vs
# 10 (+1) bits)
TOLERANCES[torch.bfloat16] = (4.8e-1, 3.2e-2)
return TOLERANCES
DTYPES = None
def get_dtypes():
global DTYPES
if DTYPES is None:
DTYPES = [torch.float16, torch.float32]
try:
if get_accelerator().is_bf16_supported():
DTYPES.append(torch.bfloat16)
except (AssertionError, AttributeError):
pass
return DTYPES
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = get_tolerances()[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
| 1,062 | 24.309524 | 101 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_residual_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
TOLERANCES = None
def get_tolerances():
global TOLERANCES
if TOLERANCES is None:
# Residual add, as a sequence of casted additions, currently requires a higher tolerance
# than the other operators for FP16. We should instead better align the behaviors
# of the reference to match our kernel implementation (TODO(cmikeh2))
TOLERANCES = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 4e-3)}
if get_accelerator().is_bf16_supported():
# Note: BF16 tolerance is higher than FP16 because of the lower precision (7 (+1) bits vs
# 10 (+1) bits)
TOLERANCES[torch.bfloat16] = (4.8e-1, 3.2e-2)
return TOLERANCES
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = get_tolerances()[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
@pytest.fixture(scope="module")
def inference_module():
return InferenceBuilder().load()
def res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size=1, pre_attn_norm=True):
if pre_attn_norm:
hidden_state += (residual + final_bias + attn_output + attn_bias) / mp_size
else:
hidden_state += residual + final_bias
return hidden_state
def res_add_bias_ref_gptj(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias, mp_size):
hidden_state += attn_output + (residual + final_bias) / mp_size
if add_attn_bias:
hidden_state += attn_bias / mp_size
return hidden_state
def run_residual_add_reference(hidden_state, residual, attn_output, attn_bias, final_bias, mlp_after_attn,
add_attn_bias, mp_size, pre_attn_norm):
if mlp_after_attn:
return res_add_bias_ref(hidden_state, residual, attn_output, attn_bias, final_bias, mp_size, pre_attn_norm)
else:
return res_add_bias_ref_gptj(hidden_state, residual, attn_output, attn_bias, final_bias, add_attn_bias,
mp_size)
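# The two reference paths mirror the fused kernels: with mlp_after_attn (and pre_attn_norm) the
# residual, both biases and the attention output are summed and scaled by 1/mp_size before being
# added to hidden_state, while the GPT-J-style path adds the attention output unscaled and folds
# the residual, final bias and (optionally) the attention bias in with the 1/mp_size factor.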
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("hidden_dim", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
@pytest.mark.parametrize("mlp_after_attn", [True, False])
@pytest.mark.parametrize("add_bias", [True, False])
@pytest.mark.parametrize("mp_size", [1, 2])
@pytest.mark.parametrize("pre_attn_norm", [True, False])
@pytest.mark.parametrize("use_triton_ops", [True, False])
def test_residual_add(inference_module, batch, sequence, hidden_dim, dtype, mlp_after_attn, add_bias, mp_size,
pre_attn_norm, use_triton_ops):
    if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
ds_out = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
residual = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
attn_output = torch.randn((batch, sequence, hidden_dim), dtype=dtype, device=get_accelerator().device_name())
final_bias = torch.randn((hidden_dim), dtype=dtype, device=get_accelerator().device_name())
attn_bias = torch.randn((hidden_dim), dtype=dtype, device=get_accelerator().device_name())
ref_out = ds_out.clone()
ref_out = run_residual_add_reference(ref_out, residual, attn_output, attn_bias, final_bias, mlp_after_attn,
add_bias, mp_size, pre_attn_norm)
res_add_args = [
ds_out, residual, attn_output, attn_bias, final_bias, mp_size, mlp_after_attn, add_bias, pre_attn_norm
]
    if use_triton_ops:
        from deepspeed.ops.transformer.inference.triton import residual_add_bias
        ds_out = residual_add_bias(*res_add_args)
    else:
        if dtype == torch.float16:
            ds_out = inference_module.residual_add_bias_fp16(*res_add_args)
        elif dtype == torch.float32:
            ds_out = inference_module.residual_add_bias_fp32(*res_add_args)
        elif dtype == torch.bfloat16:
            ds_out = inference_module.residual_add_bias_bf16(*res_add_args)
        else:
            raise ValueError(f"Unsupported dtype: {dtype}")
if not allclose(ds_out, ref_out):
print((ds_out - ref_out).abs().max())
print((ds_out - ref_out).abs().mean())
print((ds_out - ref_out))
assert (allclose(ds_out, ref_out))
| 5,157 | 41.983333 | 115 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
| 95 | 18.2 | 38 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_matmul.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (5e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def run_matmul_ref(a, b):
return torch.matmul(a, b)
def run_matmul_ds(a, b, use_triton_ops=False):
if use_triton_ops:
from deepspeed.ops.transformer.inference.triton import matmul_4d as matmul
return matmul(a, b)
    assert use_triton_ops, "Only triton matmul is supported for now"
@pytest.mark.inference_ops
@pytest.mark.parametrize("B", [1, 2])
@pytest.mark.parametrize("H", [1, 2, 16])
@pytest.mark.parametrize("M", [1, 7, 8, 128])
@pytest.mark.parametrize("K", [2, 5, 16, 128])
@pytest.mark.parametrize("N", [1, 2, 8, 512])
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("use_triton_ops", [True])
def test_matmul_4d(B, H, M, K, N, dtype, use_triton_ops):
if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
# skip autotune in testing
from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul
fp16_matmul.skip_autotune()
a_ds = torch.randn((B, H, M, K), dtype=dtype, device='cuda')
b_ds = torch.randn((B, H, K, N), dtype=dtype, device='cuda')
a_ref = a_ds.clone().detach()
b_ref = b_ds.clone().detach()
ds_out = run_matmul_ds(a_ds, b_ds, use_triton_ops)
ref_out = run_matmul_ref(a_ref, b_ref)
assert (allclose(ds_out, ref_out))
| 1,911 | 30.866667 | 90 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_bias_add.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_add_reference(activations, bias):
return activations + bias
def run_bias_add_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_add_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_add_bf16(activations, bias)
else:
return inference_module.bias_add_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_add(batch, sequence, channels, dtype):
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_add_ds(activations_ds, bias_ds)
ref_out = run_bias_add_reference(activations_ref, bias_ref)
if not allclose(ds_out, ref_out):
print((ds_out - ref_out).abs().max())
assert (allclose(ds_out, ref_out))
| 1,863 | 34.169811 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_bias_gelu.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
from packaging import version as pkg_version
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_gelu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally and using the tanh approximation
return torch.nn.functional.gelu(activations.to(torch.float32) + bias.to(torch.float32),
approximate='tanh').to(activations.dtype)
def run_bias_gelu_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_gelu_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_gelu_bf16(activations, bias)
else:
return inference_module.bias_gelu_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_gelu(batch, sequence, channels, dtype):
if pkg_version.parse(torch.__version__) < pkg_version.parse("1.12"):
pytest.skip("gelu implementation matches only after torch 1.12")
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_gelu_ds(activations_ds, bias_ds)
ref_out = run_bias_gelu_reference(activations_ref, bias_ref)
assert (allclose(ds_out, ref_out))
| 2,213 | 37.842105 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_rms_norm.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import deepspeed
import torch
import pytest
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder # type: ignore
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
def ref_implementation(vals, gamma, epsilon):
variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True)
vals = vals * torch.rsqrt(variance + epsilon)
if gamma.dtype in [torch.float16, torch.bfloat16]:
vals = vals.to(gamma.dtype)
return gamma * vals
def ds_implementation(vals, gamma, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.rms_norm(vals, gamma, epsilon)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_rms_norm(batch, seq_len, channels, dtype):
device = get_accelerator().current_device_name()
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
gamma = torch.randn((channels), dtype=dtype, device=device)
epsilon = 1e-5
ref_output = ref_implementation(vals, gamma, epsilon)
new_output = ds_implementation(vals, gamma, epsilon)
assert allclose(new_output, ref_output)
def pre_ds_implementation(vals, residual, gamma, epsilon):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
return inference_module.pre_rms_norm(vals, residual, gamma, epsilon)
def pre_ref_implementation(vals, residual, gamma, epsilon):
residual = vals.to(torch.float32) + residual.to(torch.float32)
vals = residual
variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True)
vals = vals * torch.rsqrt(variance + epsilon)
if gamma.dtype in [torch.float16, torch.bfloat16]:
vals = vals.to(gamma.dtype)
return gamma * vals, residual.to(gamma.dtype)
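# The "pre" variant fuses the residual add with the normalization: it returns
# both the normalized output and the updated residual (vals + residual), which
# is what the fused pre_rms_norm kernel is expected to produce.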
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 32])
@pytest.mark.parametrize("seq_len", [1, 128])
@pytest.mark.parametrize("channels", [384, 512, 768, 1024, 2048, 8192, 14432])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
def test_pre_norm(batch, seq_len, channels, dtype):
device = get_accelerator().current_device_name()
vals = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
residual = torch.randn((batch, seq_len, channels), dtype=dtype, device=device)
gamma = torch.randn((channels), dtype=dtype, device=device)
epsilon = 1e-5
ref_output = pre_ref_implementation(vals, residual, gamma, epsilon)
new_output = pre_ds_implementation(vals, residual, gamma, epsilon)
assert allclose(new_output[0], ref_output[0])
#assert allclose(new_output[1], ref_output[1])
| 3,190 | 34.455556 | 90 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_attention.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
# reference implementation
def ref_torch_attention(q, k, v, mask, sm_scale):
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
p = torch.softmax(p.float() + mask, dim=-1).half()
ref_out = torch.matmul(p, v)
return ref_out
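# i.e. softmax(Q @ K^T * sm_scale + mask) @ V, with the softmax computed in
# float32 and the result cast back to half precision.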
# test attention operator
@pytest.mark.inference_ops
@pytest.mark.parametrize("Z", [1]) # batch
@pytest.mark.parametrize("H", [12]) # heads
@pytest.mark.parametrize("N_CTX", [4, 128]) # sequence length
@pytest.mark.parametrize("D_HEAD", [64, 128])
@pytest.mark.parametrize("causal", [True, False])
def test_attention(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
if not deepspeed.HAS_TRITON:
pytest.skip("triton has to be installed for the test")
# skip autotune in testing
from deepspeed.ops.transformer.inference.triton.matmul_ext import fp16_matmul
fp16_matmul.skip_autotune()
import triton
from deepspeed.ops.transformer.inference.triton.attention import compute_attention
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5)
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5)
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5)
sm_scale = 0.3
# reference implementation
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
score = p
mask = torch.zeros((Z, H, N_CTX, N_CTX), dtype=dtype, device="cuda")
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
    if causal:
        mask[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float() + mask, dim=-1).half()
softmax_out = p
ref_out = torch.matmul(p, v)
context = ref_out
# adjust it to expected tensor format and run test
qkv = torch.randn((Z, N_CTX, 3 * H * D_HEAD), dtype=dtype, device='cuda', requires_grad=False)
qkv[:, :, :H * D_HEAD] = q.permute(0, 2, 1, 3).contiguous().reshape((Z, N_CTX, H * D_HEAD))
qkv[:, :, 1 * H * D_HEAD:2 * H * D_HEAD] = k.permute(0, 2, 1, 3).contiguous().reshape((Z, N_CTX, H * D_HEAD))
qkv[:, :, 2 * H * D_HEAD:] = v.permute(0, 2, 1, 3).contiguous().reshape((Z, N_CTX, H * D_HEAD))
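    # qkv is packed as [batch, seq, 3 * heads * head_size] with q, k and v
    # concatenated along the last dimension; this is the layout the test feeds
    # to compute_attention together with head_size=D_HEAD.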
tri_out = compute_attention(qkv,
input_mask=mask,
layer_past=None,
alibi=None,
scale=sm_scale,
head_size=D_HEAD,
triangular=False,
use_cuda_flash=False,
use_triton_flash=False,
use_ds_attention=False)
tri_out = tri_out.reshape((Z, N_CTX, H, D_HEAD)).permute(0, 2, 1, 3)
triton.testing.allclose(ref_out, tri_out)
triton.testing.assert_almost_equal(ref_out, tri_out)
| 3,055 | 40.297297 | 113 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_bias_relu.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import InferenceBuilder
from .inference_test_utils import allclose, get_dtypes
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def run_bias_relu_reference(activations, bias):
# Expected behavior is that of casting to float32 internally
return torch.nn.functional.relu(activations.to(torch.float32) + bias.to(torch.float32)).to(activations.dtype)
def run_bias_relu_ds(activations, bias):
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_relu_fp16(activations, bias)
elif activations.dtype == torch.bfloat16:
return inference_module.bias_relu_bf16(activations, bias)
else:
return inference_module.bias_relu_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", get_dtypes())
def test_bias_relu(batch, sequence, channels, dtype):
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device=get_accelerator().device_name())
bias_ds = torch.randn((channels), dtype=dtype, device=get_accelerator().device_name())
activations_ref = activations_ds.clone().detach()
bias_ref = bias_ds.clone().detach()
ds_out = run_bias_relu_ds(activations_ds, bias_ds)
ref_out = run_bias_relu_reference(activations_ref, bias_ref)
assert (allclose(ds_out, ref_out))
| 1,932 | 36.173077 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_gelu.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
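# The looser fp16 tolerances account for the larger rounding error of half
# precision relative to fp32.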
def version_appropriate_gelu(activations):
global torch_minor_version
if torch_minor_version is None:
torch_minor_version = int(torch.__version__.split('.')[1])
    # torch versions prior to 1.12 do not expose the 'approximate' argument to gelu
if torch_minor_version < 12:
return torch.nn.functional.gelu(activations)
else:
return torch.nn.functional.gelu(activations, approximate='tanh')
def run_gelu_reference(activations):
# Expected behavior is that of casting to float32 internally and using the tanh approximation
return version_appropriate_gelu(activations.to(torch.float32)).to(activations.dtype)
def run_gelu_ds(activations, use_triton_ops=False):
if use_triton_ops:
from deepspeed.ops.transformer.inference.triton import gelu
return gelu(activations)
channels = activations.shape[-1]
bias = torch.zeros((channels), dtype=activations.dtype, device='cuda')
global inference_module
if inference_module is None:
inference_module = InferenceBuilder().load()
if activations.dtype == torch.float16:
return inference_module.bias_gelu_fp16(activations, bias)
else:
return inference_module.bias_gelu_fp32(activations, bias)
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255])
@pytest.mark.parametrize("channels", [512, 1232, 4096])
@pytest.mark.parametrize("dtype", [torch.float16])
@pytest.mark.parametrize("use_triton_ops", [True, False])
def test_gelu(batch, sequence, channels, dtype, use_triton_ops):
activations_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda')
activations_ref = activations_ds.clone().detach()
if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
ds_out = run_gelu_ds(activations_ds, use_triton_ops)
ref_out = run_gelu_reference(activations_ref)
assert (allclose(ds_out, ref_out))
| 2,546 | 34.873239 | 97 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/unit/ops/transformer/inference/test_softmax.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import pytest
import torch
import deepspeed
from deepspeed.ops.op_builder import InferenceBuilder
if not deepspeed.ops.__compatible_ops__[InferenceBuilder.NAME]:
pytest.skip("Inference ops are not available on this system", allow_module_level=True)
inference_module = None
torch_minor_version = None
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (5e-4, 5e-5), torch.float16: (3e-2, 2e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def run_softmax_reference(input):
return torch.nn.functional.softmax(input, dim=-1)
def run_softmax_ds(input, use_triton_ops=False):
if use_triton_ops:
from deepspeed.ops.transformer.inference.triton import softmax
# return torch.empty_like(input)
return softmax(input)
assert use_triton_ops, "Only triton softmax is supported for now"
@pytest.mark.inference_ops
@pytest.mark.parametrize("batch", [1, 2])
@pytest.mark.parametrize("sequence", [1, 128, 255, 1232])
@pytest.mark.parametrize("channels", [512, 4096])
@pytest.mark.parametrize("dtype", [torch.float16, torch.float32])
@pytest.mark.parametrize("use_triton_ops", [True])
def test_softmax(batch, sequence, channels, dtype, use_triton_ops):
if not deepspeed.HAS_TRITON and use_triton_ops:
pytest.skip("triton has to be installed for the test")
input_ds = torch.randn((batch, sequence, channels), dtype=dtype, device='cuda')
input_ref = input_ds.clone().detach()
ds_out = run_softmax_ds(input_ds, use_triton_ops)
ref_out = run_softmax_reference(input_ref)
assert (allclose(ds_out, ref_out))
| 1,716 | 32.019231 | 90 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/perf/adam_test1.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
from deepspeed.accelerator import get_accelerator
device = 'cpu'
model_size = 1 * 1024**3
param = torch.nn.Parameter(torch.ones(model_size, device=device))
param_fp16 = torch.nn.Parameter(torch.ones(model_size, dtype=torch.half, device=get_accelerator().device_name(0)))
optimizer = DeepSpeedCPUAdam([param])
#torch.set_num_threads(128)
param.grad = torch.ones(model_size, device=device)
avg = 0
for i in range(100):
start = time.time()
optimizer.step(fp16_param_groups=[param_fp16])
stop = time.time()
avg += (stop - start)
param.grad = torch.ones(model_size, device=device) * 2
print("Elapsed Time is ", avg / 100)
| 808 | 28.962963 | 114 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/perf/adam_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adam import DeepSpeedCPUAdam
import time
NUM_ITERS = 100
def _test_perf(param, optimizer_func):
optimizer = optimizer_func(param)
avg = 0
for i in range(NUM_ITERS):
for i, p in enumerate(param):
p.grad = torch.ones_like(p) * 2
start = time.time()
optimizer.step()
stop = time.time()
avg += (stop - start)
return avg / NUM_ITERS
def _main():
device = 'cpu'
model_size = 1 * 1024**3
group_size = [model_size, 274432]
param = [torch.nn.Parameter(torch.ones(size, device=device)) for size in group_size]
torch_time = _test_perf(param, torch.optim.Adam)
ds_time = _test_perf(param, DeepSpeedCPUAdam)
print(f"Step time: {torch_time=} {ds_time=}")
_main()
| 881 | 22.210526 | 88 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/perf/adagrad_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
import time
NUM_ITERS = 100
def _test_perf(param, optimizer_func):
optimizer = optimizer_func(param)
avg = 0
for i in range(NUM_ITERS):
for i, p in enumerate(param):
p.grad = torch.ones_like(p) * 2
start = time.time()
optimizer.step()
stop = time.time()
avg += (stop - start)
return avg / NUM_ITERS
def _main():
device = 'cpu'
model_size = 1 * 1024**3
group_size = [model_size, 274432]
param = [torch.nn.Parameter(torch.ones(size, device=device)) for size in group_size]
torch_time = _test_perf(param, torch.optim.Adagrad)
ds_time = _test_perf(param, DeepSpeedCPUAdagrad)
print(f"Step time: {torch_time=} {ds_time=}")
_main()
| 893 | 22.526316 | 88 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/small_model_debugging/test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from deepspeed.pt.deepspeed_linear import LinearModuleForZeroStage3
from deepspeed.pt.log_utils import logger
from deepspeed.accelerator import get_accelerator
def see_memory_usage(message):
    # Log the message together with current and peak memory statistics
logger.info(message)
logger.info(
"Memory Allocated %s GigaBytes ",
get_accelerator().memory_allocated() / (1024 * 1024 * 1024),
)
logger.info(
"Max Memory Allocated %s GigaBytes",
get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),
)
logger.info(
"Cache Allocated %s GigaBytes",
get_accelerator().memory_cached() / (1024 * 1024 * 1024),
)
logger.info(
"Max cache Allocated %s GigaBytes",
get_accelerator().max_memory_cached() / (1024 * 1024 * 1024),
)
tens = torch.rand(1024, 16384, dtype=torch.half, device=torch.device(get_accelerator().device_name()))
tens_back = tens.detach().clone()
#linear_bk = torch.nn.functional.linear
#torch.nn.functional.linear = deepspeed.pt.deepspeed_linear.LinearFunctionForZeroStage3.apply
model = LinearModuleForZeroStage3(16384, 16384)
model.to(get_accelerator().device_name()).half()
see_memory_usage("Before forward")
y = model(tens)
see_memory_usage("After forward")
model.weight.data = torch.zeros(1, dtype=torch.half, device=torch.device(get_accelerator().device_name()))
see_memory_usage("After weight zero")
y.backward(tens_back)
| 1,557 | 28.396226 | 106 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/small_model_debugging/test_mics_config.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Testing on a 8 GPUs node
NDEV_PER_NODE=2 torchrun --nnodes 1 --nproc-per-node 8 test_mics_config.py
"""
import os
import json
import argparse
import torch
import deepspeed
from torch.utils.data.distributed import DistributedSampler
import deepspeed.comm as dist
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.layers2 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim)])
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden = x
hidden = self.linear(hidden)
return self.cross_entropy_loss(hidden, y)
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def get_data_loader(model, total_samples, hidden_dim, device):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=torch.float)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
sampler = DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
def get_args(tmpdir, config_dict):
parser = argparse.ArgumentParser()
parser.add_argument('--zero', type=int, default=3)
parser.add_argument('--local_rank', type=int)
parser.add_argument('--mics_shard_size', default=2, type=int)
parser.add_argument('--mics_hierarchical_params_gather', default=False, action='store_true')
args = parser.parse_args() #args=''
config_dict["zero_optimization"]["stage"] = args.zero
config_dict["zero_optimization"]["mics_shard_size"] = args.mics_shard_size
config_dict["zero_optimization"]["mics_hierarchical_params_gather"] = args.mics_hierarchical_params_gather
# print('config_dict["zero_optimization"]', config_dict["zero_optimization"])
config_path = create_config_from_dict(tmpdir, config_dict)
args.deepspeed_config = config_path
return args
def print0(msg):
if dist.get_rank() == 0:
print(msg, flush=True)
rank = int(os.environ['RANK'])
print('seed:', 2222 + rank)
torch.random.manual_seed(2222 + rank)
config_dict = {
"train_batch_size": 8,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
}
},
"fp16": {
"enabled": False,
"initial_scale_power": 15
},
"zero_optimization": {
"stage": 3,
"reduce_bucket_size": 20,
"mics_shard_size": 4,
"mics_hierarchical_params_gather": True,
"stage3_model_persistence_threshold": 10
}
}
# "initial_scale_power": 15
args = get_args('/tmp/', config_dict)
hidden_dim = 32
with deepspeed.zero.MiCS_Init(config_dict_or_path=config_dict):
model = SimpleModel(hidden_dim, empty_grad=False)
# print('------> init model with deepspeed.zero.Init()')
model, _, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=True)
def print_params(tag, model):
if dist.get_rank() == 0:
for n, p in model.named_parameters():
print0("{} {}:{}".format(tag, n, p))
data_loader = get_data_loader(model=model, total_samples=1000, hidden_dim=hidden_dim, device=model.device)
#print_params('pre-train', model)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0:
print("LOSS:", loss.item())
model.backward(loss)
model.step()
#print_params('step={}'.format(n), model)
if n == 5: break
| 4,292 | 31.037313 | 110 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/small_model_debugging/test_model.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import os
import json
import argparse
import torch
import deepspeed
from torch.utils.data.distributed import DistributedSampler
import deepspeed.comm as dist
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleModel, self).__init__()
        self.linear = torch.nn.Linear(hidden_dim, hidden_dim, bias=False)
if empty_grad:
self.layers2 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim,
hidden_dim)]) #QuantizeLinear(hidden_dim, hidden_dim)
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden = x
hidden1 = self.linear(hidden)
hidden2 = self.linear(hidden1)
return self.cross_entropy_loss(hidden2, y)
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
def get_data_loader(model, total_samples, hidden_dim, device):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=torch.half)
train_label = torch.empty(total_samples, dtype=torch.long, device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
sampler = DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, sampler=sampler)
return train_loader
def get_args(tmpdir, config_dict):
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--zero', type=int, default=0)
parser.add_argument('--zero_hpz_partition_size', type=int, default=1)
args = parser.parse_args() #args=''
config_dict["zero_optimization"]["stage"] = args.zero
config_dict["zero_optimization"]["zero_hpz_partition_size"] = args.zero_hpz_partition_size
print('config_dict["zero_optimization"]', config_dict["zero_optimization"])
config_path = create_config_from_dict(tmpdir, config_dict)
args.deepspeed_config = config_path
return args
def print0(msg):
if dist.get_rank() == 0:
print(msg, flush=True)
rank = int(os.environ['RANK'])
print('seed:', 2222 + rank)
torch.random.manual_seed(2222 + rank)
config_dict = {
"train_batch_size": 256,
"steps_per_print": 1,
"optimizer": {
"type": "Adam",
"params": {
"lr": 0.00015,
}
},
"fp16": {
"enabled": True,
"initial_scale_power": 8
},
"zero_optimization": {
"stage": 0,
"reduce_bucket_size": 20,
"zero_hpz_partition_size": 1,
"reduce_scatter": True,
"zero_quantized_weights": False,
"zero_quantized_gradients": False
}
}
# "initial_scale_power": 15
args = get_args('/tmp/', config_dict)
hidden_dim = 4 * 1024
model = SimpleModel(hidden_dim, empty_grad=False)
model, _, _, _ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=True)
def print_params(tag, model):
if dist.get_rank() == 0:
for n, p in model.named_parameters():
print0("{} {}:{}".format(tag, n, p))
data_loader = get_data_loader(model=model, total_samples=256, hidden_dim=hidden_dim, device=model.device)
#print_params('pre-train', model)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
if dist.get_rank() == 0:
print("LOSS:", loss.item())
model.backward(loss)
model.step()
#print_params('step={}'.format(n), model)
#if n == 5: break
| 4,036 | 31.039683 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/small_model_debugging/stage3_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed
###################################
# Setup
###################################
class VerboseLinear(torch.nn.Linear):
def __init__(self, **kwargs):
print(f'Begin VerboseLinear.__init__')
super().__init__(**kwargs)
print(f'End VerboseLinear.__init__')
class LinearStack(torch.nn.Module):
def __init__(self, input_dim=2, hidden_dim=4, output_dim=4, num_layers=2):
super().__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.hidden_dim = hidden_dim
self.input_layer = VerboseLinear(in_features=self.input_dim, out_features=self.hidden_dim)
self.layers = torch.nn.ModuleList([
torch.nn.Linear(in_features=self.hidden_dim, out_features=self.hidden_dim, bias=False)
for x in range(num_layers)
])
self.output_layer = torch.nn.Linear(in_features=self.hidden_dim, out_features=self.output_dim)
self.identity = torch.nn.Identity()
def forward(self, x):
x = self.input_layer(x)
for layer in self.layers:
x = layer(x)
x = self.output_layer(x)
x = self.identity(x)
return x
###################################
# DRIVER
###################################
def test_driver():
print()
print('BUILDING MODEL')
with deepspeed.zero.Init():
model = LinearStack()
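    # Inside zero.Init(), parameters are partitioned across ranks as each
    # module is constructed; the commented-out checks below were inspecting
    # this via p._partitioned.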
print()
# parted = [name for (name, p) in model.named_parameters() if p._partitioned]
# not_parted = [name for (name, p) in model.named_parameters() if not p._partitioned]
# print('partitioned: ', parted)
# print('full: ', not_parted)
# print()
model.train()
test_input = torch.rand(1, model.input_dim)
grad_output = torch.rand(1, model.output_dim)
grad_output.requires_grad = False
test_input.requires_grad = False
print()
print('BEGINNING FORWARD')
print()
output = model(test_input)
output.backward(grad_output)
# parted = [name for (name, p) in model.named_parameters() if p._partitioned]
# not_parted = [name for (name, p) in model.named_parameters() if not p._partitioned]
# print('partitioned: ', parted)
# print('full:' , not_parted)
# print()
#samyamspeed.disable()
test_driver()
| 2,394 | 25.318681 | 102 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/onebit/test_nccl_backend.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed.comm as dist
import numpy as np
import argparse
import deepspeed
import os
from deepspeed.runtime.comm.nccl import NcclBackend
from deepspeed.accelerator import get_accelerator
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
args.local_rank = int(os.environ['LOCAL_RANK'])
get_accelerator().set_device(args.local_rank)
device = torch.device(get_accelerator().device_name(), args.local_rank)
size = dist.get_world_size()
rank = dist.get_rank()
backend = NcclBackend()
local_rank = args.local_rank
# A simulated compression function using deepspeed.comm
def torch_sim(a):
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
a_sign = None
worker_error = a - a_compressed
dist.all_reduce(a_compressed)
a_compressed.mul_(1 / dist.get_world_size())
a_server_sign = a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
a_list = torch.chunk(a_compressed, chunks=dist.get_world_size())
server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list]
a_sign_list = torch.chunk(a_server_sign, dist.get_world_size())
a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())])
rank = dist.get_rank()
server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank]
get_accelerator().synchronize()
dist.barrier()
return a_server_compressed, worker_error, server_error
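# The helper above mirrors the 1-bit compressed allreduce step by step:
# compress the local gradient to sign * (norm / sqrt(numel)) while recording
# the worker error, average the compressed tensors across ranks, then
# re-compress each rank's chunk on the server side while recording the server
# error. backend.compressed_allreduce below is expected to match this
# simulation within the tolerances checked at the end of the file.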
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
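# The tensor is padded up to a multiple of 8 * world_size, presumably so the
# sign bits pack evenly into bytes and every rank owns a whole chunk.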
# Add a rank-dependent bias to the simulated gradient so that none of its
# elements are too close to zero.
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
a_torch, worker_error_torch, server_error_torch = torch_sim(a)
get_accelerator().empty_cache()
a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank)
threshold = 1e-6
magnitude_threshold = 1e-6
diff_mask = (a_after - a_torch) > threshold
diff_server_mask = torch.chunk(diff_mask, size)[rank]
mpi_server = torch.chunk(a_after, size)[rank] + server_error
torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch
test_correctness = True
# If a value in compensated_server_m is very small (e.g. 1e-8), calling sign() on it can be unreliable.
# The test therefore skips positions whose server-side magnitude is below magnitude_threshold.
if test_correctness:
if torch.sum(diff_server_mask) == 0:
print('Successfully passed the test for NCCL Backend at Rank {}'.format(rank))
else:
check_mag_mask = mpi_server[diff_server_mask] > magnitude_threshold
if torch.sum(check_mag_mask) == 0:
print('Successfully passed the test for NCCL Backend at Rank {}'.format(rank))
else:
print('Fails at {} of positions'.format(torch.sum(check_mag_mask)))
| 3,515 | 36.404255 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/onebit/test_mpi_backend.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from mpi4py import MPI
import torch
import deepspeed.comm as dist
import numpy as np
import deepspeed
from deepspeed.runtime.comm.mpi import MpiBackend
from deepspeed.accelerator import get_accelerator
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
# Change cuda_aware to True to test out CUDA-Aware MPI communication
backend = MpiBackend(cuda_aware=False)
local_rank = rank % get_accelerator().device_count()
device = torch.device(get_accelerator().device_name(), local_rank)
# A simulated compression function using deepspeed.comm
def torch_sim(a):
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
a_sign = None
worker_error = a - a_compressed
dist.all_reduce(a_compressed)
a_compressed.mul_(1 / dist.get_world_size())
a_server_sign = a_compressed.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
a_list = torch.chunk(a_compressed, chunks=dist.get_world_size())
server_scale = [chunk_a.norm() / np.sqrt(chunk_a.numel()) for chunk_a in a_list]
a_sign_list = torch.chunk(a_server_sign, dist.get_world_size())
a_server_compressed = torch.cat([server_scale[i] * a_sign_list[i] for i in range(dist.get_world_size())])
rank = dist.get_rank()
server_error = a_list[rank] - server_scale[rank] * a_sign_list[rank]
get_accelerator().synchronize()
dist.barrier()
return a_server_compressed, worker_error, server_error
tensor_size = 100 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a rank-dependent bias to the simulated gradient so that none of its
# elements are too close to zero.
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
a_torch, worker_error_torch, server_error_torch = torch_sim(a)
get_accelerator().empty_cache()
a_after = backend.compressed_allreduce(a, worker_error, server_error, local_rank)
threshold = 1e-6
magnitude_threshold = 1e-6
diff_mask = (a_after - a_torch) > threshold
diff_server_mask = torch.chunk(diff_mask, size)[rank]
mpi_server = torch.chunk(a_after, size)[rank] + server_error
torch_server = torch.chunk(a_torch, size)[rank] + server_error_torch
test_correctness = True
# If a value in compensated_server_m is very small (e.g. 1e-8), calling sign() on it can be unreliable.
# The test therefore skips positions whose server-side magnitude is below magnitude_threshold.
if test_correctness:
if torch.sum(diff_server_mask) == 0:
print('Successfully passed the test for MPI Backend at Rank {}'.format(rank))
else:
check_mag_mask = mpi_server[diff_server_mask] > magnitude_threshold
if torch.sum(check_mag_mask) == 0:
print('Successfully passed the test for MPI Backend at Rank {}'.format(rank))
else:
print('Fails at {} of positions'.format(torch.sum(check_mag_mask)))
| 3,412 | 37.348315 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/onebit/test_nccl_perf.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
import deepspeed.comm as dist
import numpy as np
import argparse
import deepspeed
import os
from deepspeed.runtime.comm.nccl import NcclBackend
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.accelerator import get_accelerator
from statistics import mean
timers = SynchronizedWallClockTimer()
parser = argparse.ArgumentParser()
parser.add_argument('--local_rank', type=int, default=-1)
args = parser.parse_args()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
args.local_rank = int(os.environ['LOCAL_RANK'])
get_accelerator().set_device(args.local_rank)
device = torch.device(get_accelerator().device_name(), args.local_rank)
size = dist.get_world_size()
rank = dist.get_rank()
backend = NcclBackend()
local_rank = args.local_rank
# Setting tensor_size (BERT-Large)
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a rank-dependent bias to the simulated gradient so that none of its
# elements are too close to zero.
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
warmup = 10
iters = 10
# Warmup
for i in range(warmup):
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
time_list = []
a_sign = a.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)
scale = a.norm() / np.sqrt(a.numel())
a_compressed = scale * a_sign
print("Shape of the compressed buffer:", a_compressed.shape) if rank == 0 else None
for i in range(iters):
timers('compressed_allreduce').start()
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
#deepspeed.comm.all_reduce(a_compressed)
timers('compressed_allreduce').stop()
time_list.append(timers('compressed_allreduce').elapsed())
#timer_names = ['compressed_allreduce']
#timers.log(names=timer_names, normalizer=1, memory_breakdown=None)
places = 2
convert = 1e3
float_size = 4
if rank == 0:
for i in range(iters):
lat = time_list[i]
print("latency = ", lat * convert)
minlat = round(min(time_list) * convert)
maxlat = round(max(time_list) * convert)
meanlat = round(mean(time_list) * convert, places)
print("min, max, and mean = {} ms, {} ms, {} ms".format(minlat, maxlat, meanlat)) if rank == 0 else None
#print("tensor shape", a.shape)
duration = meanlat / 1e3
tput = ((tensor_size * 4) / duration)
print("algo throughput: %f Bytes/s, %f GB/s" % (tput, tput / 1e9)) if rank == 0 else None
size = tensor_size * 4
n = dist.get_world_size()
busbw = (size / duration) * (2 * (n - 1) / n)
print("busbw: %f GB/s" % (busbw / 1e9)) if rank == 0 else None
| 3,065 | 30.285714 | 104 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/onebit/test_mpi_perf.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from mpi4py import MPI
import torch
import deepspeed
from deepspeed.runtime.comm.mpi import MpiBackend
# Configure wall clock timer
from deepspeed.utils.timer import SynchronizedWallClockTimer
from deepspeed.accelerator import get_accelerator
from statistics import mean
timers = SynchronizedWallClockTimer()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
deepspeed.init_distributed(dist_backend=get_accelerator().communication_backend_name())
# Change cuda_aware to True to test out CUDA-Aware MPI communication
backend = MpiBackend(cuda_aware=False)
local_rank = rank % get_accelerator().device_count()
device = torch.device(get_accelerator().device_name(), local_rank)
tensor_size = 300 * 2**20
server_size = int(tensor_size / size)
if tensor_size % (8 * size) != 0:
right_tensor_size = tensor_size + (8 * size - (tensor_size % (8 * size)))
else:
right_tensor_size = tensor_size
right_server_size = right_tensor_size // size
# Add a rank-dependent bias to the simulated gradient so that none of its
# elements are too close to zero.
a = (torch.rand(tensor_size, device=device) - 0.5) + 0.01 * rank
worker_error = torch.zeros(right_tensor_size, device=device)
server_error = torch.zeros(right_server_size, device=device)
warmup = 10
iters = 10
# Warmup
for i in range(warmup):
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
time_list = []
for i in range(iters):
timers('compressed_allreduce').start()
backend.compressed_allreduce(a, worker_error, server_error, local_rank)
timers('compressed_allreduce').stop()
time_list.append(timers('compressed_allreduce').elapsed())
timer_names = ['compressed_allreduce']
timers.log(names=timer_names, normalizer=1, memory_breakdown=None)
places = 2
convert = 1e3
float_size = 4
if rank == 0:
for i in range(iters):
lat = time_list[i]
print("latency = ", lat * convert)
minlat = round(min(time_list) * convert)
maxlat = round(max(time_list) * convert)
meanlat = round(mean(time_list) * convert, places)
print("min, max, and mean = {} ms, {} ms, {} ms".format(minlat, maxlat, meanlat))
| 2,281 | 28.636364 | 87 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/lightning/test_simple.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import torch
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.strategies import DeepSpeedStrategy
from torch.utils.data import DataLoader, Dataset
class RandomDataset(Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class BoringModel(LightningModule):
def __init__(self):
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
return self.layer(x)
def training_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("train_loss", loss)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("valid_loss", loss)
def test_step(self, batch, batch_idx):
loss = self(batch).sum()
self.log("test_loss", loss)
def configure_optimizers(self):
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
def train_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def val_dataloader(self):
return DataLoader(RandomDataset(32, 64), batch_size=2)
def test_lightning_model():
"""Test that DeepSpeed works with a simple LightningModule and LightningDataModule."""
model = BoringModel()
trainer = Trainer(strategy=DeepSpeedStrategy(), max_epochs=1, precision=16, accelerator="gpu", devices=1)
trainer.fit(model)
| 1,673 | 25.571429 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/run_sanity_check.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import sys
import unittest
sys.path.append('../DeepSpeedExamples/Megatron_GPT2')
sys.path.append('../DeepSpeedExamples/BingBertSquad')
# Import the test cases here.
import Megatron_GPT2
import BingBertSquad
def pytest_hack(runner_result):
'''This is an ugly hack to get the unittest suites to play nicely with
pytest. Otherwise failed tests are not reported by pytest for some reason.
Long-term, these model tests should be adapted to pytest.
'''
if not runner_result.wasSuccessful():
print('SUITE UNSUCCESSFUL:', file=sys.stderr)
for fails in runner_result.failures:
print(fails, file=sys.stderr)
assert runner_result.wasSuccessful() # fail the test
def test_megatron():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(Megatron_GPT2.suite()))
def test_megatron_checkpoint():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(Megatron_GPT2.checkpoint_suite()))
def test_squad():
runner = unittest.TextTestRunner(failfast=True)
pytest_hack(runner.run(BingBertSquad.suite()))
| 1,298 | 27.23913 | 83 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/test_common.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import unittest
import subprocess
import os
import time
class BaseTestCase(unittest.TestCase):
def __init__(self, methodName="DeepSpeed performance test"):
super(BaseTestCase, self).__init__(methodName)
self.test_dir = "./test"
self.baseline_dir = "./baseline"
self.timestr = time.strftime("%Y%m%d-%H%M%S")
def gen_output_name(self, test_config, prefix, baseline_config=False):
other_args = test_config["other_args"] if "other_args" in test_config else ""
zero_args = "_zero" if "zero" in test_config and test_config["zero"] else ""
other_args = other_args.strip(' -\\').replace(" ", "").replace("\"", "")
if other_args:
other_args = "_" + other_args
if test_config["deepspeed"] and not baseline_config:
file_name = "_mp{0}_gpu{1}_node{2}_bs{3}_step{4}_layer{5}_hidden{6}_seq{7}_head{8}{9}_ds{10}-{11}.log".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
other_args, zero_args, self.timestr)
save_dir = self.test_dir
else:
file_name = "_mp{0}_gpu{1}_node{2}_bs{3}_step{4}_layer{5}_hidden{6}_seq{7}_head{8}{9}.log".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
other_args)
save_dir = self.baseline_dir
return os.path.join(save_dir, prefix + file_name)
def ensure_directory_exists(self, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def clean_test_env(self):
cmd = "dlts_ssh pkill -9 -f /usr/bin/python"
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
time.sleep(20)
def run_gpt2_test(self, test_config, output):
ds_flag = "-d " + test_config["json"] if test_config["deepspeed"] else ""
ckpt_num = test_config["ckpt_num_layers"] if "ckpt_num_layers" in test_config else 1
other_args = "-o " + test_config["other_args"] if "other_args" in test_config else ""
cmd = "./ds_gpt2_test.sh -m {0} -g {1} -n {2} -b {3} -s {4} -l {5} -h {6} -q {7} -e {8} -c {9} {10} {11}".format(
test_config["mp"], test_config["gpus"], test_config["nodes"], test_config["bs"], test_config["steps"],
test_config["layers"], test_config["hidden_size"], test_config["seq_length"], test_config["heads"],
ckpt_num, other_args, ds_flag)
self.ensure_directory_exists(output)
with open(output, "w") as f:
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash', stdout=f, stderr=f)
| 3,124 | 44.955882 | 122 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/run_checkpoint_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import subprocess
import os
import re
from .test_common import BaseTestCase
LAYERS = 2
HIDDEN_SIZE = 128
ATTN_HEADS = 8
def remove_file(test_id, filename):
cmd = f"if [ -f {filename} ] ; then rm -v {filename}; fi"
print(f"{test_id} cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "validation loss at the end of training for test data | LM loss:"
match_number = re.compile(r'LM loss: ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
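# Note: grep_loss_from_file keeps the last matching "LM loss" value found in
# the log; callers treat a return value of 0.0 as "no loss found".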
class GPT2CheckpointTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on GPT2 model"):
super(GPT2CheckpointTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_mp2_gpu4_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu8_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu1_node1_with_zero2_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 1,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu1_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_load_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp1_gpu2_gpu4_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero1(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero1",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero1",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_load_gpu2_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"load_gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu4_gpu2_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero2(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero2",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu2_load_gpu4_node1_with_zero2_offload(self):
test_config = {
"mp": 2,
"gpus": 2,
"load_gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"tag": "ds_zero2_offload",
"zero": True,
"other_args": "",
"checkpoint_name": "ckpt_mp2_gpu2_gpu4_w_zero2_offload",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_without_zero(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1100,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": 256,
"heads": ATTN_HEADS,
"deepspeed": True,
"zero": False,
"other_args": "",
"tag": "ds_without_zero",
"checkpoint_name": "ckpt_mp4_gpu16_wo_zero",
"checkpoint_interval": 1000,
"json": "ds_config_func_bs8_no_zero.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def gen_name(self, test_config, prefix):
save_dir = "checkpoint_test_logs"
tag = test_config["tag"]
checkpoint_name = test_config["checkpoint_name"]
file_name = f"_{tag}_{checkpoint_name}.log"
return os.path.join(save_dir, prefix + file_name)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
# Cache save and load gpu counts
save_gpus = test_config["gpus"]
if "load_gpus" in test_config:
load_gpus = test_config["load_gpus"]
del test_config["load_gpus"]
else:
load_gpus = test_config["gpus"]
# save to current directory.
checkpoint_folder = test_config["checkpoint_name"]
checkpoint_interval = test_config["checkpoint_interval"]
checkpoint_name = test_config["checkpoint_name"]
#---------------remove old checkpoint---------------#
try:
cmd = f"rm -rf {checkpoint_name}"
print(f"{self.id()} cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print("No old checkpoint")
if "cpu_optimizer" in test_config and test_config["cpu_optimizer"]:
cpu_optimizer_flag = " --cpu-optimizer"
else:
cpu_optimizer_flag = ""
#-----------------Saving Checkpoint-----------------#
# building checkpoint arguments
test_config[
"other_args"] = f"\"--save {checkpoint_folder} --save-interval {checkpoint_interval} {cpu_optimizer_flag}\""
prefix = "gpt2_saving_checkpoint"
# create checkpoint run...
base_file = self.gen_name(test_config, prefix)
# remove previous test log
try:
cmd = f"rm {base_file}"
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} No old logs")
print("{0}: Run for saving checkpoint".format(self.id()))
self.run_gpt2_test(test_config, base_file)
#-----------------Loading Checkpoint-----------------#
# building checkpoint arguments
test_config["other_args"] = f"\"--load {checkpoint_folder} {cpu_optimizer_flag} \""
# set checkpoint load iteration
try:
cmd = f"echo {checkpoint_interval} > {checkpoint_name}/latest_checkpointed_iteration.txt"
print(f"{self.id()} running cmd: {cmd}")
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} Failed to update the checkpoint iteration file")
return False
prefix = "gpt2_loading_checkpoint"
# set load gpus
test_config["gpus"] = load_gpus
print("{0}: Second run loading checkpoint and continuing.".format(self.id()))
test_file = self.gen_name(test_config, prefix)
# remove previous test log
try:
cmd = f"rm {test_file}"
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
except:
print(f"{self.id()} no previous logs for")
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def checkpoint_suite():
suite = unittest.TestSuite()
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_with_zero2_offload'))
# Shrink DP
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu1_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_load_gpu2_node1_with_zero2_offload'))
# Expand DP
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp1_gpu2_load_gpu4_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero1'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero2'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu2_load_gpu4_node1_with_zero2_offload'))
suite.addTest(GPT2CheckpointTestCase('test_mp2_gpu4_node1_without_zero'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(checkpoint_suite())
| 18,755 | 31.732984 | 120 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/run_perf_baseline.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy the webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import re
from test_common import BaseTestCase
class GPT2PerfBaselineTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed performance test on GPT2 model"):
super(GPT2PerfBaselineTestCase, self).__init__(methodName)
def test_perf_1_5B(self):
test_config = {
"mp": 2,
"gpus": 16,
"nodes": 4,
"bs": 16,
"steps": 100,
"layers": 48,
"hidden_size": 1600,
"seq_length": 1024,
"heads": 16,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_4B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 64,
"hidden_size": 2304,
"seq_length": 1024,
"heads": 16,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_8B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 72,
"hidden_size": 3072,
"seq_length": 1024,
"heads": 24,
"deepspeed": False,
}
self.run_test(test_config)
def test_perf_20B(self):
test_config = {
"mp": 16,
"gpus": 16,
"nodes": 4,
"bs": 4,
"steps": 50,
"layers": 111,
"hidden_size": 3808,
"seq_length": 1024,
"heads": 32,
"ckpt_num_layers": 1,
"deepspeed": False,
}
self.run_test(test_config)
def run_test(self, test_config):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_perf"
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
exec_time = self.grep_latency_from_file(test_file)
if exec_time == 0.0:
print("{0}: no latency found in file {1}".format(self.id(), test_file))
else:
print("{0}: execution time per iteration is {1}ms.".format(self.id(), exec_time))
def grep_latency_from_file(self, file_name):
latency = 0.0
count = 0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "elapsed time per iteration"
match_number = re.compile(r'elapsed time per iteration \(ms\): ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
ms_per_iter = re.findall(match_number, line)
latency += float(ms_per_iter[0])
count += 1
if count > 0:
latency /= count
return latency
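    # Illustrative example (assumed log format): a Megatron log line such as
    #   "... | elapsed time per iteration (ms): 1127.5 | ..."
    # matches the regex above; the per-iteration values from all matching lines
    # are averaged to produce the reported latency.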
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2PerfBaselineTestCase('test_perf_1_5B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_4B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_8B'))
suite.addTest(GPT2PerfBaselineTestCase('test_perf_20B'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
| 3,524 | 25.908397 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/run_perf_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import re
from test_common import BaseTestCase
class GPT2PerfTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed performance test on GPT2 model"):
super(GPT2PerfTestCase, self).__init__(methodName)
def test_perf_1_5B(self):
test_config = {
"mp": 1,
"gpus": 16,
"nodes": 4,
"bs": 32,
"steps": 100,
"layers": 48,
"hidden_size": 1600,
"seq_length": 1024,
"heads": 16,
"deepspeed": True,
"json": "ds_config_perf_bs32.json",
}
self.run_test(test_config)
def test_perf_4B(self):
test_config = {
"mp": 1,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 100,
"layers": 64,
"hidden_size": 2304,
"seq_length": 1024,
"heads": 16,
"deepspeed": True,
"json": "ds_config_perf_bs8.json",
}
self.run_test(test_config)
def test_perf_8B(self):
test_config = {
"mp": 2,
"gpus": 16,
"nodes": 4,
"bs": 16,
"steps": 100,
"layers": 72,
"hidden_size": 3072,
"seq_length": 1024,
"heads": 24,
"deepspeed": True,
"json": "ds_config_perf_bs16.json",
}
self.run_test(test_config)
def test_perf_20B(self):
test_config = {
"mp": 4,
"gpus": 16,
"nodes": 4,
"bs": 8,
"steps": 50,
"layers": 111,
"hidden_size": 3808,
"seq_length": 1024,
"heads": 32,
"ckpt_num_layers": 1,
"deepspeed": True,
"json": "ds_config_perf_bs8.json",
}
self.run_test(test_config)
def run_test(self, test_config):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_perf"
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
exec_time = self.grep_latency_from_file(test_file)
if exec_time == 0.0:
print("{0}: no latency found in file {1}".format(self.id(), test_file))
else:
print("{0}: execution time per iteration is {1}ms.".format(self.id(), exec_time))
def grep_latency_from_file(self, file_name):
latency = 0.0
count = 0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "elapsed time per iteration"
match_number = re.compile(r'elapsed time per iteration \(ms\): ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
ms_per_iter = re.findall(match_number, line)
latency += float(ms_per_iter[0])
count += 1
if count > 0:
latency /= count
return latency
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2PerfTestCase('test_perf_1_5B'))
suite.addTest(GPT2PerfTestCase('test_perf_4B'))
suite.addTest(GPT2PerfTestCase('test_perf_8B'))
suite.addTest(GPT2PerfTestCase('test_perf_20B'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
| 3,662 | 26.133333 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy webtext data to the "Megatron-LM" folder before running this script.
"""
from .run_func_test import GPT2FuncTestCase
from .run_checkpoint_test import GPT2CheckpointTestCase, checkpoint_suite
from .run_func_test import suite
| 339 | 27.333333 | 83 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/Megatron_GPT2/run_func_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import os
import re
from .test_common import BaseTestCase
LAYERS = 2
HIDDEN_SIZE = 128
ATTN_HEADS = 8
SEQ_LEN = 64
MASTER_PORT = 29700
def grep_loss_from_file(file_name):
loss = 0.0
print(f'grepping {file_name}')
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "validation loss at the end of training for test data | LM loss:"
match_number = re.compile(r'LM loss: ([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
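# Illustrative example (assumed log format): a line such as
#   "validation loss at the end of training for test data | LM loss: 3.177E+00 | ..."
# matches the filter and regex above, yielding loss = 3.177.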
class GPT2FuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on GPT2 model"):
super(GPT2FuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_mp1_gpu2_node1_fp16(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_no_zero.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero1(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero1(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero1(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero1(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero1.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2.json",
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2_ds_offload(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.02)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2_ds_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
succ = self.run_test(test_config, 0.02)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_gas(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": True,
"json": "ds_config_func_bs8_zero2_gas3.json",
"baseline": "ds_config_func_bs8_zero0_gas3.json",
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
succ = self.run_partition_activations_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_ds_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.02)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.02)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2_ds_offload(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.02)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.02)
self.assertTrue(succ)
def test_mp1_gpu1_node1_zero2_torch_offload(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs4_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp1_gpu2_node1_zero2_torch_offload(self):
test_config = {
"mp": 1,
"gpus": 2,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_mp2_gpu4_node1_zero2_torch_offload(self):
test_config = {
"mp": 2,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
succ = self.run_partition_activations_test(partition_activation_config, 0.01)
self.assertTrue(succ)
def test_mp4_gpu4_node1_zero2_torch_offload(self):
test_config = {
"mp": 4,
"gpus": 4,
"nodes": 1,
"bs": 8,
"steps": 1000,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_bs8_zero2_offload.json",
"cpu_optimizer": True,
"test_torch_offload": True,
}
basic_run_config = test_config
succ = self.run_test(basic_run_config, 0.01)
self.assertTrue(succ)
partition_activation_config = test_config
        succ = self.run_partition_activations_test(partition_activation_config, 0.01)
        self.assertTrue(succ)
def test_optimizer_scheduler(self):
test_config = {
"mp": 1,
"gpus": 1,
"nodes": 1,
"bs": 4,
"steps": 20,
"layers": LAYERS,
"hidden_size": HIDDEN_SIZE,
"seq_length": SEQ_LEN,
"heads": ATTN_HEADS,
"deepspeed": False,
"json": "ds_config_func_scheduler.json",
}
succ = self.run_test(test_config, 0.01)
        # only ensure the run completes without crashing.
self.assertTrue(True)
def run_partition_activations_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
baseline_prefix = "gpt2_func_"
prefix = "gpt2_partition_activation_"
deepspeed_config = test_config["json"]
baseline_deepspeed_config = False
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, True)
# baseline run...
        # turn off deepspeed if a baseline deepspeed config
        # is not provided
        if "baseline" not in test_config:
test_config["deepspeed"] = False
else:
test_config["json"] = test_config["baseline"]
baseline_prefix += test_config["json"][0:-5]
baseline_deepspeed_config = True
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
base_file = self.gen_output_name(test_config, baseline_prefix, baseline_config=baseline_deepspeed_config)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, False)
test_config["other_args"] = f"\"--deepspeed-activation-checkpointing {cpu_optimizer_flag}\""
test_config["json"] = deepspeed_config
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "gpt2_func"
baseline_prefix = prefix
deepspeed_config = test_config["json"]
baseline_deepspeed_config = False
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, True)
# baseline run...
# turn off deepspeed if a baseline deepspeed config
# is not provided
if not "baseline" in test_config:
test_config["deepspeed"] = False
else:
test_config["json"] = test_config["baseline"]
baseline_prefix = prefix + test_config["json"][0:-5]
baseline_deepspeed_config = True
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
# baseline run...
base_file = self.gen_output_name(test_config, baseline_prefix, baseline_config=baseline_deepspeed_config)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_gpt2_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
cpu_optimizer_flag = self.gen_cpu_optimizer_flag(test_config, False)
test_config["other_args"] = f"\"{cpu_optimizer_flag}\""
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_gpt2_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
    def gen_cpu_optimizer_flag(self, test_config, is_baseline):
        cpu_optimizer_flag = ""
        if 'cpu_optimizer' in test_config and test_config['cpu_optimizer']:
            cpu_optimizer_flag = "--cpu-optimizer"
            if is_baseline or ('test_torch_offload' in test_config and test_config['test_torch_offload']):
                cpu_optimizer_flag += " --cpu_torch_adam"
        return cpu_optimizer_flag
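    # Illustrative flag combinations produced above (sketch, matching the
    # configs used in this file):
    #   cpu_optimizer and baseline run        -> "--cpu-optimizer --cpu_torch_adam"
    #   cpu_optimizer and test_torch_offload  -> "--cpu-optimizer --cpu_torch_adam"
    #   cpu_optimizer with DeepSpeed Adam     -> "--cpu-optimizer"
    #   no cpu_optimizer                      -> ""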
def suite():
suite = unittest.TestSuite()
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_fp16'))
# Baseline = Megatron + Torch.Optim.Adam
# Test = Megatron + Torch.Optim.Adam + ZeRO-Offload
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_torch_offload'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2_torch_offload'))
# Baseline = Megatron + Torch.Optim.Adam
# Test = Megatron + DeepSpeedAdam + ZeRO-Offload
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2_ds_offload'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero1'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu1_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp1_gpu2_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp4_gpu4_node1_zero2'))
suite.addTest(GPT2FuncTestCase('test_mp2_gpu4_node1_zero2_gas'))
suite.addTest(GPT2FuncTestCase('test_optimizer_scheduler'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
| 19,096 | 30.61755 | 113 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/BingBertSquad/BingBertSquad_run_func_test.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Note: please copy webtext data to the "Megatron-LM" folder before running this script.
"""
import unittest
import os
import re
from .BingBertSquad_test_common import BaseTestCase
def grep_loss_from_file(file_name):
loss = 0.0
with open(file_name, 'r') as f:
lines = f.readlines()
line_filter = "bert_squad_progress: step="
match_number = re.compile(r'loss=([-+]?[0-9]+\.?[0-9]*(?:[Ee][-+]?[0-9]+)?)')
for line in lines:
if line_filter in line:
loss = re.findall(match_number, line)
loss = float(loss[0])
if loss == 0.0:
print("no loss found in file ", file_name)
return loss
class BingBertSquadFuncTestCase(BaseTestCase):
def __init__(self, methodName="DeepSpeed function test on BingBertSquad model"):
super(BingBertSquadFuncTestCase, self).__init__(methodName)
def setUp(self):
self.save_dir = os.getcwd()
new_dir = os.path.dirname(__file__)
if new_dir:
os.chdir(new_dir)
def tearDown(self):
os.chdir(self.save_dir)
def test_gpu4_fp16(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu4_fp16_zero2(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_zero2_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp16(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp16_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--fp16 --print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu4_fp32(self):
test_config = {
"gpus": 4,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def test_gpu1_fp32(self):
test_config = {
"gpus": 1,
"deepspeed": False,
"json": "deepspeed_bsz24_fp32_config.json",
"max_steps": 8,
"max_epoch_steps": 4,
"other_args": "--print_steps 1"
}
succ = self.run_test(test_config, 0.01)
self.assertTrue(succ)
def run_test(self, test_config, r_tol):
print("\n")
print("{0}: starting......".format(self.id()))
prefix = "BingBertSquad_func"
test_config['other_args'] += f" --max_steps {test_config['max_steps']}"
test_config['other_args'] += f" --max_steps_per_epoch {test_config['max_epoch_steps']}"
# baseline run...
test_config["deepspeed"] = False
base_file = self.gen_output_name(test_config, prefix)
# skip baseline run if it exists.
if not self.has_loss_data(base_file):
print("{0}: baseline run.".format(self.id()))
self.run_BingBertSquad_test(test_config, base_file)
else:
print("{0}: baseline exists.".format(self.id()))
# DeepSpeed run...
test_config["deepspeed"] = True
print("{0}: DeepSpeed run.".format(self.id()))
test_file = self.gen_output_name(test_config, prefix)
self.run_BingBertSquad_test(test_config, test_file)
return self.check_parity(base_file, test_file, r_tol)
def has_loss_data(self, file_name):
has_loss = False
if os.path.exists(file_name):
loss = grep_loss_from_file(file_name)
if loss != 0.0:
has_loss = True
return has_loss
def check_parity(self, base_file, test_file, r_tol):
base_loss = grep_loss_from_file(base_file)
test_loss = grep_loss_from_file(test_file)
print("baseline loss: {0}, test loss: {1}".format(base_loss, test_loss))
if base_loss == 0.0 or test_loss == 0.0:
return False
if abs((base_loss - test_loss) / base_loss) > r_tol:
return False
return True
def suite():
suite = unittest.TestSuite()
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp16_zero2'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp16'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu4_fp32'))
suite.addTest(BingBertSquadFuncTestCase('test_gpu1_fp32'))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
| 5,243 | 28.460674 | 95 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/BingBertSquad/BingBertSquad_test_common.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import unittest
import subprocess
import os
import time
class BaseTestCase(unittest.TestCase):
def __init__(self, methodName="DeepSpeed performance test"):
super(BaseTestCase, self).__init__(methodName)
self.test_dir = "./test"
self.baseline_dir = "./baseline"
self.timestr = time.strftime("%Y%m%d-%H%M%S")
def gen_output_name(self, test_config, prefix):
other_args = test_config["other_args"] if "other_args" in test_config else ""
zero_args = "_zero" if "zero" in test_config and test_config["zero"] else ""
other_args = other_args.strip(' -\\').replace(" ", "").replace("\"", "")
if other_args:
other_args = "_" + other_args
if test_config["deepspeed"]:
file_name = "_gpu{0}_{1}_ds{2}-{3}.log".format(test_config["gpus"], other_args, zero_args, self.timestr)
save_dir = self.test_dir
else:
file_name = "_gpu{0}_{1}.log".format(test_config["gpus"], other_args)
save_dir = self.baseline_dir
return os.path.join(save_dir, prefix + file_name)
def ensure_directory_exists(self, filename):
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
def clean_test_env(self):
cmd = "dlts_ssh pkill -9 -f /usr/bin/python"
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash')
time.sleep(20)
def run_BingBertSquad_test(self, test_config, output):
ds_flag = " -d --deepspeed_config " + test_config["json"] if test_config["deepspeed"] else " "
other_args = " " + test_config["other_args"] if "other_args" in test_config else " "
cmd = "./run_BingBertSquad_sanity.sh -e 1 -g {0} {1} {2}".format(test_config["gpus"], other_args, ds_flag)
self.ensure_directory_exists(output)
with open(output, "w") as f:
print(cmd)
subprocess.run(cmd, shell=True, check=False, executable='/bin/bash', stdout=f, stderr=f)
| 2,150 | 36.086207 | 116 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/BingBertSquad/__init__.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from .BingBertSquad_run_func_test import BingBertSquadFuncTestCase
from .BingBertSquad_run_func_test import suite
| 210 | 25.375 | 66 |
py
|
DeepSpeed
|
DeepSpeed-master/tests/model/BingBertSquad/test_e2e_squad.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
import subprocess as sp
import os
from math import isclose
import sys
import pytest
import json
sys.path.append("../../../DeepSpeedExamples/BingBertSquad")
import evaluate as eval
squad_dir = "/data/BingBertSquad"
base_dir = "../../../DeepSpeedExamples/BingBertSquad"
script_file_name = "run_squad_deepspeed.sh"
model_file_name = "training_state_checkpoint_162.tar"
eval_file_name = "dev-v1.1.json"
pred_file_name = "predictions.json"
num_gpus = "4"
timeout_sec = 5 * 60 * 60 # 5 hours
eval_version = "1.1"
def create_config_file(tmpdir, zeroenabled=False):
config_dict = {
"train_batch_size": 24,
"train_micro_batch_size_per_gpu": 6,
"steps_per_print": 10,
"optimizer": {
"type": "Adam",
"params": {
"lr": 3e-5,
"weight_decay": 0.0,
"bias_correction": False
}
},
"gradient_clipping": 1.0,
"fp16": {
"enabled": True
}
}
config_dict["zero_optimization"] = zeroenabled
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
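# Illustrative output (sketch): with zeroenabled=False the file written above
# contains, roughly,
#   {"train_batch_size": 24, "train_micro_batch_size_per_gpu": 6, "steps_per_print": 10,
#    "optimizer": {"type": "Adam", "params": {"lr": 3e-05, "weight_decay": 0.0, "bias_correction": false}},
#    "gradient_clipping": 1.0, "fp16": {"enabled": true}, "zero_optimization": false}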
def test_e2e_squad_deepspeed_base(tmpdir):
config_file = create_config_file(tmpdir)
# base run results => {"exact_match": 83.9829706717124, "f1": 90.71138132004097}
expected_exact_match = 83.98
expected_f1 = 90.71
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen(["bash", script_file_name, num_gpus, model_file, squad_dir, output_dir, config_file], cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"], expected_exact_match, abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
def test_e2e_squad_deepspeed_zero(tmpdir):
config_file = create_config_file(tmpdir, True)
# base run results => {"exact_match": 84.1438032166509, "f1": 90.89776136505441}
expected_exact_match = 84.14
expected_f1 = 90.89
model_file = os.path.join(squad_dir, model_file_name)
eval_file = os.path.join(squad_dir, eval_file_name)
output_dir = os.path.join(tmpdir, "output")
pred_file = os.path.join(output_dir, pred_file_name)
proc = sp.Popen(["bash", script_file_name, num_gpus, model_file, squad_dir, output_dir, config_file], cwd=base_dir)
try:
proc.communicate(timeout=timeout_sec)
if os.path.exists(pred_file):
eval_result = eval.evaluate(eval_version, eval_file, pred_file)
print("evaluation result: ", json.dumps(eval_result))
assert isclose(eval_result["exact_match"], expected_exact_match, abs_tol=1e-2)
assert isclose(eval_result["f1"], expected_f1, abs_tol=1e-2)
else:
pytest.fail("Error: Run Failed")
except sp.TimeoutExpired:
proc.kill()
pytest.fail("Error: Timeout")
except sp.CalledProcessError:
pytest.fail("Error: Run Failed")
| 3,710 | 28.452381 | 119 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/ds_aio_handle.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
import os
import time
from multiprocessing import Pool, Barrier
from test_ds_aio_utils import report_results, task_log, task_barrier
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
def pre_handle(args, tid, read_op):
io_string = "Read" if read_op else "Write"
num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size
file = args.read_file if read_op else f'{args.write_file}.{tid}'
io_parallel = args.io_parallel if args.io_parallel else 1
handle = AsyncIOBuilder().load().aio_handle(args.block_size, args.queue_depth, args.single_submit,
args.overlap_events, io_parallel)
task_log(tid, f'Created deepspeed aio handle')
if args.gpu:
buffer = torch.empty(num_bytes, dtype=torch.uint8, device=get_accelerator().device_name())
else:
if args.use_accelerator_pin_memory:
buffer = get_accelerator().pin_memory(torch.empty(num_bytes, dtype=torch.uint8, device='cpu'))
else:
buffer = handle.new_cpu_locked_tensor(num_bytes, torch.empty(0, dtype=torch.uint8))
task_log(tid, f'Allocate tensor of size {num_bytes} bytes')
ctxt = {}
ctxt['file'] = file
ctxt['num_bytes'] = num_bytes
ctxt['handle'] = handle
ctxt['buffer'] = buffer
ctxt['elapsed_sec'] = 0
task_log(tid, f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}')
return ctxt
def pre_handle_read(pool_params):
args, tid = pool_params
ctxt = pre_handle(args, tid, True)
return ctxt
def pre_handle_write(pool_params):
args, tid = pool_params
ctxt = pre_handle(args, tid, False)
return ctxt
def post_handle(pool_params):
_, _, ctxt = pool_params
ctxt["buffer"].detach()
ctxt["buffer"] = None
return ctxt
def main_parallel_read(pool_params):
args, tid, ctxt = pool_params
handle = ctxt['handle']
start_time = time.time()
ret = handle.pread(ctxt['buffer'], ctxt['file'], args.validate, True)
assert ret != -1
handle.wait()
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_parallel_write(pool_params):
args, tid, ctxt = pool_params
handle = ctxt['handle']
start_time = time.time()
ret = handle.pwrite(ctxt['buffer'], ctxt['file'], args.validate, True)
assert ret != -1
handle.wait()
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_handle_read(pool_parms):
args, tid, ctxt = pool_parms
handle = ctxt['handle']
start_time = time.time()
ret = handle.read(ctxt['buffer'], ctxt['file'], args.validate)
assert ret != -1
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_handle_write(pool_parms):
args, tid, ctxt = pool_parms
handle = ctxt['handle']
start_time = time.time()
ret = handle.write(ctxt['buffer'], ctxt['file'], args.validate)
assert ret != -1
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def get_schedule(args, read_op):
schedule = {}
if read_op:
schedule['pre'] = pre_handle_read
schedule['post'] = post_handle
schedule['main'] = main_parallel_read if args.io_parallel else main_handle_read
else:
schedule['pre'] = pre_handle_write
schedule['post'] = post_handle
schedule['main'] = main_parallel_write if args.io_parallel else main_handle_write
return schedule
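# Note: each schedule is a three-phase dict. 'pre' creates the aio handle and
# allocates the I/O buffer, 'main' performs the timed read/write (the parallel
# variants are used when --io_parallel is set), and 'post' releases the buffer.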
def _aio_handle_tasklet(pool_params):
args, tid, read_op = pool_params
# Create schedule
schedule = get_schedule(args, read_op)
task_log(tid, f'schedule = {schedule}')
task_barrier(aio_barrier, args.threads)
# Run pre task
task_log(tid, f'running pre-task')
ctxt = schedule["pre"]((args, tid))
task_barrier(aio_barrier, args.threads)
# Run main tasks in a loop
ctxt["main_task_sec"] = 0
for i in range(args.loops):
task_log(tid, f'running main task {i}')
start_time = time.time()
ctxt = schedule["main"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
stop_time = time.time()
ctxt["main_task_sec"] += stop_time - start_time
# Run post task
task_log(tid, f'running post-task')
ctxt = schedule["post"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops
def _init_tasklet(b):
global aio_barrier
aio_barrier = b
def aio_handle_multiprocessing(args, read_op):
b = Barrier(args.threads)
pool_params = [(args, p, read_op) for p in range(args.threads)]
with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
pool_results = p.map(_aio_handle_tasklet, pool_params)
report_results(args, read_op, pool_results)
| 5,191 | 28.168539 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/aio_bench_perf_sweep.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import sys
import argparse
import json
import itertools
import subprocess
import shutil
from test_ds_aio_utils import refine_integer_value
from perf_sweep_utils import READ_OP_DESC, WRITE_OP_DESC, BENCH_LOG_DIR, \
READ_IO_DIR, WRITE_IO_DIR, READ_LOG_DIR, WRITE_LOG_DIR
from deepspeed.ops.op_builder import AsyncIOBuilder
OTHER_OPTIONS = '--handle'
PERF_SCRIPT = 'test_ds_aio.py'
DEFAULT_SWEEP_CONFIG = {
"block_size": ["128K", "256K"],
"queue_depth": [4, 16, 32],
"overlap_events": [True, False],
"io_parallel": [2, 8],
"single_submit": [False]
}
class Job(object):
def __init__(self, cmd_line, output_file=None, work_dir=None):
self.cmd_line = cmd_line
self.output_file = output_file
self.work_dir = work_dir
self.output_fd = None
def cmd(self):
return self.cmd_line
def get_stdout(self):
return self.output_fd
def get_stderr(self):
return self.output_fd
def get_cwd(self):
return self.work_dir
def open_output_file(self):
if self.output_file is not None:
self.output_fd = open(self.output_file, 'w')
def close_output_file(self):
if self.output_fd is not None:
self.output_fd.close()
self.output_fd = None
class SweepConfig(object):
def __init__(self, args):
self.nvme_dir = args.nvme_dir
self.io_size = args.io_size
self.search_space = get_sweep_config_dict(args.sweep_config)
self.read = not args.no_read
self.write = not args.no_write
self.flush_cache = not args.no_sudo
self.log_dir = args.log_dir
self.loops = args.loops
self.other_options = f'{OTHER_OPTIONS} --loops {args.loops}'
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--nvme_dir',
required=True,
type=str,
help='Directory in which to perform I/O tests. A writeable directory on a NVMe device.')
parser.add_argument('--sweep_config', type=str, default=None, help='Performance sweep configuration json file.')
parser.add_argument('--no_read', action='store_true', help='Disable read performance measurements.')
parser.add_argument('--no_write', action='store_true', help='Disable write performance measurements.')
parser.add_argument('--io_size',
type=str,
default="400M",
help='Number of I/O bytes to read/write for performance measurements.')
parser.add_argument(
'--no_sudo',
action='store_true',
help=
'Run without sudo access. Page cache will not be flushed and reported read speeds may be higher than actual.')
parser.add_argument(
'--log_dir',
type=str,
default=BENCH_LOG_DIR,
help=f'Output directory for performance log files. Default is {os.path.join(".", BENCH_LOG_DIR)}')
parser.add_argument('--loops', type=int, default=1, help='Count of operation repetitions')
args = parser.parse_args()
print(f'args = {args}')
return args
def dump_cmd_lines(cmd_lines):
print(f'cmd line count = {len(cmd_lines)}')
for i, cmd in enumerate(cmd_lines):
print(f'{i}: {cmd}')
def get_sweep_config_dict(sweep_config_json):
if sweep_config_json is None:
return DEFAULT_SWEEP_CONFIG
with open(sweep_config_json) as fp:
sweep_config = json.load(fp)
return sweep_config
def get_sweep_cmd_lines(sweep_config_dict):
def flatten_options(key, value_list):
flat_list = []
for v in value_list:
if not type(v) is bool:
flat_list.append(f'--{key} {v}')
elif v:
flat_list.append(f'--{key}')
else:
flat_list.append(' ')
return flat_list
flat_list = [flatten_options(key, value) for key, value in sweep_config_dict.items()]
cmd_list = list(itertools.product(*flat_list))
cmd_list = [list(cmd) for cmd in cmd_list]
#dump_cmd_lines(cmd_list)
return cmd_list
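# Illustrative expansion (sketch): a search space such as
#   {"queue_depth": [4, 16], "single_submit": [False]}
# is flattened to [['--queue_depth 4', '--queue_depth 16'], [' ']] and the
# Cartesian product yields the per-run command-line suffixes, e.g.
#   ['--queue_depth 4', ' '] and ['--queue_depth 16', ' '].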
def run_job(job):
args = ' '.join(job.cmd())
print(f'args = {args}')
job.open_output_file()
proc = subprocess.run(args=args, shell=True, stdout=job.get_stdout(), stderr=job.get_stderr(), cwd=job.get_cwd())
job.close_output_file()
assert proc.returncode == 0, \
f"This command failed: {job.cmd()}"
def launch_sweep(sweep_jobs, sync_job, flush_cache_job):
for perf_job in sweep_jobs:
if flush_cache_job is not None:
run_job(sync_job)
run_job(flush_cache_job)
run_job(perf_job)
run_job(sync_job)
def create_cmd_tags(cmd_line):
tags = {}
for param_value in cmd_line:
fields = param_value.split()
if len(fields) == 1:
tags[fields[0]] = None
elif len(fields) == 2:
tags[fields[0]] = fields[1]
return tags
def get_log_file(io_op_desc, cmd_line):
QUEUE_DEPTH = "--queue_depth"
BLOCK_SIZE = "--block_size"
SINGLE_SUBMIT = "--single_submit"
OVERLAP_EVENTS = "--overlap_events"
THREAD_COUNT = "--threads"
IO_PARALLEL = "--io_parallel"
tag_map = {
QUEUE_DEPTH: "d",
BLOCK_SIZE: "bs",
SINGLE_SUBMIT: "single",
OVERLAP_EVENTS: "overlap",
THREAD_COUNT: "t",
IO_PARALLEL: "p"
}
tag_default = {
QUEUE_DEPTH: 1,
BLOCK_SIZE: "1M",
SINGLE_SUBMIT: "block",
OVERLAP_EVENTS: "sequential",
THREAD_COUNT: 1,
IO_PARALLEL: 1
}
def get_default_value(tag):
value = tag_default[tag]
if tag in [SINGLE_SUBMIT, OVERLAP_EVENTS]:
return value
return f'{tag_map[tag]}{value}'
def get_config_value(tag, value):
tag_key = tag_map[tag]
if value is None:
return tag_key
return f'{tag_key}{value}'
tag_list = [SINGLE_SUBMIT, OVERLAP_EVENTS, THREAD_COUNT, IO_PARALLEL, QUEUE_DEPTH, BLOCK_SIZE]
log_tags = [io_op_desc]
cmd_tags = create_cmd_tags(cmd_line)
for tag in tag_list:
if tag in cmd_tags:
log_tags.append(get_config_value(tag, cmd_tags[tag]))
else:
log_tags.append(get_default_value(tag))
log_file = '_'.join(log_tags)
log_file += '.txt'
return log_file
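# Illustrative example (sketch): a sweep command containing
#   '--overlap_events', '--queue_depth 16', '--block_size 256K' and '--io_parallel 2'
# produces the read-log name "read_block_overlap_t1_p2_d16_bs256K.txt", with the
# defaults above filled in for options absent from the command line.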
def create_perf_jobs(io_op_desc, log_dir, cmd_lines):
py_cmd = ['python', os.path.join(script_path(), PERF_SCRIPT)]
perf_jobs = []
for cmd in cmd_lines:
log_file = os.path.join(log_dir, get_log_file(io_op_desc, cmd))
job = Job(cmd_line=py_cmd + cmd, output_file=log_file)
perf_jobs.append(job)
return perf_jobs
def script_path():
return os.path.dirname(os.path.realpath(sys.argv[0]))
def async_io_setup():
return AsyncIOBuilder().is_compatible()
def get_block_size_and_count(io_bytes):
block_size = 1
block_count = io_bytes
bytes_in_KB = 1024
while block_count % bytes_in_KB == 0:
block_size *= bytes_in_KB
block_count /= bytes_in_KB
return int(block_size), int(block_count)
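# Illustrative example: an io_size of 400M (419430400 bytes) factors into
# block_size = 1048576 (1M) and block_count = 400, which become the dd
# arguments used below to create the read file.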
def create_read_file(sweep_config):
read_folder = os.path.join(sweep_config.nvme_dir, f'{READ_IO_DIR}')
os.makedirs(read_folder, exist_ok=True)
read_file_name = os.path.join(read_folder, f'random_{sweep_config.io_size}B.pt')
block_size, block_count = get_block_size_and_count(refine_integer_value(sweep_config.io_size))
dd_job = Job(cmd_line=[f'dd if=/dev/urandom of={read_file_name} bs={block_size} count={block_count}'])
print(f'[Start] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....')
run_job(dd_job)
print(f'[Done] Create read file of {sweep_config.io_size} bytes by running {dd_job.cmd()} ....')
return read_folder, read_file_name
def remove_folder(folder):
assert os.path.isdir(folder), f"Error: cannot remove {folder} - folder not found"
shutil.rmtree(folder)
def run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines):
read_folder, read_file_name = create_read_file(sweep_config)
read_option = f'--read_file {read_file_name}'
read_cmd_lines = [[f'{read_option} {sweep_config.other_options}'] + cmd for cmd in cmd_lines]
#dump_cmd_lines(read_cmd_lines)
log_folder = os.path.join(sweep_config.log_dir, f'{READ_LOG_DIR}')
os.makedirs(log_folder, exist_ok=True)
perf_jobs = create_perf_jobs(io_op_desc=READ_OP_DESC, log_dir=log_folder, cmd_lines=read_cmd_lines)
launch_sweep(sweep_jobs=perf_jobs, sync_job=sync_job, flush_cache_job=flush_cache_job)
remove_folder(read_folder)
def run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines):
write_folder = os.path.join(sweep_config.nvme_dir, f'{WRITE_IO_DIR}')
os.makedirs(write_folder, exist_ok=True)
write_file_name = os.path.join(write_folder, f'random_{sweep_config.io_size}B.pt')
write_option = f'--write_size {sweep_config.io_size} --write_file {write_file_name}'
write_cmd_lines = [[f'{write_option} {sweep_config.other_options}'] + cmd for cmd in cmd_lines]
#dump_cmd_lines(write_cmd_lines)
log_folder = os.path.join(sweep_config.log_dir, f'{WRITE_LOG_DIR}')
os.makedirs(log_folder, exist_ok=True)
perf_jobs = create_perf_jobs(io_op_desc=WRITE_OP_DESC, log_dir=log_folder, cmd_lines=write_cmd_lines)
launch_sweep(sweep_jobs=perf_jobs, sync_job=sync_job, flush_cache_job=flush_cache_job)
remove_folder(write_folder)
def main():
print("Running performance sweep of deepspeed nvme library")
if not async_io_setup():
error_msg = """
Failing because environment is not properly configured for deepspeed async i/o module.
Possible fix: apt install libaio-dev.
"""
print(error_msg)
quit()
args = parse_arguments()
sweep_config = SweepConfig(args)
cmd_lines = get_sweep_cmd_lines(sweep_config.search_space)
if sweep_config.flush_cache:
flush_cache_job = Job(cmd_line=['sudo', 'bash -c', "'echo 1 > /proc/sys/vm/drop_caches'"])
else:
flush_cache_job = None
sync_job = Job(cmd_line=['sync'])
if sweep_config.read:
run_read_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines)
if sweep_config.write:
run_write_sweep(sweep_config, flush_cache_job, sync_job, cmd_lines)
if __name__ == "__main__":
main()
| 10,657 | 29.538682 | 118 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/perf_sweep_utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
SCRIPT_PREFIX = '_aio_bench'
WRITE_OP_DESC = 'write'
READ_OP_DESC = 'read'
READ_IO_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_io'
WRITE_IO_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_io'
BENCH_LOG_DIR = f'{SCRIPT_PREFIX}_logs'
READ_LOG_DIR = f'{SCRIPT_PREFIX}_{READ_OP_DESC}_logs'
WRITE_LOG_DIR = f'{SCRIPT_PREFIX}_{WRITE_OP_DESC}_logs'
| 425 | 29.428571 | 55 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/parse_aio_stats.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
READ_SPEED = 'read_speed'
WRITE_SPEED = 'write_speed'
PERF_METRICS = [READ_SPEED, WRITE_SPEED]
METRIC_SEARCH = {READ_SPEED: 'E2E Read Speed', WRITE_SPEED: 'E2E Write Speed'}
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir', type=str, required=True, help='Folder of statistics logs')
parser.add_argument('--metric',
type=str,
required=True,
help='Performance metric to report: [read_speed|write_speed]')
args = parser.parse_args()
print(f'args = {args}')
return args
def extract_value(key, file):
INVALID_PREFIXES = ["ds"]
for p in INVALID_PREFIXES:
if key.startswith(p):
return key
try:
if key[0] in ['t', 'd', 'p']:
return int(key[1:])
if key.startswith("bs"):
if key.endswith('K'):
v = key[2:].split('K')
return int(v[0]) * 1024
elif key.endswith('M'):
v = key[2:].split('M')
return int(v[0]) * 1024 * 1024
else:
return int(key[2:])
except:
print(f"{file}: extract_value fails on {key}")
return None
return key
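# Illustrative mappings (sketch): "d32" -> 32, "t4" -> 4, "p2" -> 2,
# "bs128K" -> 131072, "bs1M" -> 1048576, while op tags such as "read" or
# "write" pass through unchanged.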
def get_file_key(file):
f, _ = os.path.splitext(os.path.basename(file))
fields = f.split('_')
values = [extract_value(k, file) for k in fields]
return tuple(values)
def get_thread_count(file):
f, _ = os.path.splitext(os.path.basename(file))
fields = f.split('_')
for key in fields:
if key[0] == 't':
return int(key[1:])
return 1
"""
Extract performance metric from log file.
Sample file lines are:
Task Read Latency = 0.031647682189941406 sec
Task Read Speed = 12.342926020792527 GB/sec
E2E Read Latency = 0.031697988510131836 sec
E2E Read Speed = 12.323337169333062 GB/sec
For the above sample, -metric = "read_speed" corresponds to "E2E Read Speed", and 12.32 will be returned
"""
def get_metric(file, metric):
thread_count = get_thread_count(file)
with open(file) as f:
for line in f.readlines():
if line.startswith(METRIC_SEARCH[metric]):
if metric in [READ_SPEED, WRITE_SPEED]:
fields = line.split()
return float(fields[-2])
else:
fields = line.split('=')
return float(fields[-1])
return None
def validate_args(args):
    if args.metric not in PERF_METRICS:
        print(f'{args.metric} is not a valid performance metric')
        return False
    if not os.path.isdir(args.log_dir):
        print(f'{args.log_dir} folder does not exist')
return False
return True
def get_results(log_files, metric):
results = {}
for f in log_files:
file_key = get_file_key(f)
value = get_metric(f, metric)
results[file_key] = value
return results
def get_sorted_results(log_dir, metric):
log_files = [f for f in os.listdir(log_dir) if os.path.isfile(os.path.join(log_dir, f))]
log_files_path = [os.path.join(log_dir, f) for f in log_files]
results = get_results(log_files_path, metric)
result_keys = list(results.keys())
sorted_keys = sorted(result_keys)
return sorted_keys, results
def main():
print("Parsing aio statistics")
args = parse_arguments()
if not validate_args(args):
quit()
sorted_keys, results = get_sorted_results(args.log_dir, args.metric)
for k in sorted_keys:
print(f'{k} = {results[k]}')
if __name__ == "__main__":
main()
| 3,835 | 24.744966 | 104 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/aio_bench_generate_param.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
import json
from parse_aio_stats import READ_SPEED, WRITE_SPEED, get_sorted_results
from perf_sweep_utils import BENCH_LOG_DIR, READ_LOG_DIR, WRITE_LOG_DIR
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir',
type=str,
default=BENCH_LOG_DIR,
help=f'Folder of performance sweep logs. Default is {os.path.join(".", BENCH_LOG_DIR)}')
args = parser.parse_args()
print(f'args = {args}')
return args
def validate_args(args):
for d in [READ_LOG_DIR, WRITE_LOG_DIR]:
log_dir = os.path.join(args.log_dir, d)
if not os.path.isdir(log_dir):
            print(f'{log_dir} folder does not exist')
return False
return True
def convert_to_param(key):
assert len(key) == 6
return {
"single_submit": "true" if key[0] == "single" else "false",
"overlap_events": "true" if key[1] == "overlap" else "false",
"thread_count": int(key[3]),
"queue_depth": int(key[4]),
"block_size": int(key[5])
}
def generate_aio_param(read_log_dir, write_log_dir):
_, read_results = get_sorted_results(read_log_dir, READ_SPEED)
_, write_results = get_sorted_results(write_log_dir, WRITE_SPEED)
combined_perf = {key[1:]: value for key, value in read_results.items()}
for key, value in write_results.items():
new_key = key[1:]
if new_key in combined_perf:
combined_perf[new_key] += value
else:
combined_perf[new_key] = 0
optimal_key = None
optimal_perf = 0.0
for key, value in combined_perf.items():
if value > optimal_perf:
optimal_perf = value
optimal_key = key
aio_param = {"aio": convert_to_param(optimal_key)}
read_perf_keys = {key[1:]: key for key in read_results.keys()}
write_perf_keys = {key[1:]: key for key in write_results.keys()}
optimal_config_read = read_results.get(read_perf_keys[optimal_key], None)
optimal_config_write = write_results.get(write_perf_keys[optimal_key], None)
print(f'Best performance (GB/sec): read = {optimal_config_read:5.2f}, write = {optimal_config_write:5.2f}')
print(json.dumps(aio_param, indent=3))
def main():
print('Generate aio param')
args = parse_arguments()
if not validate_args(args):
quit()
read_log_dir = os.path.join(args.log_dir, READ_LOG_DIR)
write_log_dir = os.path.join(args.log_dir, WRITE_LOG_DIR)
generate_aio_param(read_log_dir, write_log_dir)
if __name__ == "__main__":
main()
| 2,814 | 29.268817 | 112 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/test_ds_aio.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import os
import argparse
import multiprocessing as mp
from ds_aio_basic import aio_basic_multiprocessing
from ds_aio_handle import aio_handle_multiprocessing
from test_ds_aio_utils import refine_args
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--read_file', type=str, default=None, help='Read file.')
parser.add_argument('--write_file', type=str, default=None, help='Write file.')
parser.add_argument('--write_size', type=str, default=None, help='Number of bytes to write.')
parser.add_argument('--block_size', type=str, default='1M', help='I/O block size.')
parser.add_argument('--queue_depth', type=int, default=32, help='I/O queue depth.')
parser.add_argument('--threads', type=int, default=1, help='Thread parallelism count.')
parser.add_argument('--single_submit',
action='store_true',
                        help='Submit I/O requests one at a time (default is to submit queue_depth requests at once).')
parser.add_argument('--overlap_events',
action='store_true',
help='Overlap I/O submission and completion requests.')
parser.add_argument('--validate', action='store_true', help='Perform validation in library.')
parser.add_argument('--handle', action='store_true', help='Use AIO handle.')
parser.add_argument('--loops', type=int, default=1, help='Count of operation repetitions')
parser.add_argument('--io_parallel', type=int, default=None, help='Per iop parallelism')
parser.add_argument('--gpu', action='store_true', help='Use GPU memory')
parser.add_argument('--use_accelerator_pin_memory',
action='store_true',
help='Obtain pinned (CPU page-locked) tensors from accelerator')
args = parser.parse_args()
print(f'args = {args}')
return args
def validate_args(args):
if args.read_file and not os.path.isfile(args.read_file):
print(f'args validation error: {args.read_file} not found')
return False
return True
def main():
print(f'Testing deepspeed_aio python frontend')
args = parse_arguments()
refine_args(args)
if not validate_args(args):
quit()
mp.set_start_method('spawn')
multiprocess_function = aio_handle_multiprocessing if args.handle else aio_basic_multiprocessing
if args.read_file:
multiprocess_function(args, True)
if args.write_file:
multiprocess_function(args, False)
if __name__ == "__main__":
main()
| 2,738 | 30.848837 | 111 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/validate_async_io.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
from deepspeed.ops.op_builder import AsyncIOBuilder
assert AsyncIOBuilder().is_compatible()
| 271 | 26.2 | 75 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/ds_aio_basic.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
import torch
import os
import time
from multiprocessing import Pool, Barrier
from test_ds_aio_utils import report_results, task_log, task_barrier
from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import AsyncIOBuilder
def pre_basic(args, tid, read_op):
io_string = "Read" if read_op else "Write"
num_bytes = os.path.getsize(args.read_file) if read_op else args.write_size
file = args.read_file if read_op else f'{args.write_file}.{tid}'
task_log(tid, f'Allocate tensor of size {num_bytes} bytes')
buffer = get_accelerator().pin_memory(torch.empty(num_bytes, dtype=torch.uint8, device='cpu'))
task_log(tid, f'{io_string} file {file} of size {num_bytes} bytes from buffer on device {buffer.device}')
ctxt = {}
ctxt['file'] = file
ctxt['num_bytes'] = num_bytes
ctxt['buffer'] = buffer
ctxt['elapsed_sec'] = 0
return ctxt
def pre_basic_read(pool_params):
args, tid = pool_params
ctxt = pre_basic(args, tid, True)
return ctxt
def pre_basic_write(pool_params):
args, tid = pool_params
ctxt = pre_basic(args, tid, False)
return ctxt
def post_basic(pool_params):
_, _, ctxt = pool_params
ctxt["buffer"].detach()
ctxt["buffer"] = None
return ctxt
def main_basic_read(pool_params):
args, tid, ctxt = pool_params
start_time = time.time()
AsyncIOBuilder().load().aio_read(ctxt['buffer'], ctxt['file'], args.block_size, args.queue_depth,
args.single_submit, args.overlap_events, args.validate)
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def main_basic_write(pool_params):
args, tid, ctxt = pool_params
start_time = time.time()
AsyncIOBuilder().load().aio_write(ctxt['buffer'], ctxt['file'], args.block_size, args.queue_depth,
args.single_submit, args.overlap_events, args.validate)
end_time = time.time()
ctxt['elapsed_sec'] += end_time - start_time
return ctxt
def get_schedule(args, read_op):
schedule = {}
if read_op:
schedule['pre'] = pre_basic_read
schedule['post'] = post_basic
schedule['main'] = main_basic_read
else:
schedule['pre'] = pre_basic_write
schedule['post'] = post_basic
schedule['main'] = main_basic_write
return schedule
def _aio_handle_tasklet(pool_params):
args, tid, read_op = pool_params
# Create schedule
schedule = get_schedule(args, read_op)
task_log(tid, f'schedule = {schedule}')
task_barrier(aio_barrier, args.threads)
# Run pre task
task_log(tid, f'running pre-task')
ctxt = schedule["pre"]((args, tid))
task_barrier(aio_barrier, args.threads)
# Run main tasks in a loop
ctxt["main_task_sec"] = 0
for i in range(args.loops):
task_log(tid, f'running main task {i}')
start_time = time.time()
ctxt = schedule["main"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
stop_time = time.time()
ctxt["main_task_sec"] += stop_time - start_time
# Run post task
task_log(tid, f'running post-task')
ctxt = schedule["post"]((args, tid, ctxt))
task_barrier(aio_barrier, args.threads)
return ctxt["main_task_sec"], ctxt["elapsed_sec"], ctxt["num_bytes"] * args.loops
def _init_tasklet(b):
global aio_barrier
aio_barrier = b
def aio_basic_multiprocessing(args, read_op):
b = Barrier(args.threads)
pool_params = [(args, p, read_op) for p in range(args.threads)]
with Pool(processes=args.threads, initializer=_init_tasklet, initargs=(b, )) as p:
pool_results = p.map(_aio_handle_tasklet, pool_params)
report_results(args, read_op, pool_results)
| 3,957 | 28.537313 | 109 |
py
|
DeepSpeed
|
DeepSpeed-master/csrc/aio/py_test/test_ds_aio_utils.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
"""
Functionality of swapping optimizer tensors to/from (NVMe) storage devices.
"""
BYTES_PER_GB = 1024**3
LOG_TIDS = [0]
def task_log(tid, msg):
if tid in LOG_TIDS:
print(f'tid {tid}: {msg}')
def task_barrier(barrier, num_parties):
assert barrier.parties == num_parties
barrier.wait()
    assert not barrier.broken
def report_results(args, read_op, pool_results):
#print(f'pool_results = {pool_results}')
io_string = 'Read' if read_op else 'Write'
if None in pool_results:
print(f'Failure in one of {args.threads} {io_string} processes')
return
total_bytes = sum([num_bytes for _, _, num_bytes in pool_results])
task_latency_sec = max([sec for _, sec, _ in pool_results])
task_speed_GB = total_bytes / task_latency_sec / BYTES_PER_GB
print(f'Task {io_string} Latency = {task_latency_sec} sec')
print(f'Task {io_string} Speed = {task_speed_GB} GB/sec')
e2e_latency_sec = max([sec for sec, _, _ in pool_results])
e2e_speed_GB = total_bytes / e2e_latency_sec / BYTES_PER_GB
print(f'E2E {io_string} Latency = {e2e_latency_sec} sec')
print(f'E2E {io_string} Speed = {e2e_speed_GB} GB/sec')
def refine_integer_value(value):
unit_dict = {'K': 1024, 'M': 1024**2, 'G': 1024**3}
if value[-1] in list(unit_dict.keys()):
int_value = int(value[:-1]) * unit_dict[value[-1]]
return int_value
return int(value)
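# Illustrative examples: "400M" -> 419430400, "1G" -> 1073741824, and a plain
# "4096" -> 4096; used by refine_args below to normalize --write_size and
# --block_size.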
def refine_args(args):
if args.write_size and type(args.write_size) == str:
args.write_size = refine_integer_value(args.write_size)
if args.block_size and type(args.block_size) == str:
args.block_size = refine_integer_value(args.block_size)
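# Hedged usage sketch (not part of the original benchmark utilities): refine_args expects
# an argparse-style namespace whose size fields may be strings with K/M/G suffixes.
# The SimpleNamespace below is illustrative only.
if __name__ == '__main__':
    from types import SimpleNamespace
    _args = SimpleNamespace(write_size='1G', block_size='1M')
    refine_args(_args)
    print(_args.write_size, _args.block_size)  # 1073741824 1048576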
| 1,790 | 29.355932 | 75 |
py
|
DeepSpeed
|
DeepSpeed-master/docs/code-docs/source/conf.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
# -- Project information -----------------------------------------------------
project = 'DeepSpeed'
copyright = '2020, Microsoft'
author = 'Microsoft'
# The full version, including alpha/beta/rc tags
with open("../../../version.txt", "r") as f:
release = f.readline().rstrip()
master_doc = 'index'
autodoc_member_order = 'bysource'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'recommonmark',
'sphinx_rtd_theme',
'sphinxcontrib.autodoc_pydantic',
'sphinx.ext.autosectionlabel',
]
pygments_style = 'sphinx'
# autodoc_pydantic config
autodoc_pydantic_model_show_field_summary = False
autodoc_pydantic_field_signature_prefix = ' '
autodoc_pydantic_model_signature_prefix = 'class'
autodoc_pydantic_model_show_json = False
autodoc_pydantic_model_show_config_summary = False
autodoc_pydantic_model_show_config_member = False
autodoc_pydantic_model_show_validator_summary = False
autodoc_pydantic_model_show_validator_members = False
autodoc_pydantic_model_summary_list_order = 'bysource'
autodoc_pydantic_model_member_order = 'bysource'
autodoc_pydantic_field_list_validators = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# GitHub integration
html_context = {
"display_github": True,
"github_user": "microsoft",
"github_repo": "DeepSpeed",
"github_version": "master",
"conf_py_path": "/docs/code-docs/source/",
}
sys.path.insert(0, os.path.abspath('../../../'))
# Prepend module names to class descriptions?
add_module_names = True
autoclass_content = 'auto'
autodoc_mock_imports = ["apex", "mpi4py", "tensorboardX", "numpy", "cupy"]
| 3,294 | 31.303922 | 79 |
py
|
DeepSpeed
|
DeepSpeed-master/release/bump_patch_version.py
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0
# DeepSpeed Team
from packaging import version as pkg_version
with open('../version.txt') as fd:
version = pkg_version.parse(fd.read())
with open('../version.txt', 'w') as fd:
fd.write(f'{version.major}.{version.minor}.{version.micro + 1}\n')
print(f'{version} -> {version.major}.{version.minor}.{version.micro + 1}')
| 408 | 26.266667 | 74 |
py
|
mBNN
|
mBNN-main/evaluate.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import calibration as cal
from scipy.stats import norm
from thop import profile
def ll_mixture_normal(output, target, sigma):
exponent = -((target - output)**2).T/(2 * sigma**2)
log_coeff = -0.5*torch.log(2*torch.tensor(np.pi))-torch.log(sigma)
px = torch.mean(torch.exp(exponent + log_coeff),1)
ll = torch.where(px!=0, torch.log(px), torch.mean(exponent + log_coeff,1))
return torch.sum(ll)
def A(mu, sigma2):
sigma = torch.sqrt(sigma2)
r = (mu/sigma).detach().cpu().numpy()
A1 = 2*sigma*(torch.from_numpy(norm.pdf(r)).float().cuda())
A2 = mu*(torch.from_numpy(2*norm.cdf(r)-1).float().cuda())
return(A1 + A2)
def CRPS_mixnorm(w,mu,sigma2,x):
M = len(w)
if (len(mu)!=M or len(sigma2)!=M): return(None)
if x.dim()>0 :
if len(x)>1:
return(None)
w = w/torch.sum(w)
crps1 = torch.sum(w*A(x-mu, sigma2))
crps3=[]
for m in range(M):
crps3.append(torch.sum(w*A(mu[m]-mu,sigma2[m] + sigma2)))
crps3 = torch.stack(crps3)
crps2 = torch.sum(crps3*w/2)
return crps1 - crps2
def CRPS_norm(mu,sigma2,x):
if x.dim()>0 :
if len(x)>1:
return(None)
crps1 = A(x-mu, sigma2)
crps2 = 0.5*A(0,2*sigma2)
return crps1 - crps2
def evaluate_averaged_model_regression(pred_list, target_list, sigma_list):
CRPS_list=[]
for i in range(len(target_list)):
CRPS = CRPS_mixnorm(torch.ones(pred_list.shape[0]).cuda(),pred_list[:,i], sigma_list**2, target_list[i])
CRPS_list.append(CRPS)
CRPSs = torch.stack(CRPS_list)
RMSE = torch.sqrt(((torch.mean(pred_list,0) - target_list)**2).mean()).item()
m_NLL = -ll_mixture_normal(pred_list, target_list, sigma_list).item() / pred_list.shape[1]
CRPS = torch.mean(CRPSs).item()
return(RMSE, m_NLL, CRPS)
def evaluate_averaged_model_classification(pred_list, target_list):
target_list = target_list.long()
outputs_mixture = torch.mean(pred_list, dim=0)
ACC= torch.mean((torch.argmax(outputs_mixture,1) == target_list).float()).item()
criterion = torch.nn.NLLLoss(reduction='mean')
m_NLL = criterion(torch.log(outputs_mixture), target_list).item()
ECE = cal.get_calibration_error(outputs_mixture.detach().cpu().numpy(), target_list.detach().cpu().numpy())
return(ACC, m_NLL, ECE)
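# Hedged usage sketch (not part of the original evaluation pipeline): the averaged-model
# metrics expect pred_list of shape (num_posterior_samples, num_examples, num_classes)
# holding softmax probabilities and target_list of shape (num_examples,) with class labels.
# All sizes below are illustrative.
def _example_classification_evaluation():
    S, N, C = 5, 8, 3
    fake_pred = torch.softmax(torch.randn(S, N, C), dim=-1)
    fake_target = torch.randint(0, C, (N,)).float()
    return evaluate_averaged_model_classification(fake_pred, fake_target)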
class MLP_customized(nn.Module):
def __init__(self, input_dim, output_dim, h_vec):
super(MLP_customized, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.L = len(h_vec)
self.p_vec = np.hstack([input_dim,h_vec,output_dim])
self.layers = self._make_layer()
def _make_layer(self):
layers = []
for l in range(self.L):
layer = []
layer.append(nn.Linear(self.p_vec[l], self.p_vec[l+1]))
layer.append(nn.ReLU())
layers.append(nn.Sequential(*layer))
layer = []
layer.append(nn.Linear(self.p_vec[-2], self.output_dim))
layers.append(nn.Sequential(*layer))
return nn.Sequential(*layers)
def forward(self, x):
x = x.view(-1, self.input_dim)
x = self.layers(x)
return x
class Convert(nn.Module):
def __init__(self, size):
super(Convert, self).__init__()
self.size = size
def forward(self, x):
s = x.shape
if s[1]>self.size:
return torch.split(x, self.size, 1)[0]
elif s[1]<self.size:
return torch.cat((x, torch.zeros(s[0],self.size-s[1],s[2],s[3]).cuda()), 1)
class BasicBlock_customized(nn.Module):
expansion = 1
def __init__(self, h, stride, empty_shortcut):
super(BasicBlock_customized, self).__init__()
self.conv1 = nn.Conv2d(h[0], h[1], kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(h[1])
self.conv2 = nn.Conv2d(h[1], h[2], kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(h[2])
self.shortcut = nn.Sequential()
if not empty_shortcut:
self.shortcut = nn.Sequential(
nn.Conv2d(h[0], h[2], kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(h[2])
)
elif h[0]!=h[2]:
self.shortcut = nn.Sequential(
Convert(h[2])
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet18_customized(nn.Module):
def __init__(self, num_classes, h_vec):
super(ResNet18_customized, self).__init__()
self.conv1 = nn.Conv2d(3, h_vec[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(h_vec[0])
self.layer1 = self._make_layer(BasicBlock_customized, h_vec[0:5], stride=1)
self.layer2 = self._make_layer(BasicBlock_customized, h_vec[4:9], stride=2)
self.layer3 = self._make_layer(BasicBlock_customized, h_vec[8:13], stride=2)
self.layer4 = self._make_layer(BasicBlock_customized, h_vec[12:17], stride=2)
self.linear = nn.Linear(h_vec[16], num_classes)
self.register_buffer('sigma', torch.tensor([1.0]).cuda())
def _make_layer(self, block, h, stride):
layers = []
layers.append(block(h[0:3], stride, (stride==1)))
layers.append(block(h[2:5], 1, True))
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def profiling(model_type, p_data, n_act_i_nodes, num_classes, n_act_h_nodes, MLP_L=None, MLP_p=None, n_h_nodes=None):
if model_type == "MLP":
full_model = MLP_customized(p_data, num_classes, [MLP_p]*MLP_L).cuda()
        macs_f, params_f = profile(full_model, (torch.randn(1,p_data).cuda(),), verbose=False)
compressed_model = MLP_customized(n_act_i_nodes, num_classes, n_act_h_nodes).cuda()
        macs_c, params_c = profile(compressed_model, (torch.randn(1,n_act_i_nodes).cuda(),), verbose=False)
return macs_c/macs_f, params_c/params_f
elif model_type == "resnet18":
full_model = ResNet18_customized(num_classes, n_h_nodes).cuda()
macs_f, params_f = profile(full_model, (torch.randn(1,3,32,32).cuda(),), verbose=False)
compressed_model = ResNet18_customized(num_classes, n_act_h_nodes).cuda()
macs_c, params_c = profile(compressed_model, (torch.randn(1,3,32,32).cuda(),), verbose=False)
return macs_c/macs_f, params_c/params_f
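# Hedged usage sketch (illustrative arguments only): profiling returns the FLOPs ratio and
# parameter ratio of a compressed network relative to its full counterpart. It needs a CUDA
# device and the `thop` package, exactly like the profiling function above.
def _example_profiling():
    # Compare a 2x1000 dense MLP with a hypothetical pruned version keeping [200, 150] nodes.
    return profiling("MLP", p_data=10, n_act_i_nodes=10, num_classes=1,
                     n_act_h_nodes=[200, 150], MLP_L=2, MLP_p=1000)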
| 7,111 | 38.731844 | 121 |
py
|
mBNN
|
mBNN-main/utils.py
|
import os
import errno
import pandas as pd
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset, DataLoader
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def data_process(dataname, data_dir, seed=0, batch_size=100):
if dataname == "Boston":
data = pd.read_csv(data_dir + "/housing.data", header=None, sep="\s+")
data_x = MinMaxScaler((0,1)).fit_transform(data.iloc[:, :-1].astype(np.float64))
data_y=np.array(data.iloc[:,-1]).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 1
elif dataname == "Concrete":
data=pd.read_csv(data_dir + "/Concrete_Data.csv", header=None)
data_x = MinMaxScaler((0,1)).fit_transform(data.iloc[:, :-1].astype(np.float64))
data_y=np.array(data.iloc[:,-1]).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 1
elif dataname == "Energy":
data = pd.read_csv(data_dir + "/ENB2012_data.csv", header=None)
data_x = MinMaxScaler((0,1)).fit_transform(data.iloc[:, :-1].astype(np.float64))
data_y=np.array(data.iloc[:,-1]).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 1
elif dataname == "Yacht":
data = pd.read_csv(data_dir + "/yacht_hydrodynamics.data", header=None, sep="\s+")
data_x = MinMaxScaler((0,1)).fit_transform(data.iloc[:, :-1].astype(np.float64))
data_y=np.array(data.iloc[:,-1]).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 1
elif dataname == "Haberman":
data = pd.read_csv(data_dir + "/haberman.data", header=None)
data_x = MinMaxScaler((0,1)).fit_transform(pd.get_dummies(data.iloc[:, :-1]).astype(np.float64))
data_y = np.array(data.iloc[:,-1]-1).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), shuffle=True, batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 2
elif dataname == "Retinopathy":
data=pd.read_csv(data_dir + "/messidor_features.data", header=None)
data_x = MinMaxScaler((0,1)).fit_transform(pd.get_dummies(data.iloc[:, :-1]).astype(np.float64))
data_y = np.array(data.iloc[:,-1]).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), shuffle=True, batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 2
elif dataname == "Tic-tac-toe":
data=pd.read_csv(data_dir + "/tic-tac-toe.data", header=None)
data_x = MinMaxScaler((0,1)).fit_transform(pd.get_dummies(data.iloc[:, :-1]).astype(np.float64))
data_y = np.array(pd.Series(np.where(data.iloc[:,-1]=="positive",1,0))).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), shuffle=True, batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 2
elif dataname == "Promoter":
data=pd.read_csv(data_dir + "/promoters.data", header=None)
gene=[]
for i in range(data.shape[0]):
gene.append(list(data.iloc[i,2].replace('\t', '')))
data_x = MinMaxScaler((0,1)).fit_transform(pd.get_dummies(pd.DataFrame(gene),drop_first=True).astype(np.float64))
data_y = np.array(pd.Series(np.where(data.iloc[:,0]=="+",1,0))).reshape(-1,1)
x_train, x_test, y_train, y_test = train_test_split(data_x, data_y, test_size=0.1, random_state=seed)
x_train, x_test, y_train, y_test = torch.tensor(x_train).float(), torch.tensor(x_test).float(), torch.tensor(y_train).float(), torch.tensor(y_test).float()
if batch_size == -1:
train_batch_size = x_train.shape[0]
else:
train_batch_size = batch_size
trainloader = DataLoader(TensorDataset(x_train, y_train), shuffle=True, batch_size=train_batch_size)
test_batch_size = x_test.shape[0]
testloader = DataLoader(TensorDataset(x_test, y_test), batch_size=test_batch_size)
n_train, n_test, p_data, num_classes = x_train.shape[0], x_test.shape[0], x_train.shape[1], 2
elif dataname == "CIFAR10":
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if batch_size<0:
raise Exception('Invalid batch size.')
trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)
n_train, n_test, p_data, num_classes = len(trainset), len(testset), None, 10
elif dataname == "CIFAR100":
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5070751592371323, 0.48654887331495095, 0.4409178433670343], [0.2673342858792401, 0.2564384629170883, 0.27615047132568404])])
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.5070751592371323, 0.48654887331495095, 0.4409178433670343], [0.2673342858792401, 0.2564384629170883, 0.27615047132568404])])
trainset = torchvision.datasets.CIFAR100(root=data_dir, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR100(root=data_dir, train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)
n_train, n_test, p_data, num_classes = len(trainset), len(testset), None, 100
elif dataname == "SVHN":
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.SVHN(root=data_dir, split='train', download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.SVHN(root=data_dir, split='test', download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
        n_train, n_test, p_data, num_classes = len(trainset), len(testset), None, 10  # SVHN has 10 digit classes
return trainloader, testloader, n_train, n_test, p_data, num_classes
def filter_ranks(conv, bn):
bn_scale = bn.weight.data / torch.sqrt(bn.running_var + bn.eps)
new_filter = bn_scale.reshape(-1,1,1,1) * conv.weight.data
new_bias = bn.bias.data - bn_scale*bn.running_mean
return (torch.pow(new_filter, 2)).sum((1,2,3)) #+ torch.pow(new_bias,2)#
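# Hedged usage sketch (not part of the original training code): filter_ranks folds the
# BatchNorm scale into the preceding convolution and returns one squared-L2 importance
# score per output filter; the conv/bn pair below is illustrative only.
def _example_filter_ranks():
    conv = torch.nn.Conv2d(3, 8, kernel_size=3, bias=False)
    bn = torch.nn.BatchNorm2d(8)
    bn.eval()  # filter_ranks reads running_mean/running_var, so use inference statistics
    return filter_ranks(conv, bn)  # tensor of shape (8,), one score per output filter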
| 12,447 | 56.364055 | 199 |
py
|
mBNN
|
mBNN-main/MBNN/BSTS.py
|
import pandas as pd
import numpy as np
import argparse
import csv
from datetime import datetime
from torch import optim
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler, Normalizer
from Kalman_Filter import *
from MCMC import *
from Mask_Update import *
from models import *
import matplotlib.pyplot as plt
import seaborn as sns
import sys
sys.path.append('..')
from utils import *
parser = argparse.ArgumentParser(description='BSTS')
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--out', default='', help='Directory to output the result')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
parser.add_argument('--method', type=str, default= 'mBNN', choices=['linear', 'BNN', 'mBNN'])
def logit(x):
return np.log(x/(1-x))
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
out_directory = args.out
data_y = np.array(pd.read_csv(args.data_dir + '/y_search.csv'))
data_x = np.array(pd.read_csv(args.data_dir + '/x_search.csv'))
data_x = MinMaxScaler((0,1)).fit_transform(np.stack(data_x).T)
data_y = np.sum(np.stack(data_y),0)/1000
n = len(data_y)
n_train = 240
n_test = n - n_train
data_x_train = data_x[:n_train]
data_y_train = data_y[:n_train]
data_x_test = data_x[n_train:]
data_y_test = data_y[n_train:]
Z = np.ones(shape=(1,1))
H = np.ones(shape=(1,1))
T = np.ones(shape=(1,1))*0.95
Q = np.ones(shape=(1,1))*0.1
a_0 = np.zeros(shape=(1))
P_0 = np.ones(shape=(1,1))
    if args.method == 'linear':
thinning_interval = 10
burn_in_sample = 10
num_sample = 20
total_epoch = thinning_interval*(burn_in_sample+num_sample)
burn_in_epoch = thinning_interval*burn_in_sample
beta_list = []
filtered_state_list = []
filtered_state_cov_list = []
data_x_train_one = np.c_[np.ones((data_x_train.shape[0], 1)),data_x_train]
inv = np.linalg.inv(data_x_train_one.T @ data_x_train_one + np.identity(data_x_train_one.shape[1]))
diff = data_y_train
for epoch in range(total_epoch):
beta_mean = (inv @ data_x_train_one.T @ diff.reshape(-1,1)).squeeze()
beta = np.random.multivariate_normal(beta_mean, inv)
predicted = (data_x_train_one @ beta).squeeze()
ERROR = ((diff - predicted)**2).sum()
gamma = np.random.gamma(1 + n_train/2, 1/(ERROR/2 + 1))
H = 1/gamma
y = (data_y_train-predicted).reshape(1,-1)
filtered_state, filtered_state_cov, _, _, _, _, _, _ = kalman_filter(y, Z, H, T, Q, a_0, P_0)
sampled_mu = np.random.normal(loc=filtered_state.squeeze(), scale = np.sqrt(filtered_state_cov.squeeze()))
diff = data_y_train - sampled_mu
if (epoch+1) % thinning_interval == 0:
print("epoch : ", epoch, ", RMSE : ", np.sqrt(np.mean((diff-predicted)**2)))
if (epoch+1) > burn_in_epoch:
beta_list.append(beta)
filtered_state_list.append(filtered_state)
filtered_state_cov_list.append(filtered_state_cov)
torch.save({'beta_list' : beta_list, 'filtered_state_list': filtered_state_list, 'filtered_state_cov_list':filtered_state_cov_list}, out_directory + "/BSTS_linear.pth")
else:
L, p, prior, prior_scale = 2, 100, "Cauchy", 1.0
lr = 1e-5
thinning_interval = 10
burn_in_sample = 10
num_sample = 20
lamb, N_max = 0.1, 3
num_classes=1
x_train, y_train, x_test, y_test = torch.tensor(data_x_train).float(), torch.tensor(data_y_train).float(), torch.tensor(data_x_test).float(), torch.tensor(data_y_test).float()
trainloader, testloader = DataLoader(TensorDataset(x_train, y_train), batch_size=x_train.shape[0]), DataLoader(TensorDataset(x_test, y_test), batch_size=x_test.shape[0])
p_data = x_train.shape[1]
task = 'regression'
model = M_MLP(p_data, num_classes, L, p, prior, prior_scale).cuda()
step_size = lr/n
total_epoch = thinning_interval*(burn_in_sample+num_sample)
burn_in_epoch = thinning_interval*burn_in_sample
step_size_tuning = DualAveragingStepSize(initial_step_size=step_size)
model_tmp = copy.deepcopy(model)
model_list = []
filtered_state_list=[]
filtered_state_cov_list=[]
for epoch in range(total_epoch):
model.train()
try:
p_accept = HMC(model, task, trainloader, lr=step_size, lf_step = 50)
if epoch < burn_in_epoch:
step_size, _ = step_size_tuning.update(p_accept)
if epoch == burn_in_epoch:
_, step_size = step_size_tuning.update(p_accept)
except:
model = copy.deepcopy(model_tmp)
model_tmp = copy.deepcopy(model)
if args.method == "mBNN":
model.eval()
for _ in range(2):
mask_update_dataloader(model, task, trainloader, n, lamb, N_max)
predicted = []
for _, (inputs, _) in enumerate(trainloader):
inputs = inputs.cuda()
outputs = model(inputs)
predicted.append(outputs)
predicted = torch.vstack(predicted).detach().cpu().numpy().squeeze()
sigma_update(model, trainloader, n_train)
H = model.sigma.item()**2
y = (data_y_train-predicted).reshape(1,-1)
filtered_state, filtered_state_cov, _, _, _, _, _, _ = kalman_filter(y, Z, H, T, Q, a_0, P_0)
sampled_mu = np.random.normal(loc=filtered_state.squeeze(), scale = np.sqrt(filtered_state_cov.squeeze()))
diff = data_y_train - sampled_mu
y_train = torch.tensor(diff).float().reshape(-1,1)
trainloader = DataLoader(TensorDataset(x_train, y_train), batch_size=n_train)
if (epoch+1) % thinning_interval == 0:
if (epoch+1) > burn_in_epoch:
model_list.append(copy.deepcopy(model))
filtered_state_list.append(filtered_state)
filtered_state_cov_list.append(filtered_state_cov)
            torch.save({'model_list' : model_list, 'filtered_state_list': filtered_state_list, 'filtered_state_cov_list':filtered_state_cov_list}, out_directory + "/BSTS_" + args.method + ".pth")
if __name__ == '__main__':
main()
| 6,742 | 37.752874 | 186 |
py
|
mBNN
|
mBNN-main/MBNN/image.py
|
import argparse
from datetime import date
import os
import numpy as np
import copy
from datetime import datetime
from progress.bar import ChargingBar as Bar
import pickle
import sys
sys.path.append('..')
from utils import *
from evaluate import *
from MBNN.models import *
from MBNN.MCMC import *
from MBNN.Mask_Update import *
parser = argparse.ArgumentParser(description='mBNN for image dataset')
########################## model setting ##########################
parser.add_argument('--model', type=str, default= 'resnet18', choices=['resnet18'], help='architecture of model')
parser.add_argument('--prior', type=str, default= 'Cauchy', choices=['Cauchy'], help='type of prior')
parser.add_argument('--prior_scale', type=float, default= 0.1, help='scale of prior')
########################## basic setting ##########################
parser.add_argument('--start_seed', type=int, help='start_seed')
parser.add_argument('--end_seed', type=int, help='end_seed')
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
parser.add_argument('--out', default='', help='Directory to output the result')
######################### Dataset setting #############################
parser.add_argument('--dataset', type=str, default= 'CIFAR10', choices=['CIFAR10', 'CIFAR100'], help='benchmark dataset')
parser.add_argument('--batch_size', default=100, type=int, help='train batchsize')
######################### MCMC setting #############################
parser.add_argument('--num_sample', default=5, type=int, help='the number of MCMC sample')
parser.add_argument('--burn_in_sample', default=5, type=int, help='the number of MCMC burn_in sample')
parser.add_argument('--thinning_interval', default=20, type=int, help='thinning_interval epoch')
parser.add_argument('--lr', default=1e-3, type=float, help='initial learning rate')
parser.add_argument('--temperature', default=(1/50000)**.5, type=float, help='temperature for SGLD')
######################### Mask setting ###############################
parser.add_argument('--update_period', default=10, type=int, help='period of mask update (in batch)')
parser.add_argument('--num_update', default=1, type=int, help='number of times to update at a time')
parser.add_argument('--lamb', default=0.05, type=float, help='hyperparameter for the prior of sparsity')
parser.add_argument('--N_max', default=3, type=int, help='maximum of the number of updated mask')
parser.add_argument('--death_method', type=str, default= 'proposed', choices=["proposed", "Oops", "random"])
parser.add_argument('--birth_method', type=str, default= 'random', choices=["proposed", "Oops", "random"])
######################### add name #############################
parser.add_argument('--add_name', default='', type=str, help='add_name')
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
out_directory = args.out + '/MBNN' + '/' + str(args.dataset)
out_directory += '/' + str(date.today().strftime('%Y%m%d')[2:])
out_directory += '/' + '_lam' + str(args.lamb)
out_directory += '_bm_' + args.birth_method + '_dm_' + args.death_method
if args.add_name != '':
out_directory +='_'+str(args.add_name)
if not os.path.isdir(out_directory):
mkdir_p(out_directory)
result1_list = []
result2_list = []
result3_list = []
result4_list = []
result5_list = []
for seed in range(args.start_seed, args.end_seed+1):
print("seed : ", seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic=True
#dataset
trainloader, testloader, n_train, n_test, p_data, num_classes = data_process(args.dataset, args.data_dir, seed, args.batch_size)
task = 'classification'
model = M_ResNet18(num_classes, args.prior, args.prior_scale).cuda()
initial_nodes = []
for n, p in model.named_parameters():
if 'active' in n:
initial_nodes.append(torch.sum(p.data).item())
#MCMC
step_size = args.lr/args.batch_size
total_epoch = args.thinning_interval*(args.burn_in_sample+args.num_sample)
burn_in_epoch = args.thinning_interval*args.burn_in_sample
optimizer = torch.optim.SGD(model.parameters(), lr=step_size)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=burn_in_epoch, eta_min=step_size/100)
mt=0
bar = Bar('{:>10}'.format('Training'), max=total_epoch)
res = pd.DataFrame({'mean_ns':[],'total_ns':[]})
for epoch in range(total_epoch):
model.train()
for batch_idx, (inputs, targets) in enumerate(trainloader):
optimizer.zero_grad()
loss_noise = noise_loss(model,step_size)*args.temperature
loss = -log_prob_data(model, task, inputs, targets, n_train) + loss_noise
loss.backward()
optimizer.step()
if (batch_idx+1) % args.update_period ==0:
for _ in range(args.num_update):
mask_update_data(model, task, inputs, targets, n_train, args.lamb, args.N_max, args.death_method, args.birth_method)
if epoch+1 <= burn_in_epoch:
scheduler.step()
model.eval()
with torch.no_grad():
_,ERROR_train = log_likelihood_dataloader(model, task, trainloader)
_,ERROR_test = log_likelihood_dataloader(model, task, testloader)
ERROR_train, ERROR_test = ERROR_train.item()/n_train, ERROR_test.item()/n_test
total_node_sparsity = []
l=0
for n, p in model.named_parameters():
if 'active' in n:
total_node_sparsity.append(torch.sum(p.data).item())
l+=1
total_node_sparsity_ratio = np.sum(np.stack(total_node_sparsity))/np.sum(np.stack(initial_nodes))
bar.suffix = '({epo}/{total_epo}) ERR_train:{ER_tr} | ERR_test:{ER_te} | {ns}'.format(
epo=epoch + 1,
total_epo=total_epoch,
ER_tr=np.round(ERROR_train,3),
ER_te=np.round(ERROR_test,3),
ns = np.round(total_node_sparsity_ratio,3)
)
res.loc[epoch,'total_ns'] = total_node_sparsity_ratio
res.loc[epoch,'Er_train'] = np.round(ERROR_train,3)
res.loc[epoch,'Er_test'] = np.round(ERROR_test,3)
res.to_csv(out_directory + '/node_sparsity_seed_%d.csv'%(seed,))
if (epoch + 1) > burn_in_epoch and (epoch+1-burn_in_epoch) % args.thinning_interval == 0:
torch.save(model.state_dict(), out_directory + '/seed_%d_mt_%d.pt'%(seed, mt))
mt += 1
bar.next()
bar.finish()
print("model testing")
pred_list=[]
target_list=[]
sigma_list=[]
macs_list=[]
params_list=[]
with torch.no_grad():
for mt in range(args.num_sample):
model = M_ResNet18(num_classes, args.prior, args.prior_scale).cuda()
model.eval()
n_h_nodes = []
for name, param in model.named_parameters():
if 'active' in name:
n_h_nodes.append(int(param.sum().item()))
model.load_state_dict(torch.load(out_directory + '/seed_%d_mt_%d.pt'%(seed,mt), map_location='cuda:'+str(args.gpu)))
pred = []
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
pred.append(F.softmax(outputs,dim=1))
if mt==0:
target_list.append(targets.squeeze())
pred_list.append(torch.cat(pred,0))
n_act_h_nodes = []
for name, param in model.named_parameters():
if 'active' in name:
n_act_h_nodes.append(int(param.sum().item()))
n_act_i_nodes = p_data
macs, params = profiling(args.model, p_data, n_act_i_nodes, num_classes, n_act_h_nodes, n_h_nodes=n_h_nodes)
macs_list.append(macs)
params_list.append(params)
pred_list = torch.stack(pred_list)
target_list = torch.cat(target_list,0)
ACC, m_NLL, ECE = evaluate_averaged_model_classification(pred_list, target_list)
macs = np.stack(macs_list).mean()
params = np.stack(params_list).mean()
print("ACC : ", ACC, " m_NLL : ", m_NLL, " ECE : ", ECE, "FLOPs rate : ", 100 * (macs), " non-zero param rate : ", 100 * (params))
result1_list.append(ACC)
result2_list.append(m_NLL)
result3_list.append(ECE)
result4_list.append(100 * (macs))
result5_list.append(100 * (params))
    num_seed = args.end_seed - args.start_seed + 1  # seeds run inclusively from start_seed to end_seed
result1_list, result2_list, result3_list, result4_list, result5_list = np.stack(result1_list), np.stack(result2_list), np.stack(result3_list), np.stack(result4_list), np.stack(result5_list)
print("%.3f(%.3f), %.3f(%.3f), %.3f(%.3f), %.2f(%.2f), %.2f(%.2f)" % (np.mean(result1_list), np.std(result1_list)/np.sqrt(num_seed),np.mean(result2_list), np.std(result2_list)/np.sqrt(num_seed),np.mean(result3_list), np.std(result3_list)/np.sqrt(num_seed),np.mean(result4_list), np.std(result4_list)/np.sqrt(num_seed),np.mean(result5_list), np.std(result5_list)/np.sqrt(num_seed)))
if __name__ == '__main__':
main()
| 10,377 | 45.124444 | 393 |
py
|
mBNN
|
mBNN-main/MBNN/Polynomial.py
|
import argparse
from datetime import date
import os
import numpy as np
import copy
from datetime import datetime
from progress.bar import ChargingBar as Bar
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from utils import *
from evaluate import *
from MBNN.models import *
from MBNN.MCMC import *
from MBNN.Mask_Update import *
parser = argparse.ArgumentParser(description='mBNN noisy polynomial regression')
parser.add_argument('--death_method', type=str, default= 'proposed', choices=["proposed", "Oops", "random"])
parser.add_argument('--birth_method', type=str, default= 'random', choices=["proposed", "Oops", "random"])
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--out', default='', help='Directory to output the result')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
parser.add_argument('--method', type=str, default= 'mBNN', choices=['BNN', 'mBNN'])
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
out_directory = args.out + "/method_" + args.method
if args.method == 'mBNN':
out_directory += "_birth_" + args.birth_method + "_death_" + args.death_method
n = 20
p_data = 1
num_classes=1
x = np.random.uniform(low=-4.0, high=4.0, size=n)
eps = np.random.normal(loc=0.0, scale=3.0, size=n)
y = x**3 + eps
L, p, prior, prior_scale = 2, 1000, "Cauchy", 0.3
lr = 0.1
thinning_interval = 10
burn_in_sample = 300
num_sample = 1
lamb, N_max = 0.1, 3
task = 'regression'
model = M_MLP(p_data, num_classes, L, p, prior, prior_scale).cuda()
step_size = lr/n
total_epoch = thinning_interval*(burn_in_sample+num_sample)
burn_in_epoch = thinning_interval*burn_in_sample
step_size_tuning = DualAveragingStepSize(initial_step_size=step_size)
model_tmp = copy.deepcopy(model)
trainloader = DataLoader(TensorDataset(torch.tensor(x).float().reshape(-1,1), torch.tensor(y).float().reshape(-1,1)), batch_size=n)
model_list=[]
res = pd.DataFrame()
for epoch in range(total_epoch):
model.train()
try:
p_accept = HMC(model, task, trainloader, lr=step_size, lf_step = 30)
if epoch < burn_in_epoch:
step_size, _ = step_size_tuning.update(p_accept)
if epoch == burn_in_epoch:
_, step_size = step_size_tuning.update(p_accept)
except:
model = copy.deepcopy(model_tmp)
model_tmp = copy.deepcopy(model)
if (epoch+1) % 1 ==0:
model.eval()
for _ in range(2):
mask_update_dataloader(model, task, trainloader, n, lamb, N_max, args.death_method, args.birth_method)
if task == 'regression':
sigma_update(model, trainloader, n)
if args.method == "mBNN":
model.eval()
with torch.no_grad():
_,ERROR_train = log_likelihood_dataloader(model, task, trainloader)
node_sparsity_l = []
l=0
for name, param in model.named_parameters():
if 'active' in name:
node_sparsity_l.append(torch.sum(param.data).item())
l+=1
node_sparsity = np.mean(np.stack(node_sparsity_l))
res.loc[epoch,'n1'] = node_sparsity_l[0]
res.loc[epoch,'n2'] = node_sparsity_l[1]
res.loc[epoch,'n_r'] = (node_sparsity_l[0] + node_sparsity_l[1])/(2*1000)
res.to_csv(out_directory + '/node_sparsity.csv')
if (epoch + 1) > burn_in_epoch and (epoch+1-burn_in_epoch) % thinning_interval == 0:
model_list.append(copy.deepcopy(model))
torch.save({'model_list' : model_list}, out_directory + "/models.pth")
if __name__ == '__main__':
main()
| 4,017 | 33.637931 | 135 |
py
|
mBNN
|
mBNN-main/MBNN/UCI.py
|
import argparse
from datetime import date
import os
import numpy as np
import copy
from datetime import datetime
from progress.bar import ChargingBar as Bar
import pickle
import sys
sys.path.append('..')
from utils import *
from evaluate import *
from MBNN.models import *
from MBNN.MCMC import *
from MBNN.Mask_Update import *
parser = argparse.ArgumentParser(description='mBNN for UCI dataset')
########################## model setting ##########################
parser.add_argument('--model', type=str, default= 'MLP', choices=['MLP', 'resnet18'], help='architecture of model')
parser.add_argument('--L', type=int, default= 2, help='depth of MLP')
parser.add_argument('--p', type=int, default= 1000, help='width of MLP')
parser.add_argument('--prior', type=str, default= 'Cauchy', choices=['Normal', 'Cauchy'], help='type of prior')
parser.add_argument('--prior_scale', type=float, default= 1.0, help='scale of prior')
########################## basic setting ##########################
parser.add_argument('--start_seed', type=int, help='start_seed')
parser.add_argument('--end_seed', type=int, help='end_seed')
parser.add_argument('--gpu', default=0, type=int, help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--data_dir', default='', help='Directory of dataset')
parser.add_argument('--out', default='', help='Directory to output the result')
######################### Dataset setting #############################
parser.add_argument('--dataset', type=str, default= 'Boston', choices=['Boston', 'Concrete', 'Energy', 'Yacht'], help='dataset name')
parser.add_argument('--batch_size', default=-1, type=int, help='train batchsize')
######################### MCMC setting #############################
parser.add_argument('--num_sample', default=20, type=int, help='the number of MCMC sample')
parser.add_argument('--burn_in_sample', default=2, type=int, help='the number of MCMC burn_in sample')
parser.add_argument('--thinning_interval', default=200, type=int, help='thinning_interval epoch')
parser.add_argument('--lr', default=1e-2, type=float, help='initial learning rate')
parser.add_argument('--lf_step', default=20, type=int, help='the number of leapfrog step')
######################### Mask setting ###############################
parser.add_argument('--update_period', default=1, type=int, help='period of mask update')
parser.add_argument('--num_update', default=10, type=int, help='number of times to update at a time')
parser.add_argument('--lamb', default=0.1, type=float, help='hyperparameter for the prior of sparsity')
parser.add_argument('--N_max', default=3, type=int, help='maximum of the number of updated mask')
parser.add_argument('--death_method', type=str, default= 'proposed', choices=["proposed", "Oops", "random"])
parser.add_argument('--birth_method', type=str, default= 'random', choices=["proposed", "Oops", "random"])
######################### add name #############################
parser.add_argument('--add_name', default='', type=str, help='add_name')
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
def main():
out_directory = args.out + '/MBNN' + '/' + str(args.dataset)
out_directory += '/' + str(date.today().strftime('%Y%m%d')[2:])
out_directory += '/' + '_lam' + str(args.lamb)
out_directory += '_bm_' + args.birth_method + '_dm_' + args.death_method
if args.add_name != '':
out_directory +='_'+str(args.add_name)
if not os.path.isdir(out_directory):
mkdir_p(out_directory)
result1_list = []
result2_list = []
result3_list = []
result4_list = []
result5_list = []
for seed in range(args.start_seed, args.end_seed+1):
print("seed : ", seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic=True
#dataset
trainloader, testloader, n_train, n_test, p_data, num_classes = data_process(args.dataset, args.data_dir, seed, args.batch_size)
        std = torch.std(next(iter(trainloader))[1])
        task = 'regression'  # Boston, Concrete, Energy and Yacht are all regression datasets
model = M_MLP(p_data, num_classes, args.L, args.p, args.prior, args.prior_scale).cuda()
#MCMC
step_size = args.lr/n_train
total_epoch = args.thinning_interval*(args.burn_in_sample+args.num_sample)
burn_in_epoch = args.thinning_interval*args.burn_in_sample
mt=0
bar = Bar('{:>10}'.format('Training'), max=total_epoch)
step_size_tuning = DualAveragingStepSize(initial_step_size=step_size)
model_tmp = copy.deepcopy(model)
res = pd.DataFrame()
for epoch in range(total_epoch):
try:
p_accept = HMC(model, task, trainloader, lr=step_size, lf_step = args.lf_step)
if epoch < burn_in_epoch:
step_size, _ = step_size_tuning.update(p_accept)
if epoch == burn_in_epoch:
_, step_size = step_size_tuning.update(p_accept)
except:
model = copy.deepcopy(model_tmp)
model_tmp = copy.deepcopy(model)
if (epoch+1) % args.update_period ==0:
model.eval()
for _ in range(args.num_update):
mask_update_dataloader(model, task, trainloader, n_train, args.lamb, args.N_max, args.death_method, args.birth_method)
sigma_update(model, trainloader, n_train, std)
if (epoch+1)%(20)==0:
model.eval()
with torch.no_grad():
_,ERROR_train = log_likelihood_dataloader(model, task, trainloader)
_,ERROR_test = log_likelihood_dataloader(model, task, testloader)
ERROR_train, ERROR_test = np.sqrt(ERROR_train.item()/n_train), np.sqrt(ERROR_test.item()/n_test)
node_sparsity_l = []
l=0
for n, p in model.named_parameters():
if 'active' in n:
node_sparsity_l.append(torch.sum(p.data).item())
l+=1
node_sparsity = np.mean(np.stack(node_sparsity_l))
bar.suffix = '({epo}/{total_epo}) ERR_train:{ER_tr} | ERR_test:{ER_te} | {ns}'.format(
epo=epoch + 1,
total_epo=total_epoch,
ER_tr=np.round(ERROR_train,3),
ER_te=np.round(ERROR_test,3),
ns = np.round(node_sparsity,2)
)
res.loc[epoch,'n1'] = node_sparsity_l[0]
res.loc[epoch,'n2'] = node_sparsity_l[1]
res.loc[epoch,'n_r'] = (node_sparsity_l[0] + node_sparsity_l[1])/(2*args.p)
res.to_csv(out_directory + '/node_sparsity_seed_%d.csv'%(seed,))
if (epoch + 1) > burn_in_epoch and (epoch+1-burn_in_epoch) % args.thinning_interval == 0:
torch.save(model.state_dict(), out_directory + '/seed_%d_mt_%d.pt'%(seed, mt))
mt += 1
bar.next()
bar.finish()
print("model testing")
pred_list=[]
target_list=[]
sigma_list=[]
with torch.no_grad():
for mt in range(args.num_sample):
model = M_MLP(p_data, num_classes, args.L, args.p, args.prior, args.prior_scale).cuda()
model.load_state_dict(torch.load(out_directory + '/seed_%d_mt_%d.pt'%(seed,mt)))
model.eval()
pred = []
for batch_idx, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
pred.append(outputs.squeeze())
if mt==0:
target_list.append(targets.squeeze())
sigma_list.append(model.sigma.data)
pred_list.append(torch.cat(pred,0))
pred_list = torch.stack(pred_list)
target_list = torch.cat(target_list,0)
sigma_list = torch.cat(sigma_list,0)
RMSE, m_NLL, CRPS = evaluate_averaged_model_regression(pred_list, target_list, sigma_list)
print("RMSE : ", RMSE, " m_NLL : ", m_NLL, " CRPS : ", CRPS)
result1_list.append(RMSE)
result2_list.append(m_NLL)
result3_list.append(CRPS)
    num_seed = args.end_seed - args.start_seed + 1  # seeds run inclusively from start_seed to end_seed
result1_list, result2_list, result3_list = np.stack(result1_list), np.stack(result2_list), np.stack(result3_list)
print("%.3f(%.3f), %.3f(%.3f), %.3f(%.3f)" % (np.mean(result1_list), np.std(result1_list)/np.sqrt(num_seed),np.mean(result2_list), np.std(result2_list)/np.sqrt(num_seed),np.mean(result3_list), np.std(result3_list)/np.sqrt(num_seed)))
if __name__ == '__main__':
main()
| 9,295 | 43.908213 | 251 |
py
|
mBNN
|
mBNN-main/MBNN/Kalman_Filter.py
|
# Reference : https://github.com/ChadFulton/tsa-notebooks/blob/master/code_state_space.ipynb
import numpy as np
def kalman_filter(y, Z, H, T, Q, a_0, P_0):
# Dimensions
k_endog, nobs = y.shape
k_states = T.shape[0]
    # Allocate memory for variables
filtered_state = np.zeros((k_states, nobs))
filtered_state_cov = np.zeros((k_states, k_states, nobs))
predicted_state = np.zeros((k_states, nobs+1))
predicted_state_cov = np.zeros((k_states, k_states, nobs+1))
forecast = np.zeros((k_endog, nobs))
forecast_error = np.zeros((k_endog, nobs))
forecast_error_cov = np.zeros((k_endog, k_endog, nobs))
loglikelihood = np.zeros((nobs+1,))
# Copy initial values to predicted
predicted_state[:, 0] = a_0
predicted_state_cov[:, :, 0] = P_0
# Kalman filter iterations
for t in range(nobs):
# Forecast for time t
forecast[:, t] = np.dot(Z, predicted_state[:, t])
# Forecast error for time t
forecast_error[:, t] = y[:, t] - forecast[:, t]
# Forecast error covariance matrix and inverse for time t
tmp1 = np.dot(predicted_state_cov[:, :, t], Z.T)
forecast_error_cov[:, :, t] = (
np.dot(Z, tmp1) + H
)
forecast_error_cov_inv = np.linalg.inv(forecast_error_cov[:, :, t])
determinant = np.linalg.det(forecast_error_cov[:, :, t])
# Filtered state for time t
tmp2 = np.dot(forecast_error_cov_inv, forecast_error[:,t])
filtered_state[:, t] = (
predicted_state[:, t] +
np.dot(tmp1, tmp2)
)
# Filtered state covariance for time t
tmp3 = np.dot(forecast_error_cov_inv, Z)
filtered_state_cov[:, :, t] = (
predicted_state_cov[:, :, t] -
np.dot(
np.dot(tmp1, tmp3),
predicted_state_cov[:, :, t]
)
)
# Loglikelihood
loglikelihood[t] = -0.5 * (
np.log((2*np.pi)**k_endog * determinant) +
np.dot(forecast_error[:, t], tmp2)
)
# Predicted state for time t+1
predicted_state[:, t+1] = np.dot(T, filtered_state[:, t])
# Predicted state covariance matrix for time t+1
tmp4 = np.dot(T, filtered_state_cov[:, :, t])
predicted_state_cov[:, :, t+1] = np.dot(tmp4, T.T) + Q
predicted_state_cov[:, :, t+1] = (
predicted_state_cov[:, :, t+1] + predicted_state_cov[:, :, t+1].T
) / 2
return (
filtered_state, filtered_state_cov,
predicted_state, predicted_state_cov,
forecast, forecast_error, forecast_error_cov,
loglikelihood
)
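# Hedged usage sketch (not from the original repository): filter a toy local-level model,
# i.e. a univariate random walk observed with noise. All dimensions and noise variances
# below are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    nobs = 50
    state = np.cumsum(rng.normal(scale=0.1, size=nobs))
    y = (state + rng.normal(scale=0.5, size=nobs)).reshape(1, -1)  # shape (k_endog, nobs)
    Z = np.ones((1, 1)); H = np.ones((1, 1)) * 0.25   # observation loading and noise variance
    T = np.ones((1, 1)); Q = np.ones((1, 1)) * 0.01   # state transition and innovation variance
    a_0 = np.zeros(1); P_0 = np.ones((1, 1))
    filtered_state, filtered_state_cov, *_, loglik = kalman_filter(y, Z, H, T, Q, a_0, P_0)
    print(filtered_state.shape, loglik[:-1].sum())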
| 2,681 | 32.949367 | 93 |
py
|
mBNN
|
mBNN-main/MBNN/models.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class M_relu(nn.Module):
def __init__(self, input_dim, init_active_dim):
super().__init__()
self.input_dim = input_dim
self.init_active_dim = init_active_dim
self.active = nn.Parameter(torch.cuda.FloatTensor([1]*self.init_active_dim +
[0]*(self.input_dim-self.init_active_dim)), requires_grad=False)
def forward(self, x):
if len(x.shape)==2:
M = self.active.view(1,-1)
return M * F.relu(x)
elif len(x.shape)==4:
M = self.active.view(1,-1,1,1)
return M * F.relu(x)
class M_MLP(nn.Module):
def __init__(self, input_dim, output_dim, L, init_p, prior, prior_scale):
super(M_MLP, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.L = L
self.p = np.floor(1.1*init_p).astype(int) # For additional space
self.p_vec = np.hstack([input_dim,np.repeat(self.p,L),output_dim])
self.init_active_dim = init_p
self.layers = self._make_layer()
self.register_buffer('sigma', torch.tensor([1.0]).cuda())
if prior == 'Cauchy':
self.prior = torch.distributions.cauchy.Cauchy(torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
elif prior == 'Normal':
self.prior = torch.distributions.normal.Normal(torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
self.prior_scale = prior_scale
def _make_layer(self):
layers = []
for l in range(self.L):
layer = []
layer.append(nn.Linear(self.p_vec[l], self.p_vec[l+1]))
layer.append(M_relu(self.p_vec[l+1], self.init_active_dim))
layers.append(nn.Sequential(*layer))
layer = []
layer.append(nn.Linear(self.p, self.output_dim))
layers.append(nn.Sequential(*layer))
return nn.Sequential(*layers)
def forward(self, x):
x = x.view(-1, self.input_dim)
x = self.layers(x)
return x
class Masked_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(Masked_BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.M_relu1 = M_relu(planes,int(planes*4/5))
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.M_relu2 = M_relu(planes,int(planes*4/5))
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = self.M_relu1(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = self.M_relu2(out)
return out
class Masked_ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes, prior, prior_scale):
super(Masked_ResNet, self).__init__()
self.in_planes = int(64*(5/4))
self.conv1 = nn.Conv2d(3, int(64*(5/4)), kernel_size=3, stride=1, padding=1, bias=False) # For additional space
self.bn1 = nn.BatchNorm2d(int(64*(5/4)))
self.M_relu = M_relu(int(64*(5/4)),64)
self.layer1 = self._make_layer(block, int(64*(5/4)), num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, int(128*(5/4)), num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, int(256*(5/4)), num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, int(512*(5/4)), num_blocks[3], stride=2)
self.linear = nn.Linear(int(512*(5/4))*block.expansion, num_classes)
if prior == 'Cauchy':
self.prior = torch.distributions.cauchy.Cauchy(torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
elif prior == 'Normal':
self.prior = torch.distributions.normal.Normal(torch.tensor([0.0]).cuda(), torch.tensor([prior_scale]).cuda())
self.prior_scale = prior_scale
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.M_relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def M_ResNet18(num_classes, prior, prior_scale):
return Masked_ResNet(Masked_BasicBlock, [2,2,2,2], num_classes=num_classes, prior=prior, prior_scale=prior_scale)
| 5,411 | 41.28125 | 134 |
py
|
mBNN
|
mBNN-main/MBNN/MCMC.py
|
import numpy as np
import torch
import copy
def ll_regression(output, target, sigma):
exponent = -((target - output)**2).sum() / (2 * sigma**2)
log_coeff = (-0.5*torch.log(2*torch.tensor(np.pi))-torch.log(sigma))*output.shape[0]
return (log_coeff + exponent)
def log_prior(model):
l_prior = torch.zeros(1, requires_grad=True).cuda()
for n, p in model.named_parameters():
if (not 'bn' in n) and (not 'active' in n) and (not 'sigma' in n) :
l_prior = l_prior + model.prior.log_prob(p).sum()
return l_prior
def log_likelihood_dataloader(model, task, dataloader):
l_likelihood = torch.zeros(1, requires_grad=True).cuda()
ERROR = torch.zeros(1, requires_grad=True).cuda()
    if task == 'regression':
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
l_likelihood = l_likelihood + ll_regression(outputs, targets, model.sigma)
ERROR += ((targets - outputs)**2).sum()
    elif task == 'classification':
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs, targets = inputs.cuda(), targets.cuda().squeeze().long()
outputs = model(inputs)
l_likelihood = l_likelihood - criterion(outputs, targets)
ERROR += (torch.argmax(outputs,1) != targets).sum()
return l_likelihood, ERROR
def log_prob_dataloader(model, task, dataloader):
return log_prior(model) + log_likelihood_dataloader(model, task, dataloader)[0]
def log_likelihood_data(model, task, inputs, targets):
    if task == 'regression':
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
l_likelihood = ll_regression(outputs, targets, model.sigma)
ERROR = ((targets - outputs)**2).sum()
    elif task == 'classification':
criterion = torch.nn.CrossEntropyLoss(reduction='sum')
inputs, targets = inputs.cuda(), targets.cuda().squeeze().long()
outputs = model(inputs)
l_likelihood = - criterion(outputs, targets)
ERROR = (torch.argmax(outputs,1) != targets).sum()
return l_likelihood, ERROR
def log_prob_data(model, task, inputs, targets, datasize):
return log_prior(model) + log_likelihood_data(model, task, inputs, targets)[0]*(datasize/targets.shape[0])
def HMC(model, task, dataloader, lr, lf_step):
model_tmp = copy.deepcopy(model).cuda()
for n, p in model_tmp.named_parameters():
if p.requires_grad:
if not hasattr(p, 'momentum'):
setattr(p, 'momentum', torch.zeros_like(p.data))
p.momentum = torch.randn(p.size()).cuda()
log_prob = log_prob_dataloader(model_tmp, task, dataloader)
ham = -log_prob.data
for n, p in model_tmp.named_parameters():
if p.requires_grad:
ham += (p.momentum * p.momentum).sum()/2
model_tmp.zero_grad()
log_prob.backward()
for n, p in model_tmp.named_parameters():
if p.requires_grad:
p.momentum += lr*p.grad.data/2
p.data = p.data + lr * p.momentum
for step in range(lf_step-1):
model_tmp.zero_grad()
log_prob = log_prob_dataloader(model_tmp, task, dataloader)
log_prob.backward()
for n, p in model_tmp.named_parameters():
if p.requires_grad:
p.momentum += lr*p.grad.data
p.data = p.data + lr * p.momentum
model_tmp.zero_grad()
log_prob = log_prob_dataloader(model_tmp, task, dataloader)
log_prob.backward()
for n, p in model_tmp.named_parameters():
if p.requires_grad:
p.momentum += lr*p.grad.data/2
ham_tmp = -log_prob.data
for n, p in model_tmp.named_parameters():
if p.requires_grad:
ham_tmp += (p.momentum * p.momentum).sum()/2
if np.isnan((-ham_tmp + ham).item()):
return 0
log_p_accept = min(0., float(-ham_tmp + ham))
if log_p_accept >= torch.log(torch.rand(1)):
model.load_state_dict(copy.deepcopy(model_tmp.state_dict()))
return np.exp(log_p_accept)
class DualAveragingStepSize:
def __init__(self, initial_step_size, target_accept=0.7, gamma=0.2, t0=10.0, kappa=0.75):
self.mu = np.log(initial_step_size)
self.target_accept = target_accept
self.gamma = gamma
self.t = t0
self.kappa = kappa
self.error_sum = 0
self.log_averaged_step = 0
def update(self, p_accept):
self.error_sum += self.target_accept - p_accept
log_step = self.mu - self.error_sum / (np.sqrt(self.t) * self.gamma)
eta = self.t ** -self.kappa
self.log_averaged_step = eta * log_step + (1 - eta) * self.log_averaged_step
self.t += 1
return np.exp(log_step), np.exp(self.log_averaged_step)
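# Hedged usage sketch (mirrors the training scripts, illustrative only): adapt the HMC step
# size from the acceptance probability during burn-in, then freeze it at the dual-averaged
# value. `model`, `task` and `dataloader` are assumed to be constructed elsewhere.
def _example_step_size_adaptation(model, task, dataloader, initial_step_size,
                                  burn_in_epochs, total_epochs, lf_step=20):
    step_size = initial_step_size
    tuner = DualAveragingStepSize(initial_step_size=step_size)
    for epoch in range(total_epochs):
        p_accept = HMC(model, task, dataloader, lr=step_size, lf_step=lf_step)
        if epoch < burn_in_epochs:
            step_size, _ = tuner.update(p_accept)   # keep adapting while burning in
        elif epoch == burn_in_epochs:
            _, step_size = tuner.update(p_accept)   # switch to the averaged step size
    return step_size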
def sigma_update(model, dataloader, n, std=1.0):
ERROR = torch.zeros(1, requires_grad=True).cuda()
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs, targets = inputs.cuda(), targets.cuda()
outputs = model(inputs)
ERROR = ERROR + ((targets - outputs)**2).sum()
gamma_sampler = torch.distributions.gamma.Gamma(torch.tensor([1.0 + n/2]).cuda(), ERROR/2 + std)
model.sigma = torch.sqrt(1/gamma_sampler.sample())
def noise_loss(model, lr):
noise_loss = 0.0
noise_std = (2/lr)**0.5
for var in model.parameters():
means = torch.cuda.FloatTensor(var.size()).fill_(0)
noise_loss += torch.sum(var * torch.normal(means, std = noise_std).cuda())
return noise_loss
| 6,201 | 37.04908 | 110 |
py
|
mBNN
|
mBNN-main/MBNN/Mask_Update.py
|
import copy
import numpy as np
import torch
from itertools import permutations
from MCMC import *
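# Note (added for clarity, not in the original file): mask_update_* performs a
# Metropolis-Hastings birth/death move on the binary 'active' masks. u=1
# proposes deactivating N currently-active units (death), u=0 proposes
# activating N inactive units (birth); the move is accepted with probability
# prior_rate * exp(log_prob_new - log_prob_old) * (reverse proposal /
# forward proposal), clamped to 1.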
def mask_update_dataloader(model, task, dataloader, datasize, lam=0.1, N_max=3, death_method="proposed", birth_method="random"):
model.zero_grad()
for n, p in model.named_parameters():
if 'active' in n:
p.requires_grad = True
log_prob = log_prob_dataloader(model, task, dataloader)
log_prob.backward()
u = torch.bernoulli(torch.tensor(0.5))
N = np.random.randint(1,N_max+1)
if u==1:
proposal_prob = proposal_probability(model, u, method=death_method)
else:
proposal_prob = proposal_probability(model, u, method=birth_method)
ind = torch.multinomial(proposal_prob, N, replacement=False)
active_vectors, active_vectors_size, active_vectors_item, _, current_p = active_info(model)
L = len(active_vectors_size)
ind_L = [(np.cumsum(active_vectors_size)<=i).sum() for i in ind.cpu().numpy()]
ind_p = ind.cpu().numpy() - np.insert(np.cumsum(active_vectors_size),0,0)[ind_L]
nums = np.bincount(ind_L, minlength=L)
prior_rate = 1
for l in range(L):
if nums[l]>0:
prior_rate *= torch.exp(-(lam*np.log(datasize))**5 *((2-4*u)*current_p[l]*nums[l] + nums[l]**2))
if u==0:
prior_rate *= torch.prod(torch.arange(current_p[l]+1,current_p[l]+nums[l]+1)) / torch.prod(torch.arange(active_vectors_size[l]-current_p[l]-nums[l]+1,active_vectors_size[l]-current_p[l]+1))
else:
prior_rate *= torch.prod(torch.arange(active_vectors_size[l]-current_p[l]+1,active_vectors_size[l]-current_p[l]+nums[l]+1)) / torch.prod(torch.arange(current_p[l]-nums[l]+1,current_p[l]+1))
model_tmp = copy.deepcopy(model)
active_vectors_tmp = []
for n, p in model_tmp.named_parameters():
if 'active' in n:
active_vectors_tmp.append(p)
for k in range(len(ind_L)):
active_vectors_tmp[ind_L[k]][ind_p[k]].data += (1-2*u)
log_prob_tmp = log_prob_dataloader(model_tmp, task, dataloader)
log_prob_tmp.backward()
if (1-u)==1:
proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=death_method)
else:
proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=birth_method)
accept_prob = torch.clamp(prior_rate*torch.exp(log_prob_tmp-log_prob)*prob_multi_wor(proposal_prob_tmp, ind)/prob_multi_wor(proposal_prob, ind), max=1)
if torch.rand(1).cuda()<accept_prob:
for k in range(len(ind_L)):
if active_vectors[ind_L[k]].sum()>2*N_max:
active_vectors[ind_L[k]][ind_p[k]].data += (1-2*u)
for n, p in model.named_parameters():
if 'active' in n:
p.requires_grad = False
def mask_update_data(model, task, inputs, targets, datasize, lam=0.1, N_max=3, death_method="proposed", birth_method="random"):
model.zero_grad()
for n, p in model.named_parameters():
if 'active' in n:
p.requires_grad = True
log_prob = log_prob_data(model, task, inputs, targets, datasize)
log_prob.backward()
u = torch.bernoulli(torch.tensor(0.5))
N = np.random.randint(1,N_max+1)
if u==1:
proposal_prob = proposal_probability(model, u, method=death_method)
else:
proposal_prob = proposal_probability(model, u, method=birth_method)
ind = torch.multinomial(proposal_prob, N, replacement=False)
active_vectors, active_vectors_size, active_vectors_item, _, current_p = active_info(model)
L = len(active_vectors_size)
ind_L = [(np.cumsum(active_vectors_size)<=i).sum() for i in ind.cpu().numpy()]
ind_p = ind.cpu().numpy() - np.insert(np.cumsum(active_vectors_size),0,0)[ind_L]
nums = np.bincount(ind_L, minlength=L)
prior_rate = 1
for l in range(L):
if nums[l]>0:
prior_rate *= torch.exp(-(lam*np.log(datasize))**5 *((2-4*u)*current_p[l]*nums[l] + nums[l]**2))
if u==0:
prior_rate *= torch.prod(torch.arange(current_p[l]+1,current_p[l]+nums[l]+1)) / torch.prod(torch.arange(active_vectors_size[l]-current_p[l]-nums[l]+1,active_vectors_size[l]-current_p[l]+1))
else:
prior_rate *= torch.prod(torch.arange(active_vectors_size[l]-current_p[l]+1,active_vectors_size[l]-current_p[l]+nums[l]+1)) / torch.prod(torch.arange(current_p[l]-nums[l]+1,current_p[l]+1))
model_tmp = copy.deepcopy(model)
active_vectors_tmp = []
for n, p in model_tmp.named_parameters():
if 'active' in n:
active_vectors_tmp.append(p)
for k in range(len(ind_L)):
active_vectors_tmp[ind_L[k]][ind_p[k]].data += (1-2*u)
log_prob_tmp = log_prob_data(model_tmp, task, inputs, targets, datasize)
log_prob_tmp.backward()
if (1-u)==1:
proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=death_method)
else:
proposal_prob_tmp = proposal_probability(model_tmp, 1-u, method=birth_method)
accept_prob = torch.clamp(prior_rate*torch.exp(log_prob_tmp-log_prob)*prob_multi_wor(proposal_prob_tmp, ind)/prob_multi_wor(proposal_prob, ind), max=1)
if torch.rand(1).cuda()<accept_prob:
#print("jump")
for k in range(len(ind_L)):
active_vectors[ind_L[k]][ind_p[k]].data += (1-2*u)
for n, p in model.named_parameters():
if 'active' in n:
p.requires_grad = False
return accept_prob
def active_info(model):
active_vectors = []
active_vectors_size = []
active_vectors_item = []
active_vectors_grad = []
current_p = []
for n, p in model.named_parameters():
if 'active' in n:
active_vectors.append(p)
active_vectors_size.append(len(p))
active_vectors_item.append(p.data)
active_vectors_grad.append(p.grad)
current_p.append(torch.sum(p).data)
active_vectors_item = torch.hstack(active_vectors_item)
active_vectors_grad = torch.hstack(active_vectors_grad)
return(active_vectors, active_vectors_size, active_vectors_item, active_vectors_grad, current_p)
def prob_multi_wor(proposal_prob, ind):
    total = 0
    for ind_permute in permutations(ind):
        prod = 1
        proposal_prob_tmp = copy.deepcopy(proposal_prob)
        for k in range(len(ind_permute)):
            prod *= proposal_prob_tmp[ind_permute[k]]/torch.sum(proposal_prob_tmp)
            proposal_prob_tmp[ind_permute[k]] *= 0
        total += prod
    return total
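# Illustrative example (not part of the original file): prob_multi_wor sums,
# over all orderings, the probability of drawing the indices in `ind` via
# multinomial sampling without replacement. For a uniform proposal over three
# items and ind = [0, 1] this is 2 * (1/3) * (1/2) = 1/3, e.g.
#   prob_multi_wor(torch.tensor([1., 1., 1.]), torch.tensor([0, 1]))  -> ~0.3333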
def proposal_probability(model, u, method="random"):
_, _, active_vectors_item, active_vectors_grad, _ = active_info(model)
if method=='Oops':
proposal_prob = torch.where(active_vectors_item==u, torch.clamp(torch.exp(-(2*active_vectors_item-1)*active_vectors_grad/2), max=1e35), torch.tensor([0.]).cuda())
elif method=='proposed':
proposal_prob = torch.where(active_vectors_item==u, torch.exp(-torch.abs(active_vectors_grad)/2), torch.tensor([0.]).cuda())
elif method=='random':
proposal_prob = torch.where(active_vectors_item==u, torch.tensor([1.]).cuda(), torch.tensor([0.]).cuda())
proposal_prob = proposal_prob / torch.sum(proposal_prob)
return proposal_prob
| 7,489 | 40.381215 | 205 |
py
|
LTL-GATA
|
LTL-GATA-main/src/main.py
|
from argparse import ArgumentParser, Namespace
from copy import deepcopy
from gutils import init_logger
from args import add_args
from train import train
from test import test
import utils
parser = ArgumentParser()
add_args(parser)
logger = None
def main(args: Namespace):
config = utils.load_config(args.config, args.params)
logger = init_logger(log_file=config.io.root / args.logs,
log_level=args.log_level,
name=config.io.tag)
if args.pretrain:
logger.info(f"Running pretraining for {config.io.tag}")
config_pretrain = deepcopy(config)
train_args = vars(config_pretrain.train)
pretrain_args = vars(config_pretrain.pretrain)
for elem, val in pretrain_args.items():
if elem in train_args:
train_args[elem] = val
train(config=config_pretrain, pretrain=True)
if args.train:
logger.info("Running training")
logger.info(f"Experiment tag: {config.io.tag}")
logger.info(f"Output Dir: {config.io.output_dir}")
logger.info(f"Checkpoint Dir: {config.io.checkpoint_dir}")
logger.info(f"Trajectories Dir: {config.io.trajectories_dir}")
train(config)
if args.test:
logger.info(f"Running testing for {config.io.tag}")
test(config)
    if not any((args.pretrain, args.train, args.test)):
        logger.warning("Unknown command. Use '--pretrain', '--train' or '--test'")
if __name__ == "__main__":
args = parser.parse_args()
main(args)
| 1,529 | 29.6 | 75 |
py
|
LTL-GATA
|
LTL-GATA-main/src/test.py
|
from argparse import Namespace
import logging
import csv
from utils import save_trajectories
from evaluate import run as evaluate
from env import get_game_env
from agent import Agent
logger = logging.getLogger()
def test(config: Namespace):
# np.random.seed(config.training.random_seed)
# make game environments
gr = config.training.graph_reward_lambda > 0 \
and config.training.graph_reward_filtered
requested_infos = Agent.env_infos(light=False,
win_formulas=config.model.use_ltl or gr)
env, num_game = get_game_env(
game=config.test.game,
data_dir=config.io.data_dir,
vocab_dir=config.io.vocab_dir,
difficulty_level=config.test.difficulty_level,
requested_infos=requested_infos,
max_episode_steps=config.test.steps_per_episode,
batch_size=config.test.batch_size,
split='test',
real_valued_graph=config.graph_updater.real_valued,
randomized_nouns_verbs=False,
prune=config.training.prune_actions,
strip_instructions=config.training.strip_instructions)
agent = Agent(config=config,
word_vocab=env.word_vocab,
ltl_vocab=env.ltl_vocab,
node_vocab=env.node_vocab,
relation_vocab=env.relation_vocab,
action_vocab=env.action_vocab,
pretrain=False,)
agent.load_model(config.test.filename)
logger.info(f"Loaded model from {config.test.filename}")
test_game_points, test_game_points_normalized, test_reward, \
test_normalized_rewards, test_game_steps, \
test_success, test_trajectories = evaluate(env, agent, num_game,
config.test, test=True)
logger.info(f"Saving results to {config.io.output_dir}")
# results_data = {'config': serialize_namespace(deepcopy(config))}
data = {
"test_game_points": str(test_game_points),
"test_normalized_game_points": str(test_game_points_normalized),
"test_rewards": str(test_reward),
"test_normalized_rewards": str(test_normalized_rewards),
"test_game_steps": str(test_game_steps),
"test_success": str(test_success),
}
results_file = config.io.output_dir / 'test_results.csv'
with results_file.open('w') as f:
writer = csv.writer(f)
writer.writerow(data.keys())
writer.writerow(data.values())
save_trajectories(test_trajectories,
config.io.trajectories_dir / 'test_trajectories.pkl',)
| 2,595 | 37.746269 | 78 |
py
|
LTL-GATA
|
LTL-GATA-main/src/tree.py
|
class Tree:
def __init__(self,) -> None:
...
class TreeNode:
def __init__(self, value, parent=None, children=None, mask=None):
self.value = value
self.parent = parent
self.children = children if children else []
self.mask = mask if mask is not None else [1.0] * 81
self._positional_encoding = None
if self.parent is None:
self.branch = 0
else:
if self not in self.parent.children:
self.parent.children.append(self)
self.branch = self.parent.children.index(self)
def num_children(self):
return len(self.children)
def size(self):
return 1 + sum([child.size() for child in self.children])
def depth(self):
if self.is_leaf():
return 0
return 1 + max([child.depth() for child in self.children])
def height(self):
if self.parent is None:
return 0
return 1 + self.parent.height()
def width(self):
return max([self.num_children()] + [child.width() for
child in self.children])
def is_leaf(self):
return self.num_children() == 0
def is_first_child(self):
return self.branch == 0
def is_last_child(self):
return self.branch == self.parent.num_children() - 1 if\
self.parent else True
def get_positional_encoding(self):
if self._positional_encoding is None:
if self.parent:
self._positional_encoding = [
0.0 for _ in range(self.parent.num_children())]
self._positional_encoding[self.branch] = 1.0
self._positional_encoding += \
self.parent.get_positional_encoding()
else:
self._positional_encoding = []
return self._positional_encoding
def get_padded_positional_encoding(self, max_pos_len):
padded = [x for x in self.get_positional_encoding()]
while len(padded) < max_pos_len:
padded.append(0.0)
padded = padded[: max_pos_len]
return padded
def is_isomorphic(self, arg, struct_only=False):
if (struct_only or self.value == arg.value) and \
self.num_children() == arg.num_children():
for i in range(len(self.children)):
if not self.children[i].is_isomorphic(arg.children[i],
struct_only):
return False
return True
return False
def prefix_traversal(self):
def _prefix(node):
yield node
for child in node.children:
yield from _prefix(child)
yield from _prefix(self)
def postfix_traversal(self):
def _postfix(node):
for child in node.children:
yield from _postfix(child)
yield node
yield from _postfix(self)
def depth_first_traversal(self):
yield from self.prefix_traversal()
def breadth_first_traversal(self):
unresolved = [self]
while unresolved:
yield unresolved[0]
unresolved += unresolved[0].children
del unresolved[0]
def choose_traversal(self, str_):
str_to_traversal = {
"prefix": self.prefix_traversal,
"postfix": self.postfix_traversal,
"depth_first": self.depth_first_traversal,
"breadth_first": self.breadth_first_traversal
}
yield from str_to_traversal[str_]()
def convert_to_sequence(self, traversal, separator=' '):
seq = ""
for node in traversal:
seq += str(node.value) + separator
return seq
def fill(self, branch_factor=2, placeholder_token='_NULL'):
fill_tree = {}
for node in self.depth_first_traversal():
value = node.value
if node.is_leaf():
value += "_0"
if node is self:
fill_tree[node] = TreeNode(value)
else:
fill_tree[node] = TreeNode(value, fill_tree[node.parent])
for node in self.depth_first_traversal():
if not node.is_leaf():
while len(fill_tree[node].children) < branch_factor:
TreeNode(placeholder_token, fill_tree[node])
return fill_tree[self]
def left_child_right_sibling(self, placeholder_token='_NULL'):
lcrs_tree = {}
for node in self.depth_first_traversal():
if node is self:
lcrs_tree[node] = TreeNode(node.value)
else:
if node.is_first_child():
lcrs_tree[node] = TreeNode(
node.value, lcrs_tree[node.parent])
if node.parent.is_last_child():
TreeNode(placeholder_token, lcrs_tree[node.parent])
else:
lcrs_tree[node] = TreeNode(
node.value, lcrs_tree[node.parent.children[
node.branch - 1]])
if node.is_leaf():
TreeNode(placeholder_token, lcrs_tree[node])
if node.is_last_child():
TreeNode(placeholder_token, lcrs_tree[node])
return lcrs_tree[self]
def inverse_left_child_right_sibling(self, placeholder_token='_NULL'):
ilcrs_tree = {}
try:
for node in self.depth_first_traversal():
if node.num_children() == 1:
TreeNode(placeholder_token, node)
for node in self.depth_first_traversal():
if node is self:
ilcrs_tree[node] = TreeNode(node.value)
elif node.value != placeholder_token:
true_first_child = node
while true_first_child.branch == 1:
true_first_child = true_first_child.parent
ilcrs_tree[node] = TreeNode(
node.value, ilcrs_tree[true_first_child.parent])
return ilcrs_tree[self]
except Exception:
return TreeNode(placeholder_token)
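# Illustrative usage sketch (not part of the original file): build a tiny tree
# and serialize it with a chosen traversal. Node values are hypothetical.
def _tree_demo():
    root = TreeNode('and')
    left = TreeNode('a', parent=root)
    TreeNode('b', parent=root)
    TreeNode('c', parent=left)
    # prefix order visits 'and a c b'; breadth-first visits 'and a b c'
    return root.convert_to_sequence(root.choose_traversal('prefix'))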
| 6,246 | 35.109827 | 75 |
py
|
LTL-GATA
|
LTL-GATA-main/src/evaluate.py
|
from collections import defaultdict
from argparse import Namespace
import datetime
import logging
import pdb
import numpy as np
from env.cooking import RecipeWrappedEnv
from utils import expand_trajectories
from components import AgentModes
from agent import Agent
logger = logging.getLogger()
def run(env: RecipeWrappedEnv, agent: Agent,
num_games: int, config: Namespace,
test: bool = False) -> None:
trajectories = list()
start_time = datetime.datetime.now()
achieved_game_points, achieved_reward, total_game_steps = \
list(), list(), list()
original_max_scores, games_max_scores,\
still_running_mask = list(), list(), list()
total_success = list()
for episode_no in range(0, num_games, config.batch_size):
obs, infos = env.reset()
actions = ["restart"] * config.batch_size
prev_step_dones = [0.] * config.batch_size
trajectories.extend([defaultdict(list)
for _ in range(config.batch_size)])
if test:
agent.test()
else:
agent.eval()
agent.reset_eval_states(obs, actions, infos)
previous_hidden, previous_cell = None, None
original_scores, max_scores = agent.games_max_scores(infos)
original_max_scores.extend(original_scores)
games_max_scores.extend(max_scores)
trajectories = expand_trajectories(
obs=obs, infos=infos, episode_no=episode_no,
batch_size=config.batch_size, dones=[False]*config.batch_size,
admissible_actions=[None] * config.batch_size,
rewards=[0] * config.batch_size,
step_rewards=[0] * config.batch_size,
actions=actions, trajectories=trajectories,
game_points=[0] * config.batch_size,
max_game_points=original_max_scores,
max_rewards=max_scores,
step_game_points=[0.] * config.batch_size,
states=agent.eval_states)
for step_no in range(config.steps_per_episode):
admissible_actions = infos['admissible_commands']
logger.debug("Eval acting")
if config.feed_cookbook_observation and step_no == 0:
next_actions_ind = [actions.index('examine cookbook') for
actions in admissible_actions]
actions = ["examine cookbook" for
_ in admissible_actions]
else:
next_actions_ind, previous_hidden, previous_cell = \
agent.act(states=agent.eval_states,
admissible_actions=admissible_actions,
greedy=True if np.equal(
config.eps, 0) else False,
previous_hidden=previous_hidden,
previous_cell=previous_cell,
eps=config.eps)
next_actions_ind = next_actions_ind.cpu().tolist()
actions = [candidates[i] for candidates, i in zip(
admissible_actions, next_actions_ind)]
logger.debug("Eval env step")
obs, scores, dones, infos = env.step(
actions, agent.eval_states.ltl_formulas)
# dones_wons = [done and won for done, won in
# zip(dones, infos['has_won'])]
logger.debug("Eval state update")
state_update_rewards, state_update_dones = \
agent.update_eval_states(obs, actions, dones, infos)
original_scores, scores = agent.adjust_scores(
scores, step_no, state_update_rewards, state_update_dones,
infos['won'], dones)
trajectories = expand_trajectories(
obs=obs, infos=infos, episode_no=episode_no,
states=agent.eval_states,
batch_size=config.batch_size, dones=dones, rewards=scores,
admissible_actions=admissible_actions,
actions=actions, trajectories=trajectories,
step_rewards=[0.] * config.batch_size,
game_points=original_scores,
max_game_points=original_max_scores,
max_rewards=max_scores,
step_game_points=[0.] * config.batch_size,
)
still_running = [1. - float(item) for item in prev_step_dones]
prev_step_dones = dones
still_running_mask.append(still_running)
if np.sum(still_running) == 0:
break
achieved_game_points.extend(original_scores)
achieved_reward.extend(scores)
total_success.extend([won for won in infos['won']])
total_game_steps = np.sum(still_running_mask, 0).tolist()
time_mark = datetime.datetime.now()
achieved_game_points = np.array(achieved_game_points, dtype="float32")
achieved_reward = np.array(achieved_reward, dtype="float32")
normalized_game_points = achieved_game_points / original_max_scores
normalized_rewards = achieved_reward / games_max_scores
original_max_scores = np.array(original_max_scores, dtype="float32")
games_max_scores = np.array(games_max_scores, dtype="float32")
logger.info(
f"\nEval | T: {str(time_mark - start_time).rsplit('.')[0]:s} | " +
f"normalized game points: {np.mean(normalized_game_points):2.3f} | " +
f"normalized reward: {np.mean(normalized_rewards):2.3f} | " +
f"game success: {np.mean(total_success):.3f} | " +
f"steps: {np.mean(total_game_steps):2.2f}"
)
return (np.mean(achieved_game_points),
np.mean(normalized_game_points),
np.mean(achieved_reward),
np.mean(normalized_rewards),
np.mean(total_game_steps),
np.mean(total_success), trajectories)
| 5,867 | 42.466667 | 78 |
py
|
LTL-GATA
|
LTL-GATA-main/src/args.py
|
from argparse import ArgumentParser
from datetime import datetime
from gutils.components import LogLevel
def add_args(parser: ArgumentParser) -> None:
parser.add_argument('--config', help='Config file path',
default='config.yaml')
parser.add_argument(
'--logs', help="Set output for log outputs",
default=f"logs/tod_{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}.log")
parser.add_argument('--log-level', type=LogLevel.__getitem__,
default=LogLevel.INFO,
choices=LogLevel.__members__.values(),
dest='log_level',
help="Log level.")
parser.add_argument(
"-p", "--params", nargs="+",
metavar="my.setting=value", default=[],
help="override params of the config file,"
" e.g. -p 'training.gamma=0.95'")
parser.add_argument("--pretrain", action='store_true', default=False,)
parser.add_argument("--train", action='store_true', default=False,)
parser.add_argument("--test", action='store_true', default=False,)
| 1,103 | 39.888889 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/belief_graph.py
|
from typing import List, Tuple, Set, Union
import torch
from textworld.logic import Proposition
from utils import triplet_to_proposition, proposition_to_triplet
def exists_triplet(triplets, arg1, arg2, relation):
for i, t in enumerate(triplets):
if arg1 in [t[0], "*"] and\
arg2 in [t[1], "*"] and\
relation in [t[2], "*"]:
return i
return None
def update_graph_triplets(triplets, commands, node_vocab, relation_vocab):
# remove duplicate but remain the order
tmp_commands = []
for cmd in commands:
if cmd not in tmp_commands:
tmp_commands.append(cmd)
commands = tmp_commands
for cmd in commands:
# get verb-arg1-arg2
if not (cmd.startswith("add") or cmd.startswith("delete")):
continue
cmd = cmd.split()
if len(cmd) <= 3:
continue
verb = cmd[0]
relation = cmd[-1]
if relation not in relation_vocab:
continue
nouns = " ".join(cmd[1:-1])
arg1, arg2 = "", ""
for n in node_vocab:
if nouns.startswith(n):
tmp = nouns[len(n):].strip()
if tmp == n:
continue
if tmp in node_vocab:
arg1 = n
arg2 = tmp
break
if arg1 == "" or arg2 == "":
continue
# manipulate KG
index = exists_triplet(triplets, arg1, arg2, relation)
if verb == "add":
if index is not None:
continue
triplets.append([arg1, arg2, relation])
else:
if index is None:
continue
triplets = triplets[:index] + triplets[index + 1:]
return triplets
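# Illustrative example (not part of the original file): a predicted graph
# command such as "add apple counter on" is parsed into arg1="apple",
# arg2="counter", relation="on" and appended; a matching "delete ..." command
# removes the triplet again, e.g.
#   update_graph_triplets([], ["add apple counter on"],
#                         node_vocab={"apple", "counter"}, relation_vocab={"on"})
#   -> [['apple', 'counter', 'on']]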
class BeliefGraph:
def __init__(self, observation: str,
node_vocab: Set[str],
relation_vocab: Set[str],
ground_truth: bool,
seperator: str) -> None:
self._facts = set()
self._node_vocab = node_vocab
self._facts_as_triplets = list()
self._observations = observation
self._ground_truth = ground_truth
self._relation_vocab = relation_vocab
self._seperator = seperator
self.reward = 0
self.memory = set()
def to_str(self, facts) -> str:
        return str(sorted('-'.join(fact) for fact in facts))
@property
def seen(self) -> bool:
return self.to_str(self._facts_as_triplets) in self.memory
def update_memory(self):
self.memory.add(self.to_str(self._facts_as_triplets))
def graph_rewards(self, prev_facts: List[Tuple[str, str, str]],
entities: List[str],
filtered: bool) -> float:
# if self._ground_truth:
# return 0
# if self.seen:
# return self.reward
if filtered:
prev_facts = set([tuple(f)
for f in prev_facts if f[0] in entities])
curr_facts = set([tuple(f) for f in self._facts_as_triplets
if f[0] in entities])
else:
prev_facts = set([tuple(f) for f in prev_facts])
curr_facts = set([tuple(f) for f in self._facts_as_triplets])
self.reward += len(curr_facts - prev_facts)
self.update_memory()
return self.reward
@property
def facts_as_triplets(self) -> Set[Tuple[str, str, str]]:
if self._ground_truth:
triplets = list()
for prop in self._facts:
triplet = proposition_to_triplet(prop)
node1, node2, relation = triplet
if node1 in self._node_vocab and node2 in \
self._node_vocab and \
relation in self._relation_vocab:
triplets.append(triplet)
return triplets
return self._facts_as_triplets
@property
def facts(self) -> Set[Proposition]:
return self._facts
def update(self, facts: Union[List[List[Tuple[Proposition]]],
List[Tuple[str, str, str]]],) -> None:
if facts is None:
return
if self._ground_truth:
# self._facts = self._facts | set(facts)
self._facts = facts
return
if isinstance(facts, torch.Tensor):
self._facts = facts
return
# per example in a batch
predict_cmds = facts.split("<sep>")
if predict_cmds[-1].endswith("<eos>"):
predict_cmds[-1] = predict_cmds[-1][:-5].strip()
else:
predict_cmds = predict_cmds[:-1]
if len(predict_cmds) == 0:
return
predict_cmds = [" ".join(item.split()) for item in predict_cmds]
predict_cmds = [item for item in predict_cmds if len(item) > 0]
self._facts_as_triplets = update_graph_triplets(
self._facts_as_triplets, predict_cmds,
self._node_vocab, self._relation_vocab)
new_facts = [triplet_to_proposition(triplet, self._seperator)
for triplet in self._facts_as_triplets]
self._facts = new_facts
| 5,241 | 33.261438 | 74 |
py
|
LTL-GATA
|
LTL-GATA-main/src/state.py
|
from __future__ import annotations
from typing import List, Tuple
from copy import deepcopy, copy
from belief_graph import BeliefGraph
from ltl import LTL
class State:
def __init__(self,
observation: str = None,
action: str = None,
ltl: LTL = None,
belief_graph: BeliefGraph = None,
past: List[State] = None) -> None:
self._ltl = ltl
self._past = list() if past is None else past
self._action = action
self._observation = observation
self._belief_graph = belief_graph
def update(self, observation: str,
action: str,
done: bool,
               facts: List[Tuple[str, str, str]]) -> Tuple[float, bool]:
self._past.append(
State(observation=self._observation,
ltl=deepcopy(self._ltl),
action=self._action,
belief_graph=copy(self._belief_graph),
past=self.past))
self._action = action
self._observation = observation
self._belief_graph.update(facts)
if self._ltl is not None:
ltl_reward, ltl_done = self._ltl.progress(
self._belief_graph.facts, action, done, observation)
else:
ltl_reward, ltl_done = 0, False
return ltl_reward, ltl_done
@property
def past(self) -> List[State]:
return self._past
@property
def action(self) -> str:
return self._action
@property
def observation(self) -> str:
return self._observation
@property
def ltl(self) -> LTL:
return self._ltl
@property
def belief_graph(self) -> BeliefGraph:
return self._belief_graph
class BatchedStates:
def __init__(self,
                 states: List[State] = None,
observations: List[str] = None,
actions: List[str] = None,
ltl_formulas: List[LTL] = None,
belief_graphs: List[BeliefGraph] = None,
action_space_size: int = None) -> None:
if states is not None:
self._states = states
elif None in [observations, actions, ltl_formulas, belief_graphs]:
raise ValueError(
"Either states must be passed or all of " +
"{observations, actions, ltl_formulas, belief_graphs}")
else:
self._states = [State(observation=obs, action=act,
ltl=ltl, belief_graph=bg) for
obs, act, ltl, bg in
zip(observations, actions,
ltl_formulas, belief_graphs)]
def __len__(self) -> int:
return len(self._states)
def update(self, observations: List[str],
actions: List[str],
facts: List[Tuple[str, str, str]],
dones: List[bool]):
state_rewards, state_dones = list(), list()
for state, obs, act, _facts, done in zip(self._states, observations,
actions, facts, dones):
state_reward, state_done = state.update(
observation=obs, action=act, done=done, facts=_facts)
state_rewards.append(state_reward)
state_dones.append(state_done)
return state_rewards, state_dones
def __getitem__(self, key: int) -> State:
return self.states[key]
@property
def states(self) -> BatchedStates:
return self._states
@property
def observations(self) -> List[str]:
return [state.observation for state in self._states]
@property
def ltl_formulas(self) -> List[LTL]:
return [state.ltl for state in self._states]
@property
def belief_graphs(self) -> List[BeliefGraph]:
return [state.belief_graph for state in self._states]
| 3,916 | 31.915966 | 76 |
py
|
LTL-GATA
|
LTL-GATA-main/src/utils.py
|
from __future__ import annotations
from typing import List, Dict, Any, Union, Tuple, Deque
from pathlib import Path, PosixPath
from argparse import Namespace
import pickle
from logic import Variable, Proposition
import numpy as np
import torch
import yaml
MISSING_WORDS = set()
CONSTANT_NAMES = {"P": "player", "I": "player",
"ingredient": None, "slot": None, "RECIPE": "cookbook"}
def proposition_to_triplet(proposition: Proposition) -> Tuple[str, str, str]:
if len(proposition.names) == 1:
return (get_variable_name(proposition.names[0]),
proposition.name, 'is')
return (get_variable_name(proposition.names[0]),
get_variable_name(proposition.names[1]),
proposition.name)
def triplet_to_proposition(triplet: Tuple[str, str, str],
seperator: str) -> Proposition:
    if triplet[-1] == 'is':
        return Proposition(name=triplet[1], arguments=[
            Variable(triplet[0])], seperator=seperator)
return Proposition(name=triplet[2], arguments=[
Variable(triplet[0]), Variable(triplet[1])], seperator=seperator)
def rename_variables(
item: Union[Variable, Proposition]) -> Union[Variable, Proposition]:
if isinstance(item, Variable):
item.name = get_variable_name(item.name)
return item
if isinstance(item, Proposition):
new_args = list()
for var in item.arguments:
var.name = get_variable_name(var.name)
new_args.append(var)
item.arguments = tuple(new_args)
return item
raise ValueError(
f"Unknown item type {type(item)}. " +
"Must be one of {Variable, Proposition}")
def max_len(list_of_list):
if len(list_of_list) == 0:
return 0
return max(map(len, list_of_list))
def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.):
'''
Partially borrowed from Keras
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
value: float, value to pad the sequences to the desired value.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
if isinstance(sequences, np.ndarray):
return sequences
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
# pre truncating
trunc = s[-maxlen:]
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
# post padding
x[idx, :len(trunc)] = trunc
return x
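# Illustrative example (not part of the original file): pad a ragged batch of
# token-id sequences with zeros up to a fixed length, e.g.
#   pad_sequences([[3, 7, 2], [5]], maxlen=4)
#   -> array([[3, 7, 2, 0],
#             [5, 0, 0, 0]], dtype=int32)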
def _word_to_id(word, word2id):
try:
return word2id[word]
except KeyError:
key = word + "_" + str(len(word2id))
if key not in MISSING_WORDS:
print("Warning... %s is not in vocab, vocab size is %d..." %
(word, len(word2id)))
# print(word)
# print(word2id)
# raise
MISSING_WORDS.add(key)
# with open("missing_words.txt", 'a+') as outfile:
# outfile.write(key + '\n')
# outfile.flush()
return word2id['<unk>'] # actually just 1
def _words_to_ids(words, word2id):
return [_word_to_id(word, word2id) for word in words]
def get_variable_name(name: str) -> str:
return CONSTANT_NAMES[name] if name in CONSTANT_NAMES else name
def load_config(config_file: Path, params: List[str]) -> Namespace:
assert Path(config_file).exists(), \
f"Could not find config file {config_file}"
with open(config_file) as reader:
config = yaml.safe_load(reader)
# Parse overriden params.
for param in params:
fqn_key, value = param.split("=")
entry_to_change = config
keys = fqn_key.split(".")
for k in keys[:-1]:
entry_to_change = entry_to_change[k]
        entry_to_change[keys[-1]] = yaml.safe_load(value)
# print(config)
config = Namespace(**config)
for k, v in config.__dict__.items():
if isinstance(v, dict):
config.__dict__[k] = Namespace(**v)
config.graph_updater.checkpoint = Path(
config.graph_updater.checkpoint).expanduser()
config.io.pretrained_embedding_path = Path(
config.io.pretrained_embedding_path).expanduser()
config.ltl_encoder.pretrained_embedding_path = Path(
config.ltl_encoder.pretrained_embedding_path).expanduser()
config.text_encoder.pretrained_embedding_path = Path(
config.text_encoder.pretrained_embedding_path).expanduser()
config.actions_encoder.pretrained_embedding_path = Path(
config.actions_encoder.pretrained_embedding_path).expanduser()
root = Path(config.io.root)
if root == root.expanduser():
config.io.root = Path(config_file).expanduser().parent / root
else:
config.io.root = root.expanduser()
config.io.output_dir = config.io.root / config.io.output_dir
config.io.checkpoint_dir = config.io.root / config.io.checkpoint_dir
config.io.trajectories_dir = config.io.root / config.io.trajectories_dir
config.io.output_dir.mkdir(exist_ok=True, parents=True)
config.io.checkpoint_dir.mkdir(exist_ok=True, parents=True)
config.io.trajectories_dir.mkdir(exist_ok=True, parents=True)
config.io.data_dir = Path(config.io.data_dir).expanduser()
config.io.vocab_dir = Path(config.io.vocab_dir).expanduser()
if config.test.filename is None:
if (config.io.checkpoint_dir / 'best.pt').exists():
config.test.filename = config.io.checkpoint_dir / 'best.pt'
else:
config.test.filename = config.io.checkpoint_dir / 'best_eval.pt'
else:
test_filename = Path(config.test.filename)
if test_filename == test_filename.expanduser():
config.test.filename = config.io.root / test_filename
else:
config.test.filename = test_filename.expanduser()
if config.io.tag is None:
config.io.tag = config.io.root.name
return config
def serialize_namespace(config: Namespace) -> Dict[str, Any]:
config = vars(config)
for k, v in config.items():
if isinstance(v, Namespace):
config[k] = serialize_namespace(v)
if isinstance(v, PosixPath):
config[k] = str(v)
return config
def expand_trajectories(
episode_no: int, batch_size: int,
obs: List[str], infos: Dict[str, Any],
states: BatchedStates,
trajectories: Deque[Dict[str, Any]],
**kwargs) -> List[Any]:
for i in range(batch_size):
idx = -(batch_size - i)
trajectories[idx]['states'] = states.states[i]
trajectories[idx]['observations'].append(obs[i])
trajectories[idx]['infos'] = {k: v[i] for k, v in infos.items()}
for k, v in kwargs.items():
trajectories[idx][k].append(v[i])
# trajectories[idx]['admissible_actions'].append(admissible_actions[i])
# trajectories[idx]['actions'].append(actions[i])
# trajectories[idx]['rewards'].append(scores[i])
# trajectories[idx]['step_rewards'].append(step_scores[i])
# trajectories[idx]['terminals'].append(dones[i])
return trajectories
def save_trajectories(trajectories, trajectories_file,) -> None:
with trajectories_file.open('wb') as f:
for path in trajectories:
for k, v in path.items():
if k not in {'infos', 'admissible_actions', 'states'}:
path[k] = np.array(v)
pickle.dump(trajectories, f)
def to_np(x):
if isinstance(x, np.ndarray):
return x
return x.data.cpu().numpy()
def to_pt(np_matrix, cuda=False, type='long'):
if type == 'long':
if cuda:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(
torch.LongTensor).cuda())
else:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(
torch.LongTensor))
elif type == 'float':
if cuda:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(
torch.FloatTensor).cuda())
else:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(
torch.FloatTensor))
def graph_triplets_to_string(list_of_triples):
list_of_triples = ["|".join(item) for item in list_of_triples]
list_of_triples.sort()
key = "<|>".join(list_of_triples)
return key
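# Illustrative example (not part of the original file): triplets are joined
# with '|', sorted, and concatenated with '<|>' to form a hashable key, e.g.
#   graph_triplets_to_string([["player", "kitchen", "at"], ["apple", "counter", "on"]])
#   -> 'apple|counter|on<|>player|kitchen|at'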
class EpisodicCountingMemory:
def __init__(self):
self.reset()
def push(self, stuff):
"""stuff is list of list of list of strings.
e.g.: [[['player', 'shed', 'at'], ['workbench', 'shed', 'at']]]
"""
assert len(stuff) > 0 # batch size should be greater than 0
if len(self.memory) == 0:
for _ in range(len(stuff)):
self.memory.append(set())
for b in range(len(stuff)):
key = graph_triplets_to_string(stuff[b])
self.memory[b].add(key)
def has_not_seen(self, stuff):
assert len(stuff) > 0 # batch size should be greater than 0
res = []
for b in range(len(stuff)):
key = graph_triplets_to_string(stuff[b])
res.append(key not in self.memory[b])
return res
def reset(self):
self.memory = []
def __len__(self):
return len(self.memory)
class LinearSchedule:
"""
Linear interpolation between initial_p and final_p over
schedule_timesteps. After this many timesteps pass final_p is
returned.
:param schedule_timesteps: (int) Number of timesteps for which to linearly
anneal initial_p to final_p
:param initial_p: (float) initial output value
:param final_p: (float) final output value
"""
def __init__(self, schedule_timesteps: int, final_p: float,
initial_p: float = 1.0) -> None:
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
self.schedule = np.linspace(initial_p, final_p, schedule_timesteps)
def value(self, step: int) -> float:
if step < 0:
return self.initial_p
if step >= self.schedule_timesteps:
return self.final_p
else:
return self.schedule[step]
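# Illustrative usage (not part of the original file): anneal an epsilon-greedy
# exploration rate from 1.0 down to 0.1 over 100 episodes, e.g.
#   sched = LinearSchedule(schedule_timesteps=100, final_p=0.1, initial_p=1.0)
#   sched.value(0)    -> 1.0
#   sched.value(50)   -> ~0.545
#   sched.value(200)  -> 0.1 (clamped once the schedule ends)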
| 11,080 | 34.289809 | 114 |
py
|
LTL-GATA
|
LTL-GATA-main/src/components.py
|
from __future__ import annotations
from typing import NamedTuple, List, Dict, Union
from dataclasses import dataclass
from enum import Enum
from copy import copy
import logging
import numpy as np
from state import State
Actions = List[str]
logger = logging.getLogger()
@dataclass
class ResultsCSVField:
time: str = None
episode_no: int = None
dqn_loss: float = None
train_game_points: float = None
train_normalized_game_points: float = None
train_rewards: float = None
train_normalized_rewards: float = None
train_game_rewards: float = None
train_steps: float = None
train_success: float = None
eval_game_points: float = None
eval_normalized_game_points: float = None
eval_rewards: float = None
eval_normalized_rewards: float = None
eval_steps: float = None
eval_success: float = None
def keys(self):
return list(vars(self).keys())
def values(self):
return list(vars(self).values())
class Vocabulary:
def __init__(self, vocab: List[str], name: str = 'Vocabulary',
original_only: bool = False) -> None:
self.trash = set()
self.original_tokens = copy(vocab)
if not original_only:
if '<unk>' not in vocab and '[UNK]' not in vocab:
vocab += ['<unk>']
if '<mask>' in vocab:
self.mask_token = '<mask>'
elif '[MASK]' in vocab:
self.mask_token = '[MASK]'
else:
vocab += ['<mask>']
self.mask_token = '<mask>'
if '<pad>' in vocab:
self.pad_token = '<pad>'
elif '[PAD]' in vocab:
self.pad_token = '[PAD]'
else:
vocab += ['<pad>']
self.pad_token = '<pad>'
self.name = name
self.tokens = vocab
self.tokens = list(dict.fromkeys(self.tokens))
self.map = self.build_map(vocab)
@property
def mask_token_id(self) -> int:
return self.map[self.mask_token]
@property
def pad_token_id(self) -> int:
return self.map[self.pad_token]
def build_map(self, vocab: List[str]) -> Dict[str, int]:
return {tok: i for i, tok in enumerate(vocab)}
def __add__(self, other: Union[Vocabulary, List]) -> Vocabulary:
if isinstance(other, list):
self.tokens += other
elif isinstance(other, Vocabulary):
self.tokens += other.tokens
else:
raise ValueError("Other must be of type Vocabulary or List")
self.tokens = list(dict.fromkeys(self.tokens))
self.map = self.build_map(self.tokens)
return self
def __eq__(self, other) -> bool:
        return self.tokens == other.tokens and self.map == other.map
def __str__(self) -> str:
return self.name
def __len__(self) -> int:
return len(self.map)
def __iter__(self):
return iter(self.map)
def __contains__(self, tok) -> bool:
return tok in self.map
def __getitem__(self, tok) -> int:
if isinstance(tok, int) or isinstance(tok, np.int64):
return self.tokens[tok]
if tok not in self.map:
if tok not in self.trash:
logger.warning(f"Token '{tok}' not found in vocab: {self}")
self.trash.update([tok])
if '<unk>' not in self.map:
return self.map['-']
else:
return self.map['<unk>']
return self.map[tok]
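# Illustrative usage (not part of the original file): tokens map to indices,
# unknown tokens fall back to '<unk>', and integer keys map back to tokens, e.g.
#   vocab = Vocabulary(['go', 'north'], name='demo')
#   vocab['north']  -> 1
#   vocab[0]        -> 'go'
#   vocab['fly']    -> vocab['<unk>'] (a warning is logged once per unknown token)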
class Sample(NamedTuple):
step: int
action: int
done: float
state: State
reward: float
is_final: bool
admissible_actions: Actions
class SampleMetadata(NamedTuple):
index: int
weight: float
priority: float
probability: float
class AgentModes(Enum):
eval = 0
train = 1
test = 2
| 3,874 | 25.909722 | 75 |
py
|
LTL-GATA
|
LTL-GATA-main/src/agent.py
|
from typing import Tuple, List, Dict, Any
from argparse import Namespace
from pathlib import Path
import logging
import copy
import json
import pdb
import torch.nn.functional as F
import numpy as np
import torch
from textworld import EnvInfos
from experience_replay import PrioritizedExperienceReplay
from components import AgentModes, Actions, Vocabulary
from graph_updater import GraphUpdater
from belief_graph import BeliefGraph
from utils import LinearSchedule
from optim import get_optimizer
from state import BatchedStates
from model import PolicyNet
from ltl import LTL
logger = logging.getLogger()
class Agent:
def __init__(self, config: Namespace, word_vocab: Vocabulary,
ltl_vocab: Vocabulary, relation_vocab: Vocabulary,
node_vocab: Vocabulary,
action_vocab: Vocabulary,
pretrain: bool = False) -> None:
self.ltl = config.ltl
self.training = config.training
self.evaluate = config.evaluate
self.test_config = config.test
with (config.io.data_dir / 'uuid_mapping.json').open('r') as f:
self.uuid_mapping = json.load(f)
self.set_random_seed(self.training.random_seed)
self._states: BatchedStates = None
self._eval_states: BatchedStates = None
self._word_vocab = word_vocab
self._ltl_vocab = ltl_vocab
self._relation_vocab = relation_vocab
self._node_vocab = node_vocab
self._action_vocab = action_vocab
self.graph_updater = GraphUpdater(
checkpoint=config.graph_updater.checkpoint,
vocab_path=config.io.vocab_dir,
word_vocab=self._word_vocab,
pretrained_embedding_path=(
config.io.pretrained_embedding_path),
node_vocab=self._node_vocab,
relation_vocab=self._relation_vocab,
config=config.graph_updater)
# self.graph_updater.eval()
# for param in self.graph_updater.parameters():
# param.requires_grad = Tr
self.policy_net = PolicyNet(
config=config, word_vocab=self._word_vocab,
ltl_vocab=self._ltl_vocab,
action_vocab=self._action_vocab,
pretrain=pretrain,
graph_updater=self.graph_updater,
context_length=self.training.context_length)
self.target_net = PolicyNet(
config=config, word_vocab=self._word_vocab,
ltl_vocab=self._ltl_vocab,
action_vocab=self._action_vocab,
pretrain=pretrain,
graph_updater=self.graph_updater,
context_length=self.training.context_length)
self.target_net.train()
self.update_target_net()
for param in self.target_net.parameters():
param.requires_grad = False
self.replay = ep = config.training.experience_replay
self.use_belief_graph = config.model.use_belief_graph or \
config.model.use_ltl
self.use_ltl = config.model.use_ltl
self.recurrent_memory = config.model.recurrent_memory
self.experience = PrioritizedExperienceReplay(
beta=ep['beta'],
batch_size=ep['batch_size'],
multi_step=ep['multi_step'],
max_episode=config.training.max_episode,
seed=config.training.random_seed,
alpha=ep['alpha'],
capacity=ep['capacity'],
discount_gamma_game_reward=ep['discount_gamma_game_reward'],
accumulate_reward_from_final=ep['accumulate_reward_from_final'],
recurrent_memory=self.recurrent_memory,
sample_update_from=ep['sample_update_from'],
sample_history_length=ep['sample_history_length']
)
self.epsilon = self.training.epsilon_greedy['anneal_from']
self.epsilon_scheduler = LinearSchedule(
schedule_timesteps=self.training.epsilon_greedy['episodes'],
initial_p=self.epsilon,
final_p=self.training.epsilon_greedy['anneal_to'])
self.optimizer, self.scheduler = get_optimizer(
self.policy_net, config.training.optimizer)
if self.training.cuda:
self.cuda()
def save_model(self, episode_no: int, path: Path,
best_train: int, best_eval: int) -> None:
data = {
'policy_net': self.policy_net.state_dict(),
'policy_net_word_vocab': self.policy_net.word_vocab,
'policy_net_ltl_vocab': self.policy_net.ltl_vocab,
'policy_net_action_vocab': self.policy_net.action_vocab,
'episode_no': episode_no,
'optimizer': self.optimizer.state_dict(),
            'scheduler': self.scheduler.state_dict() if self.scheduler is not None else None,
'best_train': best_train,
'best_eval': best_eval,
}
torch.save(data, path)
def load_model(self, path: Path) -> None:
data = torch.load(path)
if 'policy_net' not in data:
self.policy_net.load_state_dict(data)
return 0
self.policy_net.load_vocab(data['policy_net_word_vocab'],
data['policy_net_ltl_vocab'],
data['policy_net_action_vocab'])
self.policy_net.load_state_dict(data['policy_net'])
self.target_net.load_vocab(data['policy_net_word_vocab'],
data['policy_net_ltl_vocab'],
data['policy_net_action_vocab'])
self.update_target_net()
self.optimizer.load_state_dict(data['optimizer'])
if self.scheduler is not None:
self.scheduler.load_state_dict(data['scheduler'])
if self.training.cuda:
self.cuda()
return data['episode_no'], data['best_train'], data['best_eval']
def reset_eval_states(self,
obs: List[str],
actions: List[str],
infos: Dict[str, List[Any]]) -> BatchedStates:
config = self.evaluate if self.mode == AgentModes.eval else \
self.test_config
if config.difficulty_level in {'r', 'mixed'}:
diffs = [self.uuid_mapping[game.metadata['uuid']]
if game.metadata['uuid'] in self.uuid_mapping else 11
for game in infos['game']]
else:
diffs = [config.difficulty_level for _ in range(
config.batch_size)]
self.graph_updater.prev_graph_hidden_state = \
torch.zeros(
len(obs), self.graph_updater.config.block_hidden_dim).cuda()
belief_graphs = [BeliefGraph(
o, node_vocab=self._node_vocab,
relation_vocab=self._relation_vocab,
ground_truth=self.graph_updater.use_ground_truth_graph,
seperator='_') for
o in obs]
facts = self.graph_updater(
[f"{o} <sep> {a}" for o, a in zip(obs, actions)],
[bg.facts_as_triplets for bg in belief_graphs],
actions=actions,
infos=infos)
for i, facts_ in enumerate(facts):
belief_graphs[i].update(facts_)
belief_graphs[i].update_memory()
if self.use_ltl or (
self.training.graph_reward_lambda > 0
and self.training.graph_reward_filtered):
ltl_formulas = [LTL(
facts=bg.facts,
win_facts=win_facts,
fail_facts=fail_facts,
use_ground_truth=self.ltl.use_ground_truth,
reward_scale=self.ltl.reward_scale,
first_obs=first_obs,
as_bonus=self.ltl.as_bonus,
next_constrained=self.ltl.next_constrained,
difficulty=diff,
incomplete_cookbook=self.ltl.incomplete_cookbook,
single_reward=self.ltl.single_reward,
single_token_prop=self.ltl.single_token_prop,
reward_per_progression=self.training.reward_per_ltl_progression,
no_cookbook=self.ltl.no_cookbook,
negative_for_fail=self.ltl.negative_for_fail,
dont_progress=self.ltl.dont_progress)
for first_obs, bg, win_facts, fail_facts, diff in
zip(obs, belief_graphs, infos['win_facts'],
infos['fail_facts'],
diffs)]
else:
ltl_formulas = [None for _ in obs]
self._eval_states = BatchedStates(observations=obs,
actions=actions,
ltl_formulas=ltl_formulas,
belief_graphs=belief_graphs)
    @property
def eval_states(self) -> BatchedStates:
return self._eval_states
def update_eval_states(self, observations: List[str],
current_actions: List[str],
dones: List[bool],
infos: Dict[str, Any] = None) -> None:
if self.use_belief_graph:
facts = self.graph_updater(
[f"{obs} <sep> {a}" for obs, a in zip(
observations, current_actions)],
[bg.facts_as_triplets for bg in
self._eval_states.belief_graphs],
actions=current_actions,
infos=infos)
else:
facts = [None for _ in observations]
return self._eval_states.update(observations, current_actions,
facts, dones)
def reset_states(self,
obs: List[str],
actions: List[str],
infos: Dict[str, List[Any]]) -> BatchedStates:
if self.training.difficulty_level in {'r', 'mixed'}:
diffs = [self.uuid_mapping[game.metadata['uuid']]
if game.metadata['uuid'] in self.uuid_mapping else 11
for game in infos['game']]
else:
diffs = [self.training.difficulty_level for _ in range(
self.training.batch_size)]
self.graph_updater.prev_graph_hidden_state = \
torch.zeros(
len(obs), self.graph_updater.config.block_hidden_dim).cuda()
belief_graphs = [BeliefGraph(
o, node_vocab=self._node_vocab,
relation_vocab=self._relation_vocab,
ground_truth=self.graph_updater.use_ground_truth_graph,
seperator='_') for
o in obs]
facts = self.graph_updater(
[f"{o} <sep> {a}" for o, a in zip(obs, actions)],
[bg.facts_as_triplets for bg in belief_graphs],
actions=actions,
infos=infos)
for i, facts_ in enumerate(facts):
belief_graphs[i].update(facts_)
belief_graphs[i].update_memory()
if self.use_ltl or (
self.training.graph_reward_lambda > 0
and self.training.graph_reward_filtered):
ltl_formulas = [LTL(
facts=bg.facts,
win_facts=win_facts,
fail_facts=fail_facts,
use_ground_truth=self.ltl.use_ground_truth,
reward_scale=self.ltl.reward_scale,
as_bonus=self.ltl.as_bonus,
first_obs=first_obs,
next_constrained=self.ltl.next_constrained,
difficulty=diff,
incomplete_cookbook=self.ltl.incomplete_cookbook,
single_reward=self.ltl.single_reward,
single_token_prop=self.ltl.single_token_prop,
reward_per_progression=self.training.reward_per_ltl_progression,
no_cookbook=self.ltl.no_cookbook,
negative_for_fail=self.ltl.negative_for_fail,
dont_progress=self.ltl.dont_progress)
for first_obs, bg, win_facts, fail_facts, diff in
zip(obs, belief_graphs, infos['win_facts'],
infos['fail_facts'],
diffs)]
else:
ltl_formulas = [None for _ in obs]
self._states = BatchedStates(observations=obs,
actions=actions,
ltl_formulas=ltl_formulas,
belief_graphs=belief_graphs)
    @property
def states(self) -> BatchedStates:
return self._states
def update_states(self, observations: List[str],
current_actions: List[str],
dones: List[bool],
infos: Dict[str, Any] = None) -> None:
if self.use_belief_graph:
facts = self.graph_updater(
[f"{obs} <sep> {a}" for obs, a in zip(
observations, current_actions)],
[bg.facts_as_triplets for bg in self._states.belief_graphs],
actions=current_actions,
infos=infos)
else:
facts = [None for _ in observations]
return self._states.update(observations, current_actions, facts, dones)
def cuda(self) -> None:
if torch.cuda.is_available():
# torch.set_default_tensor_type(torch.cuda.FloatTensor)
self.policy_net.cuda()
self.target_net.cuda()
self.graph_updater.cuda()
# torch.backends.cudnn.deterministic = True
else:
logger.critical("CUDA set but no CUDA device available")
raise RuntimeError("CUDA set but no CUDA device available")
def set_random_seed(self, seed: int) -> None:
np.random.seed(seed)
torch.manual_seed(seed)
if self.training.cuda:
torch.cuda.manual_seed(seed)
def update_target_net(self) -> None:
self.target_net.load_state_dict(self.policy_net.state_dict())
def eval(self) -> None:
self.policy_net.eval()
self.mode = AgentModes.eval
def test(self) -> None:
self.policy_net.eval()
self.mode = AgentModes.test
def train(self) -> None:
self.policy_net.train()
self.mode = AgentModes.train
def games_max_scores(self, infos: Dict[str, Any]) -> List[float]:
scores = [game.max_score for game in infos["game"]]
original_scores = copy.copy(scores)
score_offset = np.zeros_like(scores)
# if self.training.use_negative_reward:
# score_offset = 1
if self.training.reward_ltl and self.policy_net.use_ltl:
optimal_path_lengths = np.array([np.sum([len(tuples) > 0 for
tuples in quest])
for quest in infos['win_facts']])
# if self.training.reward_per_ltl_progression:
# # +1 for cookbook
# score_offset = 1 + optimal_path_lengths
# else:
if self.training.reward_per_ltl_progression:
score_offset += (1 + optimal_path_lengths) \
* self.ltl.reward_scale
elif self.ltl.single_reward:
score_offset = np.ones_like(scores)
else:
states = self.states if self.mode == AgentModes.train else \
self.eval_states
_len = [len(s.ltl.translator.formulas) for s in states]
                score_offset = np.array(_len) * self.ltl.reward_scale
if self.training.reward_ltl_only:
return (original_scores,
(np.ones_like(scores) * score_offset).tolist())
if self.training.penalize_path_length > 0:
score_offset -= self.training.penalize_path_length * \
optimal_path_lengths
return (original_scores,
(np.array(scores) + score_offset).tolist())
# TODO maybe a better way to do this?
def adjust_scores(self, scores: List[int],
step_no: int,
state_update_rewards: List[float],
state_update_dones: List[int],
wons: List[bool],
dones: List[bool]) -> List[float]:
original_scores = copy.copy(scores)
reward_ltl = self.training.reward_ltl and self.policy_net.use_ltl
if reward_ltl:
# if not self.training.reward_per_ltl_progression:
# state_update_rewards = state_update_dones
pass
# if self.training.reward_ltl_only:
# return original_scores, state_update_rewards
else:
state_update_rewards = [0] * len(scores)
score_offsets = np.array(state_update_rewards)
if self.training.reward_ltl_positive_only and reward_ltl:
score_offsets[np.where(score_offsets < 0)] = 0
if reward_ltl and self.training.reward_ltl_only:
return original_scores, score_offsets
if self.training.penalize_path_length > 0:
score_offsets -= self.training.penalize_path_length * (step_no + 1)
scores = np.array(scores)
# if self.training.use_negative_reward:
# neg_offset = (np.array(dones, dtype=float) * -1) + \
# (np.array(wons, dtype=float))
# scores[np.where(np.array(neg_offset) < 0)] = 0
# score_offsets + neg_offset
adjusted_scores = (scores + score_offsets).tolist()
if self.training.reward_ltl and self.policy_net.use_ltl and \
self.training.persistent_negative_reward:
for i, ltl_score in enumerate(state_update_rewards):
if ltl_score < 0:
adjusted_scores[i] = ltl_score
if dones[i] and not wons[i]:
adjusted_scores[i] = -1
if self.training.graph_reward_lambda > 0:
adjusted_scores += self.training.graph_reward_lambda * \
self.get_graph_rewards()
return original_scores, adjusted_scores
def get_graph_rewards(self) -> np.ndarray:
states = self.states if self.mode == AgentModes.train else \
self.eval_states
return np.array([0 if len(state.past) < 2 else
state.belief_graph.graph_rewards(
prev_facts=state.past[-1].belief_graph._facts_as_triplets,
entities=state.ltl.entities if state.ltl is not None else None,
filtered=self.training.graph_reward_filtered)
for state in states])
@staticmethod
def env_infos(light: bool, win_formulas: bool) -> EnvInfos:
request_infos = EnvInfos()
request_infos.game = True
request_infos.facts = not light
request_infos.win_facts = win_formulas
request_infos.fail_facts = win_formulas
request_infos.description = not light
request_infos.won = True
request_infos.lost = True
request_infos.admissible_commands = True
return request_infos
def choose_actions_indices(
self, actions_scores: torch.Tensor,
actions_mask: torch.Tensor, admissible_actions: List[Actions],
greedy: bool, random: bool, eps: float) -> torch.Tensor:
if greedy and random:
logging.critical(
"Asked to act greedily and randomly which is not possible")
raise ValueError(
"Asked to act greedily and randomly which is not possible")
elif greedy:
return self.choose_maxQ_actions(actions_scores, actions_mask).cpu()
elif random:
return self.choose_random_actions(admissible_actions)
batch_size = len(actions_scores)
maxQ_filtered_actions = self.choose_maxQ_actions(
actions_scores, actions_mask).cpu()
random_filtered_actions = self.choose_random_actions(
admissible_actions)
r = np.random.uniform(low=0., high=1.,
size=batch_size)
# less than selects random
if eps is None:
eps = self.epsilon
less_than_e = torch.tensor(
r <= eps, dtype=torch.int64).reshape((batch_size, 1))
# stack both options, gather based on less than epsilon
return torch.gather(torch.stack((maxQ_filtered_actions,
random_filtered_actions), dim=1),
dim=-1, index=less_than_e)
def choose_maxQ_actions(
self, actions_scores: torch.Tensor,
actions_mask: torch.Tensor) -> torch.Tensor:
actions_scores += -torch.min(
actions_scores, -1, keepdim=True)[0] + 1e-2
actions_scores *= actions_mask
if self.mode == AgentModes.test and self.test_config.softmax:
ret = torch.tensor([])
for b, mask in zip(actions_scores, actions_mask):
tmp = torch.functional.F.softmax(
b / self.test_config.softmax_temperature).detach().cpu()
tmp *= mask.detach().cpu()
options = torch.nonzero(torch.isclose(
tmp, tmp.max(), atol=1e-4)).flatten()
ret = torch.cat(
(ret, torch.tensor([np.random.choice(options)])))
return torch.tensor(ret).int()
else:
return torch.argmax(actions_scores.detach().cpu(), -1,)
def choose_random_actions(
self, admissible_actions: List[Actions]) -> torch.Tensor:
return torch.tensor([
np.random.choice(
len(candidates)) for candidates in admissible_actions],
dtype=torch.int32)
def act(self,
states: BatchedStates,
admissible_actions: List[Actions],
greedy: bool = False,
random: bool = False,
previous_hidden: torch.Tensor = None,
previous_cell: torch.Tensor = None,
eps: float = None) -> Actions:
actions_scores, actions_mask = None, None
if not random:
actions_scores, actions_mask, previous_hidden, previous_cell = \
self.policy_net(
states, admissible_actions, previous_hidden, previous_cell)
with torch.no_grad():
next_actions_indices = self.choose_actions_indices(
actions_scores=actions_scores,
actions_mask=actions_mask,
admissible_actions=admissible_actions,
greedy=greedy,
random=random,
eps=eps)
return next_actions_indices.squeeze(), previous_hidden, previous_cell
def get_loss(self, episode_no: int,) -> Tuple[float, float]:
# pdb.set_trace()
_samples, _stepped_samples, _rewards, sample_indices, _weights = \
self.experience.get_samples(
episode_no, self.recurrent_memory)
if _samples is None:
return None, None
# losses, q_values = list(), list()
sample_indices = np.array(sample_indices)
all_q_values, td_errors, dones = list(), list(), list()
losses, mlm_losses = list(), list()
previous_hidden, previous_cell = None, None
for step_no, (samples, stepped_samples, rewards, weights) in \
enumerate(zip(_samples, _stepped_samples, _rewards, _weights)):
stepped_states, stepped_admissible_actions = list(), list()
states, admissible_actions, indices = list(), list(), list()
for sample, stepped_sample in zip(samples, stepped_samples):
states.append(sample.state)
admissible_actions.append(sample.admissible_actions)
indices.append(sample.action)
stepped_states.append(stepped_sample.state)
stepped_admissible_actions.append(
stepped_sample.admissible_actions)
dones.append(sample.done)
states = BatchedStates(states=states)
stepped_states = BatchedStates(states=stepped_states)
not_dones = 1 - torch.tensor(
dones, device=self.policy_net.device, dtype=torch.int64)
actions_scores, actions_mask, previous_hidden, previous_cell = \
self.policy_net(
states, admissible_actions, previous_hidden, previous_cell)
q_values = torch.gather(
actions_scores, 1, torch.tensor(
indices, dtype=torch.int64,
device=self.policy_net.device).reshape((-1, 1)))
if self.recurrent_memory and \
step_no < self.replay['sample_update_from']:
continue
            # Double DQN: select the next action with the policy network,
            # then evaluate it with the target network below
            with torch.no_grad():
stepped_actions_scores, stepped_actions_mask, _, _ = \
self.policy_net(
stepped_states, stepped_admissible_actions,
previous_hidden, previous_cell)
stepped_indices = self.choose_maxQ_actions(
stepped_actions_scores, stepped_actions_mask
).to(self.policy_net.device)
stepped_indices = stepped_indices.reshape((-1, 1))
stepped_actions_scores_tgt, stepped_actions_tgt_mask, _, _ = \
self.target_net(
stepped_states, stepped_admissible_actions,
previous_hidden, previous_cell)
# stepped_actions_scores_tgt *= not_dones.unsqueeze(1)
stepped_q_values = torch.gather(
stepped_actions_scores_tgt, 1, stepped_indices)
discount = torch.tensor(
(np.ones((stepped_indices.shape[0])) *
self.replay['discount_gamma_game_reward']) **
sample_indices[:, 1],
device=self.policy_net.device, dtype=torch.float64)
# dones = torch.tensor(
# [s.is_final for s in samples], dtype=torch.float64,
# device=self.policy_net.device)
# discount *= (1 - dones)
# pdb.set_trace()
rewards = torch.tensor(rewards, device=self.policy_net.device) + \
stepped_q_values.squeeze() * (discount * not_dones)
# ** self.replay['multi_step'])
rewards = rewards.type(torch.float32)
loss = F.smooth_l1_loss(
q_values.squeeze(), rewards, reduction='none')
loss *= torch.tensor(
weights, device=self.policy_net.device, dtype=torch.float64)
losses.append(loss)
loss = self.policy_net.mlm_loss(states.observations)
mlm_losses.append(loss)
all_q_values.append(q_values)
# q_values has shape [*mod-replay-batch-size*, 1]
# rewards has shape [*mod-replay-batch-size*,
# *mod-replay-batch-size*]
abs_td_err = torch.abs(q_values.squeeze() - rewards)
td_errors.append(abs_td_err)
_range = 1
if self.recurrent_memory:
_range = self.replay['sample_history_length'] - \
self.replay['sample_update_from']
for i in range(_range):
abs_td_err = td_errors[i]
td_errors.append(abs_td_err)
new_priorities = abs_td_err + self.replay['eps']
self.experience.update_priorities(
sample_indices[:, 0] + i + (self.replay['sample_update_from']
if self.recurrent_memory else 0),
new_priorities.detach().cpu())
loss = torch.stack(losses).mean()
q_values = torch.stack(all_q_values).mean()
if any(mlm_losses):
loss = loss + self.training.mlm_alpha * \
torch.stack(mlm_losses).mean()
return loss, q_values
def update_dqn(self, episode_no: int) -> Tuple[float, float]:
loss, q_values = self.get_loss(episode_no)
if loss is None:
return None, None
self.policy_net.zero_grad()
self.optimizer.zero_grad()
loss.backward()
if np.greater(self.training.optimizer['clip_grad_norm'], 0.):
torch.nn.utils.clip_grad_norm_(
self.policy_net.parameters(),
self.training.optimizer['clip_grad_norm'])
self.optimizer.step()
return torch.mean(loss), torch.mean(q_values)
def finalize_episode(self, episode_no: int) -> None:
if (episode_no + self.training.batch_size) % \
self.training.target_net_update_frequency <= \
episode_no % self.training.target_net_update_frequency:
self.update_target_net()
if episode_no < self.training.learn_from_this_episode:
return
if episode_no < self.training.epsilon_greedy['episodes'] + \
self.training.learn_from_this_episode:
self.epsilon = self.epsilon_scheduler.value(
episode_no - self.training.learn_from_this_episode)
self.epsilon = max(self.epsilon, 0.)
if self.scheduler is not None and episode_no > \
self.training.learn_from_this_episode:
for _ in range(self.training.batch_size):
self.scheduler.step()
| 29,311 | 44.234568 | 80 |
py
|
LTL-GATA
|
LTL-GATA-main/src/segment_tree.py
|
from typing import Callable
import operator
class SegmentTree:
def __init__(self, capacity: int,
operation: Callable, neutral_element: int) -> None:
"""
Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
        Can be used as a regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
:param capacity: (int) Total size of the array - must be a power of two
:param operation: (lambda (Any, Any): Any) operation for combining
elements (eg. sum, max) must form a
mathematical group together with the set of possible values for
array elements (i.e. be associative)
:param neutral_element: (Any) neutral element for the operation above.
eg. float('-inf') for max and 0 for sum.
"""
assert capacity > 0 and capacity & (
capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._operation = operation
self._value = [neutral_element for _ in range(2 * capacity)]
def _reduce_helper(self, start: int, end: int, node: int,
node_start: int, node_end: int) -> float:
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1,
mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(
mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start: int = 0, end: int = None) -> float:
"""
Returns result of applying `self.operation`
to a contiguous subsequence of the array.
        self.operation(arr[start], operation(arr[start+1],
            operation(..., arr[end])))
        :param start: (int) beginning of the subsequence
        :param end: (int) end of the subsequence
:return: (Any) result of reducing self.operation over the specified
range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
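    # Illustrative sketch (not part of the original file): with
    # operation=operator.add and neutral_element=0, the tree behaves like a
    # fixed-size array with O(log n) range sums.
    #   >>> tree = SegmentTree(4, operator.add, 0)
    #   >>> tree[0], tree[2] = 1.0, 3.0
    #   >>> tree.reduce(0, 3)   # arr[0] + arr[1] + arr[2]
    #   4.0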
def __setitem__(self, idx: int, val: float) -> None:
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx: int) -> float:
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity: int):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start: int = 0, end: int = None) -> float:
"""
Returns arr[start] + ... + arr[end]
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr),
can be None for len(arr) - 1)
:return: (Any) reduction of SumSegmentTree
"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum: float) -> int:
"""
Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
        If the array values are probabilities, this function
        allows sampling indices according to the discrete
        probability distribution efficiently.
:param prefixsum: (float) upperbound on the sum of array prefix
:return: (int) highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
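    # Illustrative sketch (not part of the original file): with leaf values
    # [1, 2, 3, 4] the prefix sums are [1, 3, 6, 10], so a query of 3.5
    # lands on index 2; this is how proportional (prioritized) sampling
    # turns a uniform draw in [0, sum) into a transition index.
    #   >>> tree = SumSegmentTree(4)
    #   >>> for i, v in enumerate([1., 2., 3., 4.]):
    #   ...     tree[i] = v
    #   >>> tree.find_prefixsum_idx(3.5)
    #   2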
class MinSegmentTree(SegmentTree):
def __init__(self, capacity: int):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start: int = 0, end: int = None) -> float:
"""
Returns min(arr[start], ..., arr[end])
:param start: (int) start position of the reduction (must be >= 0)
:param end: (int) end position of the reduction (must be < len(arr),
can be None for len(arr) - 1)
:return: (Any) reduction of MinSegmentTree
"""
return super(MinSegmentTree, self).reduce(start, end)
| 5,601 | 36.099338 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/experience_replay.py
|
from typing import Optional, List, Tuple
import logging
import pdb
from gutils import FixedSizeList
import numpy as np
from segment_tree import MinSegmentTree, SumSegmentTree
from utils import LinearSchedule
from components import Sample
logger = logging.getLogger()
class PrioritizedExperienceReplay:
def __init__(self,
beta: float,
batch_size: int,
multi_step: int,
max_episode: int,
seed: int = None,
alpha: float = 0.,
capacity: int = 100_000,
discount_gamma_game_reward: float = 1.,
accumulate_reward_from_final: bool = False,
recurrent_memory: bool = False,
sample_update_from: int = 0,
sample_history_length: int = 1) -> None:
self._max_priority = 1.
self.multi_step = multi_step
self.batch_size = batch_size
self.recurrent_memory = recurrent_memory
self.sample_update_from = sample_update_from
self.sample_history_length = sample_history_length
self.beta_scheduler = LinearSchedule(
schedule_timesteps=max_episode,
initial_p=beta, final_p=1.0)
self._buffer = FixedSizeList(capacity=capacity)
self._rng = np.random.RandomState(seed)
self.capacity = capacity
it_capacity = 2 ** int(np.ceil(np.log2(capacity)))
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
assert np.greater_equal(alpha, 0.)
self._alpha = alpha
self._discount_gamma_game_reward = discount_gamma_game_reward
self._accumulate_reward_from_final = accumulate_reward_from_final
def __len__(self) -> int:
return len(self._buffer)
@property
def at_capacity(self) -> bool:
return len(self._buffer) == self.capacity
@property
def buffer(self):
return self._buffer
def update_priorities(self, indices: List[int],
priorities: List[float]) -> bool:
for idx, priority in zip(indices, priorities):
if np.greater(priority, 0.):
assert 0 <= idx < len(self)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
else:
logger.error(f"Something wrong with priority: {priority}")
return False
return True
# TODO improve efficiency
def avg_rewards(self):
if len(self) == 0:
return 0.
return np.mean([sample.reward for sample in self._buffer
if sample is not None])
def add(self, sample: Sample) -> None:
self._buffer.append(sample)
idx = len(self) - 1
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def get_next_final_idx(self, idx: int) -> Optional[int]:
for i, sample in enumerate(self._buffer[idx:]):
if sample.is_final:
return i + idx
return None
def _sample_proportional(self) -> List[int]:
return [self._it_sum.find_prefixsum_idx(
self._rng.random() * self._it_sum.sum(
0, len(self) - 1)) for _ in range(self.batch_size)]
def get_samples_and_stepped(self, idx: int, n: int,
recurrent_memory: bool) -> List[Sample]:
assert n > 0
# if n == 1:
# if self._buffer[idx].is_final:
# return tuple([None for _ in range(3)])
# else:
# if np.any([item.is_final for item in self._buffer[idx: idx + n]]):
# return tuple([None for _ in range(3)])
next_final_idx = self.get_next_final_idx(idx)
if next_final_idx is None or idx + n > next_final_idx:
# n = idx - next_final_idx
return tuple([None for _ in range(3)])
samples, stepped_samples, rewards = list(), list(), list()
iteration_count = 1
if recurrent_memory:
iteration_count = n
n = 1
for j in range(iteration_count):
# n + 1 or just n?
length = next_final_idx - (idx + j) + 1 if \
self._accumulate_reward_from_final else n if not \
recurrent_memory else 1
sample = self._buffer[idx + j]
stepped_sample = self._buffer[idx + n + j]
_rewards = [self._discount_gamma_game_reward ** i *
self._buffer[idx + j + i].reward for
i in range(length)]
reward = np.sum(_rewards)
samples.append(sample)
stepped_samples.append(stepped_sample)
rewards.append(reward)
return samples, stepped_samples, rewards
def get_samples(self, episode_no: int,
recurrent_memory: bool = False
) -> Tuple[List[Sample],
List[Sample],
List[float],
List[Tuple[int, int]],
List[float]]:
logger.debug("Getting samples from ER")
if len(self) < self.batch_size:
return tuple([None for _ in range(5)])
beta = self.beta_scheduler.value(episode_no)
assert np.greater(beta, 0.)
idxs = self._sample_proportional()
ns = self._rng.randint(1, self.multi_step + 1, size=self.batch_size)
all_samples, all_stepped_samples, all_rewards, weights = \
[[list() for _ in range(self.sample_history_length if
self.recurrent_memory else 1)]
for i in range(4)]
indices = list()
for idx, n in zip(idxs, ns):
samples, stepped_samples, rewards = \
self.get_samples_and_stepped(
idx, self.sample_history_length if self.recurrent_memory
else n,
recurrent_memory=self.recurrent_memory)
if samples is None:
continue
if self.recurrent_memory:
indices.append((idx, self.sample_history_length))
else:
indices.append((idx, n))
for step in range(self.sample_history_length if
self.recurrent_memory else 1):
all_rewards[step].append(rewards[step])
all_samples[step].append(samples[step])
all_stepped_samples[step].append(stepped_samples[step])
if len(indices) == 0:
return tuple([None for _ in range(5)])
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self)) ** (-beta)
for step in range(self.sample_history_length if
self.recurrent_memory else 1):
for (idx, n) in indices:
p_sample = self._it_sum[idx + step] / self._it_sum.sum()
weight = (p_sample * len(self)) ** (-beta)
weights[step].append(weight / max_weight)
return all_samples, all_stepped_samples, all_rewards, indices, weights
| 7,334 | 38.86413 | 80 |
py
|
LTL-GATA
|
LTL-GATA-main/src/graph_updater.py
|
from typing import Tuple, List, Dict, Any
from pathlib import Path
import copy
import torch.nn.functional as F
import numpy as np
import torch
from utils import to_pt, max_len, pad_sequences, to_np
from model.layers import (
CQAttention, PointerSoftmax,
DecoderBlock, EncoderBlock, Embedding,
masked_softmax, SelfAttention, masked_mean,
DecoderBlockForObsGen, ObservationDiscriminator
)
from components import Vocabulary
class RelationalGraphConvolution(torch.nn.Module):
"""
Simple R-GCN layer, modified from theano/keras implementation from
https://github.com/tkipf/relational-gcn
We also consider relation representation here (relation labels matter)
"""
def __init__(self, entity_input_dim, relation_input_dim,
num_relations, out_dim, bias=True, num_bases=0):
super(RelationalGraphConvolution, self).__init__()
self.entity_input_dim = entity_input_dim
self.relation_input_dim = relation_input_dim
self.out_dim = out_dim
self.num_relations = num_relations
self.num_bases = num_bases
if self.num_bases > 0:
self.bottleneck_layer = torch.nn.Linear(
(self.entity_input_dim + self.relation_input_dim) *
self.num_relations, self.num_bases, bias=False)
self.weight = torch.nn.Linear(
self.num_bases, self.out_dim, bias=False)
else:
self.weight = torch.nn.Linear(
(self.entity_input_dim + self.relation_input_dim) *
self.num_relations, self.out_dim, bias=False)
if bias:
self.bias = torch.nn.Parameter(torch.FloatTensor(self.out_dim))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.weight.weight.data)
if self.bias is not None:
self.bias.data.fill_(0)
def forward(self, node_features, relation_features, adj):
# node_features: batch x num_entity x in_dim
# relation_features: batch x num_relation x in_dim
# adj: batch x num_relations x num_entity x num_entity
supports = []
for relation_idx in range(self.num_relations):
# batch x 1 x in_dim
_r_features = relation_features[:, relation_idx: relation_idx + 1]
_r_features = _r_features.repeat(
1, node_features.size(1), 1) # batch x num_entity x in_dim
# batch x num_entity x in_dim+in_dim
supports.append(torch.bmm(adj[:, relation_idx], torch.cat(
[node_features, _r_features], dim=-1)))
# batch x num_entity x (in_dim+in_dim)*num_relations
supports = torch.cat(supports, dim=-1)
if self.num_bases > 0:
supports = self.bottleneck_layer(supports)
output = self.weight(supports) # batch x num_entity x out_dim
if self.bias is not None:
return output + self.bias
else:
return output
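    # Illustrative shape sketch (toy sizes, not from the original file):
    # one R-GCN layer maps batch x num_entity x entity_input_dim node
    # features to batch x num_entity x out_dim, mixing per-relation
    # messages through the adjacency tensor.
    #   >>> layer = RelationalGraphConvolution(
    #   ...     entity_input_dim=8, relation_input_dim=8,
    #   ...     num_relations=3, out_dim=16)
    #   >>> nodes = torch.rand(2, 5, 8)      # batch x num_entity x in_dim
    #   >>> relations = torch.rand(2, 3, 8)  # batch x num_relation x in_dim
    #   >>> adj = torch.rand(2, 3, 5, 5)     # batch x num_relation x n x n
    #   >>> layer(nodes, relations, adj).shape
    #   torch.Size([2, 5, 16])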
class StackedRelationalGraphConvolution(torch.nn.Module):
'''
input: entity features: batch x num_entity x input_dim
relation features: batch x num_relations x input_dim
adjacency matrix: batch x num_relations x num_entity x num_entity
'''
def __init__(self, entity_input_dim, relation_input_dim,
num_relations, hidden_dims, num_bases,
use_highway_connections=False, dropout_rate=0.0,
real_valued_graph=False):
super(StackedRelationalGraphConvolution, self).__init__()
self.entity_input_dim = entity_input_dim
self.relation_input_dim = relation_input_dim
self.hidden_dims = hidden_dims
self.num_relations = num_relations
self.dropout_rate = dropout_rate
self.num_bases = num_bases
self.real_valued_graph = real_valued_graph
self.nlayers = len(self.hidden_dims)
self.stack_gcns()
self.use_highway_connections = use_highway_connections
if self.use_highway_connections:
self.stack_highway_connections()
def stack_highway_connections(self):
highways = [torch.nn.Linear(
self.hidden_dims[i], self.hidden_dims[i]) for i in range(
self.nlayers)]
self.highways = torch.nn.ModuleList(highways)
self.input_linear = torch.nn.Linear(
self.entity_input_dim, self.hidden_dims[0])
def stack_gcns(self):
gcns = [RelationalGraphConvolution(
self.entity_input_dim if i == 0 else self.hidden_dims[i - 1],
self.relation_input_dim,
self.num_relations, self.hidden_dims[i],
num_bases=self.num_bases)
for i in range(self.nlayers)]
self.gcns = torch.nn.ModuleList(gcns)
def forward(self, node_features, relation_features, adj):
x = node_features
for i in range(self.nlayers):
if self.use_highway_connections:
if i == 0:
prev = self.input_linear(x)
else:
prev = x.clone()
# batch x num_nodes x hid
x = self.gcns[i](x, relation_features, adj)
if self.real_valued_graph:
x = torch.sigmoid(x)
else:
x = F.relu(x)
x = F.dropout(x, self.dropout_rate, training=self.training)
if self.use_highway_connections:
gate = torch.sigmoid(self.highways[i](x))
x = gate * x + (1 - gate) * prev
return x
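    # Illustrative sketch (toy sizes, not from the original file): stacking
    # two layers with hidden_dims=[16, 16] keeps the entity axis and maps
    # the feature axis to the last hidden dim; a highway gate is applied
    # per layer when use_highway_connections=True.
    #   >>> rgcn = StackedRelationalGraphConvolution(
    #   ...     entity_input_dim=8, relation_input_dim=8, num_relations=3,
    #   ...     hidden_dims=[16, 16], num_bases=0)
    #   >>> rgcn(torch.rand(2, 5, 8), torch.rand(2, 3, 8),
    #   ...      torch.rand(2, 3, 5, 5)).shape
    #   torch.Size([2, 5, 16])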
class GraphUpdater(torch.nn.Module):
def __init__(self, checkpoint: Path,
vocab_path: Path,
config: Dict[str, Any],
word_vocab: Vocabulary,
pretrained_embedding_path: Path = None,
relation_vocab: Vocabulary = None,
node_vocab: Vocabulary = None,
**kwargs) -> None:
super(GraphUpdater, self).__init__(**kwargs)
self.config = config
self._dummy = torch.nn.Parameter(torch.empty(0))
self._facts: List[Tuple[str, str, str]] = list()
self.use_ground_truth_graph = config.use_ground_truth_graph
self._word_vocab = Vocabulary(word_vocab.original_tokens,
name='GraphWordVocab',
original_only=True)
self._node_vocab = node_vocab
self._relation_vocab = relation_vocab
self.origin_relation_number = int((
len(self._relation_vocab) - 1) / 2)
self.word_embedding = Embedding(
embedding_size=config.word_embedding_size,
vocab_size=len(self._word_vocab),
id2word=self._word_vocab,
dropout_rate=config.embedding_dropout,
load_pretrained=True,
trainable=False,
embedding_oov_init="random",
pretrained_embedding_path=str(pretrained_embedding_path))
self.word_embedding_prj = torch.nn.Linear(
config.word_embedding_size, config.block_hidden_dim,
bias=False)
self.node_embeddings, self.relation_embeddings = None, None
self.node_embedding = Embedding(
embedding_size=config.node_embedding_size,
vocab_size=len(node_vocab),
trainable=True,
dropout_rate=config.embedding_dropout)
self.relation_embedding = Embedding(
embedding_size=config.relation_embedding_size,
vocab_size=len(relation_vocab),
trainable=True,
dropout_rate=config.embedding_dropout)
self.rgcns = StackedRelationalGraphConvolution(
entity_input_dim=config.node_embedding_size +
config.block_hidden_dim,
relation_input_dim=config.relation_embedding_size +
config.block_hidden_dim,
num_relations=len(self._relation_vocab),
hidden_dims=config.gcn_hidden_dims,
num_bases=config.gcn_num_bases,
use_highway_connections=config.gcn_highway_connections,
dropout_rate=config.gcn_dropout,
real_valued_graph=config.use_ground_truth_graph or
config.real_valued)
self.real_valued_graph = config.real_valued
self.self_attention = None
if config.use_self_attention:
self.self_attention = SelfAttention(
config.block_hidden_dim, config.n_heads, 0.)
if not config.use_ground_truth_graph:
if self.real_valued_graph:
# TODO CHANGE THIS TO 50 = batch_size
self.prev_graph_hidden_state = None
self.curr_mat = None
self.obs_gen_attention = CQAttention(
block_hidden_dim=config.block_hidden_dim,
dropout=config.gcn_dropout)
self.obs_gen_attention_prj = torch.nn.Linear(
config.block_hidden_dim * 4, config.block_hidden_dim,
bias=False)
self.obs_gen_decoder = torch.nn.ModuleList([
DecoderBlockForObsGen(
ch_num=config.block_hidden_dim, k=5,
block_hidden_dim=config.block_hidden_dim,
n_head=config.n_heads,
dropout=config.block_dropout)
for _ in range(config.decoder_layers)])
self.obs_gen_tgt_word_prj = torch.nn.Linear(
config.block_hidden_dim, len(self._word_vocab), bias=False)
self.obs_gen_linear_1 = torch.nn.Linear(
config.block_hidden_dim, config.block_hidden_dim)
self.obs_gen_linear_2 = torch.nn.Linear(
config.block_hidden_dim, int(
len(self._relation_vocab) / 2) *
len(self._node_vocab) * len(self._node_vocab))
self.obs_gen_attention_to_rnn_input = torch.nn.Linear(
config.block_hidden_dim * 4, config.block_hidden_dim)
self.obs_gen_graph_rnncell = torch.nn.GRUCell(
config.block_hidden_dim, config.block_hidden_dim)
self.observation_discriminator = ObservationDiscriminator(
config.block_hidden_dim)
self.max_target_length = config.max_target_length
# Accounts for adding "self" and duplicate "_reverse"
# see agents.py:79-82
self.cmd_gen_attention = CQAttention(
block_hidden_dim=config.block_hidden_dim,
dropout=config.attention_dropout)
self.cmd_gen_attention_prj = torch.nn.Linear(
config.block_hidden_dim * 4,
config.block_hidden_dim, bias=False)
self.pointer_softmax = PointerSoftmax(
input_dim=config.block_hidden_dim,
hidden_dim=config.block_hidden_dim)
self.tgt_word_prj = torch.nn.Linear(
config.block_hidden_dim,
len(self._word_vocab),
bias=False)
self.decoder = torch.nn.ModuleList([
DecoderBlock(ch_num=config.block_hidden_dim, k=5,
block_hidden_dim=config.block_hidden_dim,
n_head=config.n_heads,
dropout=config.block_dropout)
for _ in range(config.decoder_layers)])
self.encoder_for_pretraining_tasks = torch.nn.ModuleList([
EncoderBlock(conv_num=config.encoder_conv_num,
ch_num=config.block_hidden_dim,
k=5, block_hidden_dim=config.block_hidden_dim,
n_head=config.n_heads,
dropout=config.block_dropout)
for _ in range(config.encoder_layers)])
self.encoder_conv_num = config.encoder_conv_num
if config.from_pretrained:
self.load_checkpoint(checkpoint)
def load_checkpoint(self, checkpoint: Path) -> None:
pretrained_dict = torch.load(checkpoint)
model_dict = self.state_dict()
del model_dict['_dummy']
        for k in model_dict:
            assert k in pretrained_dict, \
                f"Pretrained checkpoint is missing parameter '{k}'"
model_dict = {k: v for k, v in pretrained_dict.items() if
k in model_dict}
self.load_state_dict(model_dict, strict=False)
@property
def device(self) -> str:
return self._dummy.device
def tokenize(self, inputs: List[str]) -> torch.Tensor:
word_list = [item.split() for item in inputs]
word_id_list = [[self._word_vocab[tok] for tok in tokens]
for tokens in word_list]
input_word = pad_sequences(
word_id_list, maxlen=max_len(word_id_list)).astype('int32')
input_word = to_pt(input_word, self.device != 'cpu')
return input_word
def encode_text(self,
inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
embeddings, mask = self.embed(inputs) # batch x seq_len x emb
# batch x seq_len x seq_len
squared_mask = torch.bmm(mask.unsqueeze(-1), mask.unsqueeze(1))
encoding_sequence = embeddings
for i, encoder in enumerate(self.encoder_for_pretraining_tasks):
# batch x time x enc
encoding_sequence = encoder(
encoding_sequence, squared_mask, i * (
self.encoder_conv_num + 2) + 1,
len(self.encoder_for_pretraining_tasks))
return encoding_sequence, mask
def embed(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
word_embeddings, mask = self.word_embedding(
inputs) # batch x time x emb
word_embeddings = self.word_embedding_prj(word_embeddings)
word_embeddings = word_embeddings * \
mask.unsqueeze(-1) # batch x time x hid
return word_embeddings, mask
def get_subsequent_mask(self, seq: torch.Tensor) -> torch.Tensor:
''' For masking out the subsequent info. '''
_, length = seq.size()
subsequent_mask = torch.triu(torch.ones(
(length, length)), diagonal=1).float()
subsequent_mask = 1.0 - subsequent_mask
if seq.is_cuda:
subsequent_mask = subsequent_mask.cuda()
subsequent_mask = subsequent_mask.unsqueeze(0) # 1 x time x time
return subsequent_mask
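    # Illustrative sketch (not part of the original file): for a length-3
    # input, the subsequent mask is lower-triangular, so position i may
    # only attend to positions <= i:
    #   tensor([[[1., 0., 0.],
    #            [1., 1., 0.],
    #            [1., 1., 1.]]])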
def decode(self, inputs: torch.Tensor,
h_og: torch.Tensor,
obs_mask: torch.Tensor,
h_go: torch.Tensor,
node_mask: torch.Tensor,
input_obs: torch.Tensor) -> torch.Tensor:
trg_embeddings, trg_mask = self.embed(
inputs) # batch x target_len x emb
# batch x target_len x target_len
trg_mask_square = torch.bmm(
trg_mask.unsqueeze(-1), trg_mask.unsqueeze(1))
trg_mask_square = trg_mask_square * \
self.get_subsequent_mask(
inputs) # batch x target_len x target_len
# batch x target_len x obs_len
obs_mask_square = torch.bmm(
trg_mask.unsqueeze(-1), obs_mask.unsqueeze(1))
# batch x target_len x node_len
node_mask_square = torch.bmm(
trg_mask.unsqueeze(-1), node_mask.unsqueeze(1))
trg_decoder_output = trg_embeddings
for i, decoder in enumerate(self.decoder):
trg_decoder_output, target_target_representations, \
target_source_representations, target_source_attention = \
decoder(
trg_decoder_output, trg_mask, trg_mask_square,
h_og, obs_mask_square, h_go, node_mask_square, i * 3 + 1,
len(self.decoder)) # batch x time x hid
trg_decoder_output = self.tgt_word_prj(trg_decoder_output)
trg_decoder_output = masked_softmax(
trg_decoder_output, m=trg_mask.unsqueeze(-1), axis=-1)
output = self.pointer_softmax(
target_target_representations, target_source_representations,
trg_decoder_output, trg_mask, target_source_attention,
obs_mask, input_obs)
return output
def get_word_input(self, input_strings):
word_list = [item.split() for item in input_strings]
word_id_list = [[self._word_vocab[tok] for tok in tokens]
for tokens in word_list]
input_word = pad_sequences(
word_id_list, maxlen=max_len(word_id_list)).astype('int32')
input_word = to_pt(input_word, self.device != 'cpu')
return input_word
def get_graph_node_name_input(self):
res = copy.copy(self._node_vocab)
input_node_name = self.get_word_input(res) # num_node x words
return input_node_name
def get_graph_relation_name_input(self):
res = copy.copy(self._relation_vocab)
res = [item.replace("_", " ") for item in res]
input_relation_name = self.get_word_input(res) # num_node x words
return input_relation_name
def get_graph_relation_representations(self, relation_names_word_ids):
# relation_names_word_ids: num_relation x num_word
relation_name_embeddings, _mask = self.embed(
relation_names_word_ids) # num_relation x num_word x emb
_mask = torch.sum(_mask, -1) # num_relation
relation_name_embeddings = torch.sum(
relation_name_embeddings, 1) # num_relation x hid
tmp = torch.eq(_mask, 0).float()
if relation_name_embeddings.is_cuda:
tmp = tmp.cuda()
_mask = _mask + tmp
relation_name_embeddings = relation_name_embeddings / \
_mask.unsqueeze(-1)
relation_name_embeddings = relation_name_embeddings.unsqueeze(
0) # 1 x num_relation x emb
relation_ids = np.arange(len(self._relation_vocab)) # num_relation
relation_ids = to_pt(relation_ids,
cuda=relation_names_word_ids.is_cuda,
type='long').unsqueeze(0) # 1 x num_relation
relation_embeddings, _ = self.relation_embedding(
relation_ids) # 1 x num_relation x emb
# 1 x num_relation x emb+emb
relation_embeddings = torch.cat(
[relation_name_embeddings, relation_embeddings], dim=-1)
return relation_embeddings
def get_graph_node_representations(self, node_names_word_ids):
# node_names_word_ids: num_node x num_word
node_name_embeddings, _mask = self.embed(
node_names_word_ids) # num_node x num_word x emb
_mask = torch.sum(_mask, -1) # num_node
node_name_embeddings = torch.sum(
node_name_embeddings, 1) # num_node x hid
tmp = torch.eq(_mask, 0).float()
if node_name_embeddings.is_cuda:
tmp = tmp.cuda()
_mask = _mask + tmp
node_name_embeddings = node_name_embeddings / _mask.unsqueeze(-1)
node_name_embeddings = node_name_embeddings.unsqueeze(
0) # 1 x num_node x emb
node_ids = np.arange(len(self._node_vocab)) # num_node
node_ids = to_pt(node_ids,
cuda=node_names_word_ids.is_cuda,
type='long').unsqueeze(0) # 1 x num_node
node_embeddings, _ = self.node_embedding(
node_ids) # 1 x num_node x emb
# 1 x num_node x emb+emb
node_embeddings = torch.cat(
[node_name_embeddings, node_embeddings], dim=-1)
return node_embeddings
def get_graph_adjacency_matrix(self, triplets):
adj = np.zeros((len(triplets), len(self._relation_vocab), len(
self._node_vocab), len(self._node_vocab)), dtype="float32")
for b in range(len(triplets)):
node_exists = set()
for t in triplets[b]:
node1, node2, relation = t
assert node1 in self._node_vocab, \
node1 + " is not in node vocab"
assert node2 in self._node_vocab, \
node2 + " is not in node vocab"
assert relation in self._relation_vocab, \
relation + " is not in relation vocab"
node1_id, node2_id, relation_id = \
self._node_vocab[node1], self._node_vocab[node2], \
self._relation_vocab[relation]
adj[b][relation_id][node1_id][node2_id] = 1.0
adj[b][relation_id + self.origin_relation_number][
node2_id][node1_id] = 1.0
node_exists.add(node1_id)
node_exists.add(node2_id)
# self relation
for node_id in list(node_exists):
adj[b, -1, node_id, node_id] = 1.0
adj = to_pt(adj, self.device != 'cpu', type='float')
return adj
def encode_graph(self, graph_input):
# batch x num_node x emb+emb
if self.node_embeddings is None:
node_names_word_ids = self.get_graph_node_name_input()
self.node_embeddings = self.get_graph_node_representations(
node_names_word_ids) # 1 x num_node x emb+emb
if self.relation_embeddings is None:
relation_names_word_ids = self.get_graph_relation_name_input()
self.relation_embeddings = self.get_graph_relation_representations(
relation_names_word_ids) # 1 x num_node x emb+emb
if isinstance(graph_input, list):
input_adjacency_matrices = self.get_graph_adjacency_matrix(
graph_input)
elif isinstance(graph_input, torch.Tensor):
input_adjacency_matrices = graph_input
else:
raise NotImplementedError
input_adjacency_matrices = input_adjacency_matrices.to(self.device)
node_embeddings = self.node_embeddings.repeat(
input_adjacency_matrices.size(0), 1, 1)
# batch x num_relation x emb+emb
relation_embeddings = self.relation_embeddings.repeat(
input_adjacency_matrices.size(0), 1, 1)
# batch x num_node x enc
node_encoding_sequence = self.rgcns(
node_embeddings, relation_embeddings, input_adjacency_matrices)
if self.use_ground_truth_graph:
node_mask = torch.ones(node_encoding_sequence.size(
0), node_encoding_sequence.size(1)) # batch x num_node
if node_encoding_sequence.is_cuda:
node_mask = node_mask.cuda()
else:
# batch x num_node x num_node
node_mask = torch.sum(input_adjacency_matrices[:, :-1, :, :], 1)
node_mask = torch.sum(node_mask, -1) + \
torch.sum(node_mask, -2) # batch x num_node
node_mask = torch.gt(node_mask, 0).float()
node_encoding_sequence = node_encoding_sequence * \
node_mask.unsqueeze(-1)
if self.self_attention is not None:
mask_squared = torch.bmm(
node_mask.unsqueeze(-1), node_mask.unsqueeze(1))
node_encoding_sequence, _ = self.self_attention(
node_encoding_sequence, mask_squared, node_encoding_sequence,
node_encoding_sequence)
return node_encoding_sequence, node_mask
def hidden_to_adjacency_matrix(self, hidden, batch_size):
num_node = len(self._node_vocab)
num_relation = len(self._relation_vocab)
if hidden is None:
            adjacency_matrix = torch.zeros(
                batch_size, num_relation, num_node, num_node,
                device=self.device)
else:
adjacency_matrix = torch.tanh(self.obs_gen_linear_2(
F.relu(self.obs_gen_linear_1(
hidden)))).view(batch_size,
int(num_relation / 2), num_node, num_node)
adjacency_matrix = adjacency_matrix.repeat(1, 2, 1, 1)
            # the second half of the relation axis holds the "_reverse"
            # relations, i.e. the transpose of each forward adjacency slice
            for i in range(int(num_relation / 2)):
adjacency_matrix[:, int(
num_relation / 2) + i] = \
adjacency_matrix[:, i].permute(0, 2, 1)
return adjacency_matrix
@torch.no_grad()
def forward(
self, observations: List[str],
graph: List[Tuple[str, str, str]],
actions: List[str] = None,
infos: Dict[str, Any] = None) -> List[Tuple[str, str, str]]:
if self.use_ground_truth_graph:
if infos is None:
raise ValueError(
"Can't have 'None' infos for ground truth graph")
if 'facts' not in infos.keys():
raise ValueError(
"Must have 'facts' as infos key. Set EnvInfos(facts=True)")
return infos['facts']
elif self.real_valued_graph:
            prev_graph_hidden_state = None
            if self.prev_graph_hidden_state is not None:
                prev_graph_hidden_state = self.prev_graph_hidden_state.detach()
# TE-encode
input_obs = self.get_word_input(observations)
prev_action_word_ids = self.get_word_input(actions)
prev_action_encoding_sequence, prev_action_mask = self.encode_text(
prev_action_word_ids)
obs_encoding_sequence, obs_mask = self.encode_text(input_obs)
prev_adjacency_matrix = self.hidden_to_adjacency_matrix(
prev_graph_hidden_state, batch_size=len(
observations))
node_encoding_sequence, node_mask = self.encode_graph(
prev_adjacency_matrix)
h_ag = self.obs_gen_attention(
prev_action_encoding_sequence,
node_encoding_sequence, prev_action_mask, node_mask)
h_ga = self.obs_gen_attention(
node_encoding_sequence, prev_action_encoding_sequence,
node_mask, prev_action_mask)
h_ag = self.obs_gen_attention_prj(
h_ag)
h_ga = self.obs_gen_attention_prj(
h_ga)
h_og = self.obs_gen_attention(
obs_encoding_sequence, node_encoding_sequence, obs_mask,
node_mask)
h_go = self.obs_gen_attention(
node_encoding_sequence, obs_encoding_sequence, node_mask,
obs_mask)
h_og = self.obs_gen_attention_prj(
h_og) # bs X len X block_hidden_dim
h_go = self.obs_gen_attention_prj(
h_go) # bs X len X block_hidden_dim
ave_h_go = masked_mean(h_go, m=node_mask, dim=1)
ave_h_og = masked_mean(h_og, m=obs_mask, dim=1)
ave_h_ga = masked_mean(h_ga, m=node_mask, dim=1)
ave_h_ag = masked_mean(h_ag, m=prev_action_mask, dim=1)
rnn_input = self.obs_gen_attention_to_rnn_input(
torch.cat([ave_h_go, ave_h_og, ave_h_ga, ave_h_ag], dim=1))
rnn_input = torch.tanh(rnn_input) # batch x block_hidden_dim
h_t = self.obs_gen_graph_rnncell(
rnn_input, prev_graph_hidden_state) if \
prev_graph_hidden_state is not None else \
self.obs_gen_graph_rnncell(rnn_input)
current_adjacency_matrix = self.hidden_to_adjacency_matrix(
h_t, batch_size=len(
observations))
del self.prev_graph_hidden_state
self.prev_graph_hidden_state = h_t.detach()
self.curr_mat = current_adjacency_matrix.detach().cpu()
return self.curr_mat
else:
batch_size = len(observations)
# encode
input_obs = self.get_word_input(observations)
obs_encoding_sequence, obs_mask = self.encode_text(input_obs)
node_encoding_sequence, node_mask = self.encode_graph(
graph)
h_og = self.cmd_gen_attention(
obs_encoding_sequence, node_encoding_sequence, obs_mask,
node_mask)
h_go = self.cmd_gen_attention(
node_encoding_sequence, obs_encoding_sequence, node_mask,
obs_mask)
h_og = self.cmd_gen_attention_prj(h_og)
h_go = self.cmd_gen_attention_prj(h_go)
# step 2, greedy generation
# decode
input_target_token_list = [["<bos>"] for i in range(batch_size)]
eos = np.zeros(batch_size)
for _ in range(self.max_target_length):
input_target = self.tokenize(
[" ".join(item) for item in input_target_token_list])
# batch x time x vocab
pred = self.decode(input_target, h_og,
obs_mask, h_go, node_mask, input_obs)
# pointer softmax
pred = to_np(pred[:, -1]) # batch x vocab
pred = np.argmax(pred, -1) # batch
for b in range(batch_size):
new_stuff = [self._word_vocab[int(pred[b])]
] if eos[b] == 0 else list()
input_target_token_list[b] = input_target_token_list[
b] + new_stuff
if pred[b] == self._word_vocab["<eos>"]:
eos[b] = 1
if np.sum(eos) == batch_size:
break
return [" ".join(item[1:]) for item in input_target_token_list]
| 29,695 | 44.268293 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/train.py
|
from argparse import ArgumentParser, Namespace
from pathlib import Path
from collections import defaultdict, deque
from copy import deepcopy
import datetime
import logging
import copy
import csv
import pdb
import numpy as np
import tqdm
import yaml
from logic import proposition_from_textworld_logic
from components import Sample, ResultsCSVField
from utils import (expand_trajectories,
serialize_namespace,
save_trajectories,
get_variable_name)
from evaluate import run as evaluate
from ltl import PadLTL, progression
from env import get_game_env
from args import add_args
from agent import Agent
from state import State
parser = ArgumentParser()
add_args(parser)
logger = logging.getLogger()
def train(config: Namespace, pretrain: bool = False):
start_time = datetime.datetime.now()
fprefix = '' if not pretrain else 'pretrain_'
results_file = config.io.output_dir / f'{fprefix}results.csv'
config_file = config.io.output_dir / f'{fprefix}config.yaml'
with config_file.open('w') as f:
yaml.dump(serialize_namespace(deepcopy(config)), f)
if not config.checkpoint.resume:
with results_file.open('w') as f:
writer = csv.writer(f)
writer.writerow(ResultsCSVField().keys())
eval_env, num_eval_game = None, None
if pretrain:
raise NotImplementedError
env, _ = get_game_env(
game='simple_ltl',
progression_mode='full',
            sampler=config.pretrain.ltl_sampler,
batch_size=config.pretrain.batch_size,
real_valued_graph=config.graph_updater.real_valued,
randomized_nouns_verbs=config.training.randomized_nouns_verbs,)
agent = Agent(config=config,
word_vocab=env.word_vocab,
ltl_vocab=env.ltl_vocab,
action_vocab=env.action_vocab,
pretrain=True,)
else:
# make game environments
gr = config.training.graph_reward_lambda > 0 \
and config.training.graph_reward_filtered
requested_infos = Agent.env_infos(
light=config.training.light_env_infos,
win_formulas=config.model.use_ltl or gr)
# training game env
env, _ = get_game_env(
game=config.training.game,
data_dir=config.io.data_dir,
vocab_dir=config.io.vocab_dir,
difficulty_level=config.training.difficulty_level,
requested_infos=requested_infos,
max_episode_steps=config.training.steps_per_episode,
batch_size=config.training.batch_size,
split='train',
all_games=config.training.all_games,
training_size=config.training.training_size,
game_limit=config.training.game_limit,
real_valued_graph=config.graph_updater.real_valued,
randomized_nouns_verbs=config.training.randomized_nouns_verbs,
prune=config.training.prune_actions,
strip_instructions=config.training.strip_instructions)
if config.evaluate.run:
# training game env
eval_env, num_eval_game = get_game_env(
game=config.evaluate.game,
data_dir=config.io.data_dir,
vocab_dir=config.io.vocab_dir,
difficulty_level=config.evaluate.difficulty_level,
requested_infos=requested_infos,
max_episode_steps=config.evaluate.steps_per_episode,
batch_size=config.evaluate.batch_size,
split='valid',
real_valued_graph=config.graph_updater.real_valued,
randomized_nouns_verbs=False,
prune=config.training.prune_actions,
strip_instructions=config.training.strip_instructions)
agent = Agent(config=config,
word_vocab=env.word_vocab,
ltl_vocab=env.ltl_vocab,
node_vocab=env.node_vocab,
relation_vocab=env.relation_vocab,
action_vocab=env.action_vocab,
pretrain=False,)
if config.checkpoint.resume:
fname = config.io.checkpoint_dir / (
fprefix + 'latest.pt')
else:
fname = config.io.checkpoint_dir / (
fprefix + config.checkpoint.filename)
if (config.checkpoint.load or config.checkpoint.resume) and fname.exists():
logging.info(f"Loading from checkpoint : {fname}")
start_episode_no, best_train, best_eval = agent.load_model(fname)
logger.info(f"Loaded model from {fname}")
else:
start_episode_no, best_train, best_eval = 0, 0., 0.
if not config.checkpoint.resume:
start_episode_no, best_train, best_eval = 0, 0., 0.
trajectories = list()
cache_dqn_loss = deque(maxlen=config.io.report_history_length)
cache_game_steps = deque(maxlen=config.io.report_history_length)
cache_game_points = deque(maxlen=config.io.report_history_length)
cache_game_rewards = deque(maxlen=config.io.report_history_length)
cache_game_points_normalized = deque(
maxlen=config.io.report_history_length)
cache_game_rewards_normalized = deque(
maxlen=config.io.report_history_length)
cache_success = deque(
maxlen=config.io.report_history_length)
patience, total_steps, perfect_training = 0, 0, 0
prev_performance = 0.
eval_game_step, eval_success = 0., 0.
eval_game_points, eval_game_points_normalized, \
eval_normalized_rewards, eval_rewards = 0., 0., 0., 0.
batch_size = config.training.batch_size
episodes = tqdm.tqdm(range(start_episode_no,
config.training.max_episode,
batch_size))
pad_state = State(observation=agent._word_vocab.pad_token,
action=agent._word_vocab.pad_token,
ltl=PadLTL())
# import cProfile
# import pstats
# import io
# from pstats import SortKey
# pr = cProfile.Profile()
first_log = True
for episode_no in episodes:
# pr.enable()
# if episode_no > 0:
# s = io.StringIO()
# sortby = SortKey.CUMULATIVE
# ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
# ps.print_stats()
# print(s.getvalue())
# raise
env.seed(episode_no)
# np.random.seed(episode_no)
obs, infos = env.reset()
actions = ["restart"] * batch_size
agent.train()
agent.reset_states(obs, actions, infos)
previous_hidden, previous_cell = None, None
prev_rewards = [0. for _ in range(batch_size)]
prev_step_dones = [0. for _ in range(batch_size)]
prev_original_scores = [0. for _ in range(batch_size)]
actions_cache = list()
act_randomly = episode_no < config.training.learn_from_this_episode
still_running_mask, game_rewards, game_points = list(), list(), list()
dones_list = list()
original_max_scores, games_max_scores = agent.games_max_scores(infos)
if first_log:
logger.info(f"Max Scores : {games_max_scores[0]}")
first_log = False
if config.io.save_trajectories_frequency > 0:
trajectories.extend([defaultdict(list) for _ in range(batch_size)])
trajectories = expand_trajectories(
obs=obs, infos=infos, episode_no=episode_no - start_episode_no,
trajectories=trajectories, states=agent.states,
batch_size=batch_size,
dones=[False]*batch_size,
admissible_actions=[None] * batch_size,
rewards=[0] * batch_size,
step_rewards=[0] * batch_size,
game_points=[0] * batch_size,
max_game_points=original_max_scores,
max_rewards=games_max_scores,
step_game_points=[0] * batch_size,
actions=actions,)
for step_no in range(config.training.steps_per_episode):
admissible_actions = infos['admissible_commands']
logger.debug("Acting")
if config.training.feed_cookbook_observation and step_no == 0:
next_actions_ind = [actions.index('examine cookbook') for
actions in admissible_actions]
actions = ["examine cookbook" for
_ in admissible_actions]
else:
next_actions_ind, previous_hidden, previous_cell = agent.act(
states=agent.states,
admissible_actions=admissible_actions,
random=act_randomly,
previous_hidden=previous_hidden,
previous_cell=previous_cell)
next_actions_ind = next_actions_ind.cpu().tolist()
actions = [candidates[i] for candidates, i in zip(
admissible_actions, next_actions_ind)]
actions_cache.append((next_actions_ind, admissible_actions))
if episode_no == 100000:
pass
# pdb.set_trace()
logger.debug("Doing env step")
obs, scores, dones, infos = env.step(
next_actions_ind if config.training.randomized_nouns_verbs
else actions,
agent.states.ltl_formulas)
# dones_wons = [done and won for done, won in
# zip(dones, infos['won'])]
logger.debug("Updating states")
state_update_rewards, state_update_dones = agent.update_states(
obs, actions, dones, infos)
original_scores, scores = agent.adjust_scores(
scores, step_no, state_update_rewards, state_update_dones,
infos['won'], dones)
if config.model.use_ltl and config.training.end_on_ltl_violation:
dones = list(dones)
for i, ltl_done in enumerate(state_update_dones):
valid = True if not still_running_mask else \
still_running_mask[-1][i]
if agent.states[i].ltl._violated and \
config.training.backwards_ltl and \
valid:
prev_facts = \
agent.states[i].past[-1].belief_graph._facts
curr_facts = \
agent.states[i].belief_graph._facts
entities = agent.states[i].ltl.entities
prev_facts = set([f
for f in prev_facts if
get_variable_name(f.names[0]) in
entities])
curr_facts = set([f for f in curr_facts
if get_variable_name(f.names[0])
in entities])
diff = [x for x in curr_facts -
prev_facts if x in curr_facts]
nots = [
('always',
('not',
str(proposition_from_textworld_logic(
proposition))))
for proposition in diff]
if len(nots) > 0:
not_form = nots[0]
for pred in nots[1:]:
not_form = progression.standardize(
('and', not_form, pred))
for j, state in enumerate(agent.states[i].past):
if not state.ltl._formulas:
continue
try:
form = state.ltl._formulas[0]
if form[0] == 'next':
new_form = \
('and', form, not_form)
else:
new_form = \
progression.standardize(
('and', form, not_form))
agent.states[i].past[j].ltl._formulas[0] = \
new_form
                            except Exception:
                                logger.exception(
                                    "Failed to fold negated facts into a "
                                    "past LTL formula")
if dones[i] and not ltl_done and \
step_no != config.training.steps_per_episode - 1:
# pdb.set_trace()
# raise ValueError("The game is done but not the LTL")
pass
dones[i] = dones[i] or ltl_done
dones = tuple(dones)
for i in range(config.training.batch_size):
if scores[i] > games_max_scores[i]:
# pdb.set_trace()
# raise ValueError("Can't have a reward > max")
pass
if episode_no >= config.training.learn_from_this_episode and \
total_steps % config.training.update_per_k_game_steps == 0:
logger.debug("Updating DQN")
loss, q_values = agent.update_dqn(episode_no)
if loss is not None:
cache_dqn_loss.append(loss.detach().cpu())
if step_no == config.training.steps_per_episode - 1:
# terminate the game because DQN requires one extra step
dones = [True] * len(dones)
dones_list.append(dones)
total_steps += 1
prev_step_dones = dones
still_running = [1. - float(item)
for item in prev_step_dones]
step_rewards = [float(curr) - float(prev) for curr,
prev in zip(scores, prev_rewards)] # list of float
# if config.training.reward_ltl and config.model.use_ltl and \
# not config.training.reward_ltl_positive_only:
# for i, (done, won) in enumerate(zip(dones, infos['won'])):
# if done and not won:
# step_rewards[i] = -1
# if config.training.persistent_negative_reward:
# for i, r in enumerate(scores):
# if r < 0 or prev_rewards[i] < 0:
# step_rewards[i] = r
step_game_points = [float(curr) - float(prev) for curr,
prev in zip(original_scores,
prev_original_scores)]
game_points.append(copy.copy(step_game_points))
game_rewards.append(copy.copy(step_rewards))
prev_rewards = scores
prev_original_scores = original_scores
still_running_mask.append(still_running)
if config.io.save_trajectories_frequency > 0:
trajectories = expand_trajectories(
obs=obs, infos=infos,
episode_no=episode_no - start_episode_no,
batch_size=batch_size, dones=dones,
states=agent.states, trajectories=trajectories,
admissible_actions=admissible_actions,
rewards=scores,
step_rewards=step_rewards,
game_points=original_scores,
max_game_points=original_max_scores,
max_rewards=games_max_scores,
step_game_points=step_game_points,
actions=actions,
)
# if all ended, break
if np.sum(still_running) == 0:
logger.debug('All games ended, breaking')
break
logger.debug("Done Episode")
mod_still_running_mask = np.array(
[[1] * config.training.batch_size] + still_running_mask[:-1])
still_running_mask = np.array(still_running_mask)
# if config.training.persistent_negative_reward:
# game_points = np.array(game_points)
# game_rewards = np.array(game_rewards)
# else:
game_points = np.array(game_points) * \
mod_still_running_mask # step x batch
game_rewards = np.array(game_rewards) * \
mod_still_running_mask # step x batch
avg_rewards_in_buffer = agent.experience.avg_rewards()
for b in range(batch_size):
# if still_running_mask[0][b] == 0:
# continue
# if (still_running_mask.shape[0] ==
# config.training.steps_per_episode and
# still_running_mask[-1][b] != 0):
# if (still_running_mask.shape[0] ==
# config.training.steps_per_episode and
# still_running_mask[-1][b] != 0):
# # need to pad one transition
# _need_pad = True
# tmp_game_rewards = game_rewards[:, b].tolist() + [0.]
# else:
# _need_pad = False
# tmp_game_rewards = game_rewards[:, b]
# if np.mean(tmp_game_rewards) < avg_rewards_in_buffer * \
# config.training.experience_replay[
# 'buffer_reward_threshold'] and \
# agent.experience.at_capacity:
# continue
# TODO TOGGLE THIS
# past_index = -min(config.training.steps_per_episode,
# len(agent.states[0].past))
past_index = 0
_need_pad = False
for i in range(game_rewards.shape[0]):
is_final = True
if mod_still_running_mask[i][b] != 0:
is_final = False
# assert actions_cache[i][1][b][actions_cache[i][0][b]] == \
# agent.states[b].past[past_index + i].action
agent.experience.add(Sample(
step=i,
action=actions_cache[i][0][b],
state=agent.states[b].past[past_index + i],
reward=game_rewards[i][b],
admissible_actions=actions_cache[i][1][b],
done=dones_list[i][b],
is_final=is_final))
if mod_still_running_mask[i][b] == 0:
break
# _need_pad = False
if _need_pad:
agent.experience.add(Sample(
step=i+1,
action=agent._word_vocab.pad_token, # 0
state=pad_state, # pad_state
reward=0.,
done=True,
# [agent._word_vocab.pad_token],
admissible_actions=actions_cache[i][1][b],
is_final=True))
for b in range(batch_size):
cache_game_points.append(np.sum(game_points, 0)[b])
cache_game_points_normalized.append(
(np.sum(game_points, 0) / original_max_scores)[b])
cache_game_rewards.append(np.sum(game_rewards, 0)[b])
cache_game_rewards_normalized.append(
(np.sum(game_rewards, 0) / games_max_scores)[b])
cache_game_steps.append(np.sum(still_running_mask, 0)[b])
cache_success.append(infos['won'][b])
# finish game
agent.finalize_episode(episode_no)
if episode_no < config.training.learn_from_this_episode:
continue
time_mark = datetime.datetime.now()
points_norm = np.mean(cache_game_points_normalized)
rewards_norm = np.mean(cache_game_rewards_normalized)
success = np.mean(cache_success)
if config.io.report_frequency != 0 and episode_no > 0 and \
(episode_no) % config.io.report_frequency <= \
(episode_no - batch_size) % config.io.report_frequency:
logger.info(
f"\nTrain: {episode_no:3d} | " +
f"Time: {str(time_mark - start_time).rsplit('.')[0]:s} | " +
f"dqn loss: {np.mean(cache_dqn_loss):2.5f} | " +
f"normalized game points: {points_norm:2.3f} | " +
f"normalized rewards: {rewards_norm:2.3f} | " +
f"game success: {success:.3f} | " +
f"used steps: {np.mean(cache_game_steps):2.3f}"
)
curr_train_performance = np.mean(cache_game_rewards_normalized)
curr_train_performance = success
if episode_no > 0 and \
episode_no % config.checkpoint.save_frequency == 0:
logger.info("Saved latest model")
agent.save_model(
episode_no, config.io.checkpoint_dir / 'latest.pt',
best_train, best_eval)
if config.checkpoint.save_each:
agent.save_model(
episode_no, config.io.checkpoint_dir /
f'episode_{episode_no}.pt',
best_train, best_eval)
if config.evaluate.run and episode_no > 0 and \
episode_no % config.evaluate.frequency == 0 \
and eval_env is not None:
logger.debug("Running Eval")
eval_game_points, eval_game_points_normalized,\
eval_rewards, \
eval_normalized_rewards, eval_game_step, \
eval_success, eval_trajectories = evaluate(
eval_env, agent, num_eval_game, config.evaluate)
trajectories_file = config.io.trajectories_dir /\
f'eval_trajectories_e={episode_no}.pkl'
save_trajectories(eval_trajectories, trajectories_file)
# TODO note this here...
# curr_eval_performance = eval_normalized_rewards
curr_eval_performance = eval_game_points_normalized
curr_eval_performance = eval_success
curr_performance = curr_eval_performance
if curr_eval_performance > best_eval:
best_eval = curr_eval_performance
logger.info("Saved best model")
agent.save_model(
episode_no, config.io.checkpoint_dir / 'best.pt',
best_train, best_eval)
agent.save_model(
episode_no, config.io.checkpoint_dir / 'best_eval.pt',
best_train, best_eval)
elif curr_eval_performance == best_eval:
if curr_eval_performance > 0.:
logger.info("Saved best model")
agent.save_model(episode_no, config.io.checkpoint_dir /
'best.pt',
best_train, best_eval)
agent.save_model(episode_no, config.io.checkpoint_dir /
'best_eval.pt',
best_train, best_eval)
else:
if curr_train_performance >= best_train:
logger.info("Saved best model")
agent.save_model(episode_no, config.io.checkpoint_dir /
'best.pt',
best_train, best_eval)
agent.save_model(episode_no, config.io.checkpoint_dir /
'best_train.pt',
best_train, best_eval)
else:
curr_eval_performance = 0.
curr_performance = curr_train_performance
if curr_train_performance >= best_train:
agent.save_model(
episode_no, config.io.checkpoint_dir /
(fprefix + 'best.pt'),
best_train, best_eval)
agent.save_model(
episode_no, config.io.checkpoint_dir /
(fprefix + 'best_train.pt'),
best_train, best_eval)
# update best train performance
if curr_train_performance >= best_train:
best_train = curr_train_performance
if prev_performance <= curr_performance:
patience = 0
else:
patience += 1
prev_performance = curr_performance
        # if patience is exceeded, reload from a good checkpoint
if config.training.patience > 0 and \
patience >= config.training.patience:
if (config.io.checkpoint_dir / 'best.pt').exists():
patience = 0
logger.info('Patience exceeded. ' +
'Reloading from a good checkpoint.')
agent.load_model(str(config.io.checkpoint_dir /
'best.pt'))
if np.mean(points_norm) > 0.96:
perfect_training += 1
else:
perfect_training = 0
logger.debug("Writing results to file")
with results_file.open('a') as f:
writer = csv.writer(f)
writer.writerow(
ResultsCSVField(
time=str(time_mark - start_time).rsplit(".")[0],
episode_no=episode_no,
dqn_loss=np.mean(cache_dqn_loss),
train_game_points=np.mean(cache_game_points),
train_normalized_game_points=points_norm,
train_rewards=np.mean(cache_game_rewards),
train_normalized_rewards=np.mean(rewards_norm),
train_steps=np.mean(cache_game_steps),
train_success=success,
eval_game_points=eval_game_points,
eval_normalized_game_points=eval_game_points_normalized,
eval_rewards=eval_rewards,
eval_normalized_rewards=eval_normalized_rewards,
eval_steps=eval_game_step,
eval_success=eval_success,
).values())
logger.debug("Done writing results to file")
# if curr_performance == 1. and curr_train_performance >= 0.95:
# break
if perfect_training >= 3:
logging.info("Perfect training, done training")
break
if episode_no > 0 and \
episode_no % config.io.save_trajectories_frequency == 0 and \
config.io.save_trajectories_frequency > 0:
logger.info("Saving train trajectories")
trajectories_file = config.io.trajectories_dir / \
f'train_trajectories_e={episode_no}.pkl'
save_trajectories(
trajectories, trajectories_file,)
trajectories = list()
logger.info(
"Train: End | " +
f"T: {str(time_mark - start_time).rsplit('.')[0]:s} | " +
f"dqn loss: {np.mean(cache_dqn_loss):2.5f} | " +
f"normalized game points: {points_norm:2.3f} | " +
f"normalized rewards: {rewards_norm:2.3f} | " +
f"game success: {success:.3f} | " +
f"used steps: {np.mean(cache_game_steps):2.3f}"
)
evaluate(eval_env, agent, num_eval_game, config.evaluate)
agent.save_model(episode_no, config.io.checkpoint_dir / 'latest.pt',
best_train, best_eval)
if __name__ == '__main__':
args = parser.parse_args()
train(args)
| 27,639 | 44.68595 | 81 |
py
|
LTL-GATA
|
LTL-GATA-main/src/logic.py
|
from typing import List
from copy import copy
import pdb
from textworld.logic import Variable as TWVar, Proposition as TWProp
CONSTANT_NAMES = {"P": "player", "I": "player",
"ingredient": None, "slot": None, "RECIPE": "cookbook"}
def get_variable_name(name: str) -> str:
return CONSTANT_NAMES[name] if name in CONSTANT_NAMES else name
class Variable(TWVar):
def __str__(self) -> str:
return super().__str__()
class Proposition(TWProp):
def __init__(self, seperator: str, **kwargs) -> None:
super(Proposition, self).__init__(**kwargs)
self.seperator = seperator
def __str__(self) -> str:
obj_subj = get_variable_name(self.names[0])
if len(self.names) == 1:
string = self.seperator.join([
obj_subj.replace(' ', self.seperator), 'is',
self.name])
else:
obj_other = get_variable_name(self.names[1])
string = self.seperator.join([
obj_subj.replace(' ', self.seperator),
self.name,
obj_other.replace(' ', self.seperator), ])
return string
def proposition_from_textworld_logic(proposition: TWProp,
seperator: str = '_') -> Proposition:
return Proposition(
seperator=seperator,
name=proposition.name,
arguments=[Variable(x) for x in proposition.names],)
def prune_actions(formula, actions: List[str]):
pruned = list()
if 'is_sliced' not in formula and 'is_chopped' not in formula and \
'is_diced' not in formula:
return actions
for i, action in enumerate(actions):
ing = None
if 'with knife' in action:
if 'slice' in action:
method = 'sliced'
ing = action.split("slice")[1].split("with")[0].strip()
if 'dice' in action:
method = 'diced'
ing = action.split("dice")[1].split("with")[0].strip()
if 'chop' in action:
method = 'chopped'
ing = action.split("chop")[1].split("with")[0].strip()
if ing is None or ing == '':
pruned.append(action)
continue
if ing in formula and method in formula:
pruned.append(action)
else:
pruned.append(action)
return pruned
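# Illustrative behaviour sketch (formula and action strings are hypothetical):
# prune_actions keeps non-knife actions untouched and drops knife actions whose
# ingredient or preparation method does not appear in the formula, e.g.
#   prune_actions('eventually carrot_is_sliced',
#                 ['slice carrot with knife', 'dice carrot with knife', 'go north'])
#   -> ['slice carrot with knife', 'go north']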
| 2,406 | 31.093333 | 74 |
py
|
LTL-GATA
|
LTL-GATA-main/src/optim/radam.py
|
import math
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict):
for param in params:
if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]):
param['buffer'] = [[None, None, None] for _ in range(10)]
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[
[None, None, None] for _ in range(10)])
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
buffered = group['buffer'][int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * \
state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
elif self.degenerated_to_sgd:
step_size = 1.0 / (1 - beta1 ** state['step'])
else:
step_size = -1
buffered[2] = step_size
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
* group['lr'], )
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size *
group['lr'])
p.data.copy_(p_data_fp32)
elif step_size > 0:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
* group['lr'], )
p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])
p.data.copy_(p_data_fp32)
return loss
class PlainRAdam(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, degenerated_to_sgd=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
self.degenerated_to_sgd = degenerated_to_sgd
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
super(PlainRAdam, self).__init__(params, defaults)
def __setstate__(self, state):
super(PlainRAdam, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
state['step'] += 1
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
# more conservative since it's an approximated value
if N_sma >= 5:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
* group['lr'])
step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (
N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
p.data.copy_(p_data_fp32)
elif self.degenerated_to_sgd:
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
* group['lr'])
step_size = group['lr'] / (1 - beta1 ** state['step'])
p_data_fp32.add_(exp_avg, alpha=-step_size)
p.data.copy_(p_data_fp32)
return loss
class AdamW(Optimizer):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError(
"Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError(
"Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, warmup=warmup)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError(
'Adam does not support sparse gradients, please consider SparseAdam instead')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(
p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
if group['warmup'] > state['step']:
scheduled_lr = 1e-8 + \
state['step'] * group['lr'] / group['warmup']
else:
scheduled_lr = group['lr']
step_size = scheduled_lr * \
math.sqrt(bias_correction2) / bias_correction1
if group['weight_decay'] != 0:
p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay']
* scheduled_lr)
p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size, )
p.data.copy_(p_data_fp32)
return loss
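# Minimal usage sketch (model and data are illustrative, not from this repo):
# model = torch.nn.Linear(10, 2)
# optimizer = RAdam(model.parameters(), lr=1e-3)
# loss = model(torch.randn(4, 10)).sum()
# loss.backward()
# optimizer.step()
# PlainRAdam and AdamW above expose the same torch.optim.Optimizer interface.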
| 11,028 | 39.848148 | 111 |
py
|
LTL-GATA
|
LTL-GATA-main/src/optim/__init__.py
|
from typing import Dict, Any
import torch
from optim.radam import RAdam
def get_optimizer(net: torch.nn.Module,
config: Dict[str, Any]) -> torch.optim.Optimizer:
# exclude some parameters from optimizer
param_frozen_list = [] # should be changed into torch.nn.ParameterList()
param_active_list = [] # should be changed into torch.nn.ParameterList()
for k, v in net.named_parameters():
keep_this = True
for keyword in set(config['fix_parameters_keywords']):
if keyword in k:
param_frozen_list.append(v)
keep_this = False
break
if keep_this:
param_active_list.append(v)
param_frozen_list = torch.nn.ParameterList(param_frozen_list)
param_active_list = torch.nn.ParameterList(param_active_list)
params = [{
'params': param_frozen_list, 'lr': 0.0},
{'params': param_active_list, 'lr': config['kwargs']['lr']}]
optimizer_kwargs = config['kwargs']
if config['name'] == 'adam':
optimizer = torch.optim.Adam(params, **optimizer_kwargs)
elif config['name'] == 'radam':
optimizer = RAdam(params, **optimizer_kwargs)
elif config['name'] == 'rmsprop':
optimizer = torch.optim.RMSprop(params, **optimizer_kwargs)
else:
raise NotImplementedError
scheduler = None
scheduler_kwargs = config['scheduler_kwargs']
if config['scheduler'] == 'steplr':
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, **scheduler_kwargs)
return optimizer, scheduler
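# Example config sketch (keys mirror those read above; the values and the
# frozen-parameter keyword are assumptions, not the repo's actual settings):
# config = {
#     'name': 'radam',
#     'kwargs': {'lr': 1e-3},
#     'fix_parameters_keywords': ['word_embedding'],
#     'scheduler': None,
#     'scheduler_kwargs': {},
# }
# optimizer, scheduler = get_optimizer(net, config)  # scheduler stays None here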
| 1,588 | 35.953488 | 77 |
py
|
LTL-GATA
|
LTL-GATA-main/src/env/ltl.py
|
from typing import Tuple, List, Dict, Any
from gym.envs.registration import register
from gym import spaces
import numpy as np
import gym
from ltl import progression as ltl_progression
from ltl.samplers import get_ltl_sampler, SequenceSampler  # SequenceSampler is referenced in sample_ltl_goal below; assumed to live in ltl.samplers
class SimpleLTLEnv(gym.Env):
"""
Emulates the behaviour of
from textworld.gym.envs.textworld_batch:TextworldBatchGymEnv
"""
def __init__(self, letters: str, timeout: int, batch_size: int) -> None:
"""
@ CREDIT LTL2ACTION
letters:
- (str) propositions
timeout:
            - (int) maximum length of the episode
"""
self.letters = letters
        self.letter_types = sorted(set(letters))  # sorted() returns a list; list.sort() returns None
self.observation_space = spaces.Discrete(1)
self.action_space = spaces.Discrete(len(self.letter_types))
self.time = 0
self.num_episodes = 0
self.timeout = timeout
self.proposition = None
self.batch_size = batch_size
def step(self, action) -> Tuple[List[str],
List[float],
List[bool],
Dict[str, List[Any]]]:
"""
This function executes an action in the environment.
"""
self.time += 1
reward = 0.0
done = self.time > self.timeout
obs = self._get_observation()
self.proposition = action
return obs, reward, done, dict()
def _get_observation(self):
return self.observation_space.sample()
def seed(self, seed: int = None) -> None:
return
def reset(self):
self.time = 0
self.num_episodes += 1
obs = self._get_observation()
return obs
def show(self):
print("Events:", self.get_events(),
"\tTimeout:", self.timeout - self.time)
def get_events(self):
return self.letter_types[
self.proposition] if self.proposition is not None else None
def get_propositions(self):
return self.letter_types
class SimpleLTLEnvDefault(SimpleLTLEnv):
def __init__(self, **kwargs):
"""
@ CREDIT LTL2ACTION
"""
super().__init__(letters="abcdefghijkl", timeout=75, **kwargs)
register(id='SimpleLTLEnv-v0', entry_point='SimpleLTLEnvDefault')
class WrappedLTLEnv(gym.Wrapper):
def __init__(self, env, progression_mode: str = "full",
ltl_sampler=None, intrinsic: float = 0.0) -> None:
"""
@ CREDIT LTL2ACTION
LTL environment
--------------------
It adds an LTL objective to the current environment
- The observations become a dictionary with an added "text" field
specifying the LTL objective
        - It also automatically progresses the formula and generates an
            appropriate reward function
        - However, it does require the user to define a labeling function
and a set of training formulas
progression_mode:
- "full": the agent gets the full, progressed LTL formula as
part of the observation
- "partial": the agent sees which propositions (individually)
will progress or falsify the formula
- "none": the agent gets the full, original LTL formula as part
of the observation
"""
super().__init__(env)
self.progression_mode = progression_mode
self.propositions = self.env.get_propositions()
self.sampler = get_ltl_sampler(ltl_sampler, self.propositions)
self.observation_space = spaces.Dict(
{'features': env.observation_space})
self.known_progressions = {}
self.intrinsic = intrinsic
def reset(self):
self.known_progressions = {}
self.obs = self.env.reset()
# Defining an LTL goal
self.ltl_goal = self.sample_ltl_goal()
self.ltl_original = self.ltl_goal
# Adding the ltl goal to the observation
if self.progression_mode == "partial":
ltl_obs = {'features': self.obs,
'progress_info': self.progress_info(self.ltl_goal)}
else:
ltl_obs = {'features': self.obs, 'text': self.ltl_goal}
return ltl_obs
def step(self, action):
int_reward = 0
# executing the action in the environment
next_obs, original_reward, env_done, infos = self.env.step(action)
# progressing the ltl formula
truth_assignment = self.get_events(self.obs, action, next_obs)
self.ltl_goal = self.progression(self.ltl_goal, truth_assignment)
self.obs = next_obs
# Computing the LTL reward and done signal
ltl_reward = 0.0
ltl_done = False
if self.ltl_goal == 'True':
ltl_reward = 1.0
ltl_done = True
elif self.ltl_goal == 'False':
ltl_reward = -1.0
ltl_done = True
else:
ltl_reward = int_reward
        # Compute the new observation and return the outcome of this action
if self.progression_mode == "full":
ltl_obs = {'features': self.obs, 'text': self.ltl_goal}
elif self.progression_mode == "none":
ltl_obs = {'features': self.obs, 'text': self.ltl_original}
elif self.progression_mode == "partial":
ltl_obs = {'features': self.obs,
'progress_info': self.progress_info(self.ltl_goal)}
else:
raise NotImplementedError
reward = original_reward + ltl_reward
done = env_done or ltl_done
return ltl_obs, reward, done, infos
def progression(self, ltl_formula, truth_assignment):
if (ltl_formula, truth_assignment) not in self.known_progressions:
result_ltl = ltl_progression.progress_and_clean(
ltl_formula, truth_assignment)
self.known_progressions[(
ltl_formula, truth_assignment)] = result_ltl
return self.known_progressions[(ltl_formula, truth_assignment)]
# # X is a vector where index i is 1 if prop i progresses the formula,
# -1 if it falsifies it, 0 otherwise.
def progress_info(self, ltl_formula):
propositions = self.env.get_propositions()
X = np.zeros(len(self.propositions))
for i in range(len(propositions)):
progress_i = self.progression(ltl_formula, propositions[i])
if progress_i == 'False':
X[i] = -1.
elif progress_i != ltl_formula:
X[i] = 1.
return X
def sample_ltl_goal(self):
# NOTE: The propositions must be represented by a char
# This function must return an LTL formula for the task
formula = self.sampler.sample()
if isinstance(self.sampler, SequenceSampler):
def flatten(bla):
output = []
for item in bla:
output += flatten(item) if isinstance(item,
tuple) else [item]
return output
# length = flatten(formula).count("and") + 1
self.env.timeout = 25 # 10 * length
return formula
def get_events(self, obs, act, next_obs):
        # This function must return the events that currently hold in the environment
        # NOTE: The events are represented by a string containing only the propositions
        # that currently hold (e.g., "ac" means that only propositions 'a' and 'c' hold)
return self.env.get_events()
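# Usage sketch (assumes get_ltl_sampler handles the default ltl_sampler=None):
# env = WrappedLTLEnv(SimpleLTLEnvDefault(batch_size=1),
#                     progression_mode="full", ltl_sampler=None)
# obs = env.reset()            # {'features': ..., 'text': <sampled LTL formula>}
# obs, reward, done, info = env.step(0)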
class NoLTLWrapper(gym.Wrapper):
def __init__(self, env):
"""
Removes the LTL formula from an LTLEnv
It is useful to check the performance of off-the-shelf agents
"""
super().__init__(env)
self.observation_space = env.observation_space
# self.observation_space = env.observation_space['features']
def reset(self):
obs = self.env.reset()
# obs = obs['features']
# obs = {'features': obs}
return obs
def step(self, action):
# executing the action in the environment
obs, reward, done, info = self.env.step(action)
# obs = obs['features']
# obs = {'features': obs}
return obs, reward, done, info
def get_propositions(self):
return list([])
| 8,431 | 33 | 167 |
py
|
LTL-GATA
|
LTL-GATA-main/src/env/cooking.py
|
from typing import Tuple, List, Dict, Any, Union
from copy import copy, deepcopy
from pathlib import PosixPath
import logging
import random
import glob
import pdb
import re
import os
from textworld.gym.envs.textworld_batch import TextworldBatchGymEnv as TWEnv
from textworld.logic import Proposition, Variable
from textworld import EnvInfos
from logic import Proposition as LProposition, prune_actions
from ltl import LTL
import textworld.gym
import numpy as np
import codecs
import spacy
import gym
from components import Actions, Vocabulary
logger = logging.getLogger()
def preprocess_facts(facts, mapping, tokenizer=None) -> str:
if isinstance(facts, Proposition):
if facts.name in mapping:
facts.name = mapping[facts.name]
for arg in facts.arguments:
if arg.name in mapping:
arg.name = mapping[arg.name]
return copy(facts)
for i, prop in enumerate(facts):
if prop[0].name in mapping:
prop[0].name = mapping[prop[0].name]
for arg in prop[0].arguments:
if arg.name in mapping:
arg.name = mapping[arg.name]
facts = (copy(prop),)
return facts
def preprocess_string(string: str, mapping, tokenizer=None) -> str:
if string is None:
return "nothing"
string = string.replace("\n", ' ')
if "$$$$$$$" in string:
string = string.split("$$$$$$$")[-1]
string = re.sub(' +', ' ', string).strip()
if len(string) == 0:
return "nothing"
if tokenizer is not None:
string = " ".join([t.text for t in tokenizer(string)])
if 'frosted - glass' in string:
string = string.replace('frosted - glass', 'frosted-glass')
string = string.lower()
for tok, tokto in mapping.items():
string = string.replace(tok, tokto)
return string
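# Illustrative behaviour (empty mapping, no spaCy tokenizer):
# preprocess_string("You open the fridge.\n\n", {}) -> "you open the fridge."
# preprocess_string(None, {})                       -> "nothing"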
class RecipeWrappedEnv:
"""
This is bad practice! Fake Env wrapper
"""
def __init__(self, env: TWEnv, vocab_dir: PosixPath,
real_valued_graph: bool,
randomized_nouns_verbs: bool,
train: bool,
prune: bool = False,
strip_instructions: bool = False,
eleven: bool = False) -> None:
# super(ModifiedEnv, self).__init__(**kwargs)
self.env = env
self.tokenizer = spacy.load('en_core_web_sm', disable=[
'ner', 'parser', 'tagger'])
self.real_valued_graph = real_valued_graph
self.word_vocab, self.ltl_vocab, self.relation_vocab, \
self.node_vocab, self.action_vocab = self.load_vocabs(vocab_dir,
eleven)
self.prev_adm = None
self.mapping = dict()
self.tokens = dict()
self.tok_idxs = dict()
self.prune = prune
self.strip_instructions = strip_instructions
if randomized_nouns_verbs and train:
self.tokens = {
'ingredients': (
'banana',
'block of cheese',
'carrot',
'orange bell pepper',
'pork chop',
'purple potato',
'red apple',
'red hot pepper',
'red onion',
'red potato',
'white onion',
'yellow bell pepper',
'yellow potato',
),
'cooking_methods': (
# cook I with C
'stove',
'oven',
),
'cooking_methods_facts': (
'fried',
'roasted',
),
'preparation_methods': (
# P I with knife
'chop',
'dice',
'slice'
),
'preparation_methods_facts': (
'chopped',
'diced',
'sliced'
),
}
self.randomized_nouns_verbs = randomized_nouns_verbs and train
def load_vocabs(self, vocab_dir: PosixPath,
eleven: bool) -> Tuple[
Vocabulary, Vocabulary, Vocabulary, Vocabulary]:
word_vocab = list()
with codecs.open(str(vocab_dir / 'word_vocab.txt'),
mode='r', encoding='utf-8', errors='ignore') as f:
for line in f:
if self.real_valued_graph and line.strip() == 'examined':
continue
word_vocab.append(line.strip())
if eleven and False:
ings = ['green_apple', 'green_bell_pepper', 'green_hot_pepper',
'red_tuna', 'white_tuna',
'lettuce', 'tomato', 'yellow_apple', 'yellow_onion']
ltl_vocab = ['_'.join([ing, i]) for ing in ings for i in [
'in_player', 'is_fried', 'is_roasted', 'is_diced',
'is_sliced', 'is_chopped']]
else:
ltl_vocab = list()
with codecs.open(str(vocab_dir / 'ltl_vocab.txt'),
mode='r', encoding='utf-8', errors='ignore') as f:
for line in f:
ltl_vocab.append(line.strip())
relation_vocab = list()
with codecs.open(str(vocab_dir / "relation_vocab.txt"),
mode='r', encoding='utf-8', errors='ignore') as f:
for line in f:
relation_vocab.append(line.strip().lower())
# add reverse relations
for i in range(len(relation_vocab)):
relation_vocab.append(relation_vocab[i] + "_reverse")
# if not use_ground_truth_graph:
if not self.real_valued_graph:
relation_vocab.append('self')
node_vocab = list()
with codecs.open(str(vocab_dir / 'node_vocab.txt'),
mode='r', encoding='utf-8', errors='ignore') as f:
for line in f:
if self.real_valued_graph and line.strip() == 'examined':
continue
node_vocab.append(line.strip().lower())
if eleven and False:
action_vocab = ['tomato', 'green', 'lettuce', 'tuna']
else:
action_vocab = list()
with codecs.open(str(vocab_dir / 'action_vocab.txt'),
mode='r', encoding='utf-8', errors='ignore') as f:
for line in f:
action_vocab.append(line.strip().lower())
return (Vocabulary(word_vocab, 'word-vocab'),
Vocabulary(ltl_vocab, 'ltl-vocab'),
Vocabulary(relation_vocab, 'relation-vocab',
original_only=True),
Vocabulary(node_vocab, 'node-vocab', original_only=True),
Vocabulary(action_vocab, 'action-vocab', original_only=True))
def process_obs_infos(self, obs: List[str], infos: Dict[str, List[Any]],
ltl_formulas: List[LTL],
) -> Tuple[List[str], Dict[str, List[Any]]]:
for commands in infos['admissible_commands']:
cmds = copy(commands)
for i, cmd in enumerate(cmds):
if cmd != 'examine cookbook' and cmd.split()[0] in {
'examine', 'look', 'inventory'}:
commands.remove(cmd)
if self.prune:
infos['admissible_commands'] = \
[prune_actions(
ltl if isinstance(ltl, str) else
ltl.tokenize(), actions) for ltl, actions in
zip(ltl_formulas, infos['admissible_commands'])]
# try:
# commands.remove('take cookbook from counter')
# except Exception:
# pass
# TODO this is inefficient clean up
self.real_adm = deepcopy(infos['admissible_commands'])
obs = [preprocess_string(
o, self.mapping, self.tokenizer) for o in obs]
if self.strip_instructions:
for i, o in enumerate(obs):
if 'you are hungry !' in o:
obs[i] = o.replace(
"you are hungry ! let 's cook a delicious meal . check the cookbook in the kitchen for the recipe . once done , enjoy your meal !", "")
elif 'you open the copy' in o:
obs[i] = 'you open the copy of " cooking : a modern approach ( 3rd ed . ) "'
elif 'you need to take the knife first' in o:
obs[i] = "you can ' t do that"
infos['admissible_commands'] = [[
preprocess_string(a, self.mapping, self.tokenizer) for a in
commands] for commands in infos['admissible_commands']]
if self.randomized_nouns_verbs:
infos['win_facts'] = [[
preprocess_facts(f, self.mapping) for f in
deepcopy(facts)] for facts in infos['win_facts']]
infos['facts'] = [[
preprocess_facts(f, self.mapping) for f in
deepcopy(facts)] for facts in infos['facts']]
return obs, infos
def seed(self, seed: int) -> None:
self.env.seed(seed)
def step(self, actions: List[Union[str, int]],
ltl_formulas: List[LTL],
) -> Tuple[List[str],
List[float],
List[bool],
Dict[str, List[Any]]]:
if not isinstance(actions[0], str):
str_actions = [cmds[i] for cmds, i in zip(self.real_adm, actions)]
actions = str_actions
obs, dones, scores, infos = self.env.step(actions)
obs, infos = self.process_obs_infos(obs, infos, ltl_formulas)
if True:
infos = self.update_infos(actions, infos)
return obs, dones, scores, infos
def update_infos(self, actions: Actions,
infos: Dict[str, Any]) -> Dict[str, Any]:
for i, action in enumerate(actions):
if action == 'examine cookbook':
infos['facts'][i].append(
LProposition(name='examined',
arguments=[Variable('cookbook', type='o')],
seperator='_'))
return infos
def reset(self) -> Tuple[List[str], Dict[str, List[Any]]]:
obs, infos = self.env.reset()
if self.randomized_nouns_verbs:
self.mapping = dict()
idxs = None
for k, toks in self.tokens.items():
# this will make it use the previous idxs
# to match
if 'facts' not in k:
idxs = random.sample(
np.arange(len(toks)).tolist(), len(toks))
for i, tok in enumerate(toks):
self.mapping[tok] = toks[idxs[i]]
self.prev_adm = deepcopy(infos['admissible_commands'])
return self.process_obs_infos(obs, infos, [''] * len(obs))
def get_cooking_game_env(data_dir: PosixPath,
vocab_dir: PosixPath,
difficulty_level: int,
requested_infos: EnvInfos,
max_episode_steps: int,
batch_size: int,
real_valued_graph: bool,
randomized_nouns_verbs: bool,
all_games: bool = False,
split: str = 'train',
training_size: int = 20,
game_limit: int = -1,
prune: bool = False,
strip_instructions: bool = False) -> Tuple[None, int]:
splits = {'train', 'valid', 'test'}
assert difficulty_level in {1, 2, 3, 4, 5,
6, 7, 8, 9, 10, 11, 12, 13, 99,
'r', 'mixed'}
assert split in splits
assert training_size in {1, 20, 100}
if all_games:
assert split == 'train'
logger.info(f'{split} : Batch Size : {batch_size}')
logger.info(f'{split} : Training Size : {training_size}')
if game_limit > 0:
logger.info(f'{split} : Game Limit : {game_limit}')
else:
logger.info(f'{split} : Game Limit : {training_size}')
logger.info(f'{split} : Difficulty level : {difficulty_level}')
# training games
if not all_games:
splits = [split]
game_file_names = []
    for split_name in splits:
        if split_name == 'train':
            # keep `split` itself unchanged so the later train checks still work
            split_dir = split_name + '_'
            tsize = training_size
        else:
            split_dir = split_name
            tsize = ''
if difficulty_level == 'r':
diffs = [3, 7, 5, 9]
max_games = 25
        elif difficulty_level == 'mixed':
diffs = [1, 3, 7, 11]
max_games = 25
else:
diffs = [difficulty_level]
max_games = training_size
for difficulty_level in diffs:
            game_path = f"{data_dir}/{split_dir}" + \
                f"{tsize}/difficulty_level_{difficulty_level}"
if os.path.isdir(game_path):
game_file_names += glob.glob(os.path.join(game_path, "*.z8"))[
:max_games]
else:
game_file_names.append(game_path)
if game_limit > 0:
game_file_names = game_file_names[:game_limit]
env_id = textworld.gym.register_games(
sorted(game_file_names), request_infos=requested_infos,
max_episode_steps=max_episode_steps, batch_size=batch_size,
name="training" if split == 'train' else 'eval',
asynchronous=False, auto_reset=False)
env = gym.make(env_id)
env = RecipeWrappedEnv(env, vocab_dir, real_valued_graph,
randomized_nouns_verbs,
train=split == 'train',
prune=prune,
strip_instructions=strip_instructions,
eleven=difficulty_level in {11, 'mixed'})
num_game = len(game_file_names)
return env, num_game
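# Construction sketch (paths, difficulty and sizes are illustrative assumptions):
# from pathlib import Path
# env, num_games = get_cooking_game_env(
#     data_dir=Path('data'), vocab_dir=Path('vocabs'), difficulty_level=3,
#     requested_infos=EnvInfos(admissible_commands=True, facts=True,
#                              win_facts=True),
#     max_episode_steps=50, batch_size=1, real_valued_graph=False,
#     randomized_nouns_verbs=False, split='train', training_size=20)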
| 14,145 | 38.960452 | 159 |
py
|
LTL-GATA
|
LTL-GATA-main/src/env/__init__.py
|
from env.cooking import get_cooking_game_env
def get_game_env(game: str, **kwargs):
assert game in {'cooking'}
if game == 'cooking':
return get_cooking_game_env(**kwargs)
| 189 | 22.75 | 45 |
py
|
LTL-GATA
|
LTL-GATA-main/src/ltl/__init__.py
|
'''
Win facts for a single game are structured as:
[[],
[(Proposition('in', (Variable('red potato', 'f'), Variable('I', 'I'))),)],
[],
[(Proposition('chopped', (Variable('red potato', 'f'),)),)],
[(Proposition('in', (Variable('meal', 'meal'), Variable('I', 'I'))),)],
[(Proposition('consumed', (Variable('meal', 'meal'),)),)]]
'''
from typing import List, Tuple
from copy import deepcopy
import logging
import pdb
import re
from logic import Proposition, Variable, proposition_from_textworld_logic
from ltl import progression
from ltl.translator import Translator
logger = logging.getLogger()
class PadLTL:
def __init__(self):
self.name = 'PadLTL'
def tokenize(self):
return '( )'
def tree(self) -> None:
return None
SEPERATOR = '_'
class LTL:
'''
Class that converts and wraps a TextWorld game's win_facts
as LTL on a single game level (not batched)
'''
def __init__(
self,
use_ground_truth: bool,
facts: List[List[Tuple[Proposition, ...]]],
win_facts: List[List[Tuple[Proposition, ...]]],
fail_facts: List[List[Tuple[Proposition, ...]]],
difficulty: int,
first_obs: str,
reward_per_progression: bool = False,
reward_scale: int = 1,
as_bonus: bool = True,
single_token_prop: bool = True,
incomplete_cookbook: bool = False,
no_cookbook: bool = False,
single_reward: bool = False,
next_constrained: bool = False,
negative_for_fail: bool = False,
dont_progress: bool = False) -> None:
self.reward_scale = reward_scale
self.as_bonus = as_bonus
self.next_constrained = next_constrained
self.win_facts = win_facts
self.fail_facts = fail_facts
self.use_ground_truth = use_ground_truth
self.dont_progress = dont_progress
self.reward_per_progression = reward_per_progression
self.single_reward = single_reward
self.negative_for_fail = negative_for_fail
global SEPERATOR
self.single_token_prop = single_token_prop
self.no_cookbook = no_cookbook
self.progress = self.progress_diffall
self._reward = 0
self._done = False
self._violated = False
self.translator = Translator(level=difficulty)
if self.use_ground_truth:
self.translator.formulas = list()
cookbook_prop = 'next' if not incomplete_cookbook or \
no_cookbook else 'eventually'
examined_cookbook = Proposition(
name='examined',
arguments=[Variable('cookbook', type='o')],
seperator=SEPERATOR)
if difficulty in {5, 9}:
self.translator.formulas.append(
('eventually', 'player_at_kitchen'),
)
else:
self.translator.formulas.append((
cookbook_prop, str(examined_cookbook)))
self.translator.formulas.append(
progression.standardize((
self.facts_to_ltl(self.win_facts)))
)
else:
self.translator.generate_ltl(first_obs)
self.translator_copy = deepcopy(self.translator)
self.prev_facts = set(
[str(proposition_from_textworld_logic(
f, seperator=SEPERATOR)) for f in facts])
@property
def entities(self):
        string = self.tokenize_recursively(self.translator_copy.formulas[
            -len(self.translator.formulas)])
string = re.sub(' +', ' ', string).strip()
if not self.single_token_prop:
string = string.replace('_', ' ')
preds = [pred for pred in string.split(' ') if '_' in pred]
return {' '.join(tok.split('_')[:-2]) for tok in preds}
def tree(self) -> None:
return None
@property
def formula(self) -> str:
return self._formula
def progress_empty(self, *args, **kwargs):
return 0, False
def progress_diffall(self, facts: List[Proposition], action: str,
done: bool, observation: str):
# will only generate once, handles downstream
if not self.use_ground_truth:
self.translator.generate_ltl(observation)
self.translator_copy = deepcopy(self.translator)
if self._done:
return self._reward, self._done
if not self.translator.formulas:
return self._reward, self._done
if facts:
facts = set([str(proposition_from_textworld_logic(
f, seperator=SEPERATOR)) for f in facts])
events = facts
self.prev_facts = facts
formula = self.translator.formulas[0]
old_len = len(str(formula))
formula = progression.progress_and_clean(formula, events)
progressed = old_len > len(str(formula))
# if formula is True or formula is False:
# break
if formula == 'None':
pdb.set_trace()
if 'always' in str(formula) and 'eventually' not in str(formula):
formula = 'True'
self.translator.formulas[0] = formula
if formula == 'True':
del self.translator.formulas[0]
self._done = len(self.translator.formulas) == 0
if done and not self._done and self.negative_for_fail:
self._reward = -1 * self.reward_scale
self._done = True
self._violated = True
self.translator.violated = True
if not self.single_reward:
self._reward += 1 * self.reward_scale
elif self._done:
self._reward = 1
return self._reward, self._done
elif formula == 'False':
self._violated = True
self.translator.violated = True
if not self.single_reward:
# if self.as_bonus:
# self._reward -= 1 * self.reward_scale
# else:
self._reward = -1 * self.reward_scale
else:
self._reward = -1 * self.reward_scale
self._done = True
self.translator.formulas = list()
return self._reward, self._done
elif progressed and self.reward_per_progression:
assert not self.single_reward
self._reward += 1 * self.reward_scale
if done and not self._done:
self._reward = -1 * self.reward_scale
self._done = True
self._violated = True
self.translator.violated = True
return self._reward, self._done
if done and not self._done:
self._reward = -1 * self.reward_scale
self._done = True
self._violated = True
self.translator.violated = True
return self._reward, False
def facts_to_ltl(self, facts):
"""
Used if toggling ground truth - doesn't capture
"""
ltl = list()
prop = 'eventually'
for q_count, quest in enumerate(facts):
if len(quest) == 0:
continue
if len(quest) > 1:
quest_ltl = ['or']
for prop_tuples in quest:
if len(prop_tuples) == 0:
continue
if len(prop_tuples) > 1:
tuple_ltl = ['and']
for proposition in prop_tuples:
proposition = str(
proposition_from_textworld_logic(proposition,
seperator=SEPERATOR))
if len(prop_tuples) > 1:
tuple_ltl.append((prop, proposition))
else:
tuple_ltl = (prop, proposition)
if not isinstance(tuple_ltl, str):
tuple_ltl = tuple(tuple_ltl)
if len(quest) > 1:
quest_ltl.append(tuple_ltl)
else:
quest_ltl = tuple_ltl
if not isinstance(quest_ltl, str):
quest_ltl = tuple(quest_ltl)
ltl.append(quest_ltl)
if self.next_constrained:
curr_form = None
prev_form = None
for i, prop in enumerate(reversed(ltl)):
rel, pred = prop
if prev_form is None:
curr_form = prop
else:
curr_form = (
'eventually', ('and', pred, ('next', prev_form)))
prev_form = curr_form
ltl = curr_form
else:
curr_form = None
prev_form = None
for i, prop in enumerate(reversed(ltl)):
                rel = 'and'
if prev_form is None:
curr_form = prop
else:
curr_form = (rel, prop, prev_form)
prev_form = curr_form
ltl = curr_form
return tuple(ltl)
def tokenize(self):
if self._violated:
return 'violated'
elif not self.translator.formulas:
return 'success'
if self.no_cookbook and 'cookbook' in str(self.translator.formulas[0]):
return 'null'
if self.dont_progress:
string = self.tokenize_recursively(self.translator_copy.formulas[
-len(self.translator_copy.formulas)])
else:
string = self.tokenize_recursively(self.translator.formulas[0])
string = re.sub(' +', ' ', string).strip()
if not self.single_token_prop:
string = string.replace('_', ' ')
return string
def tokenize_recursively(self, ltl):
string = ''
for item in ltl:
if isinstance(item, tuple):
string += f' {self.tokenize_recursively(item)}'
else:
string += f' {item}'
return string
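# Illustrative conversion sketch: with win_facts shaped as in the module
# docstring at the top of this file and next_constrained=False, facts_to_ltl
# yields (roughly)
#   ('and', ('eventually', 'red_potato_in_player'),
#           ('and', ('eventually', 'red_potato_is_chopped'),
#                   ('and', ('eventually', 'meal_in_player'),
#                           ('eventually', 'meal_is_consumed'))))
# and tokenize() renders the current head formula as a flat token string.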
| 10,141 | 35.351254 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/ltl/progression.py
|
"""
This code allows to progress LTL formulas. It requires installing the SPOT
library:
- https://spot.lrde.epita.fr/install.html
To encode LTL formulas, we use tuples, e.g.,
(
'and',
('until','True', ('and', 'd', ('until','True','c'))),
('until','True', ('and', 'a', ('until','True', ('and', 'b', ('until',
'True','c')))))
)
Some notes about the format:
- It supports the following temporal operators: "next", "until", "always",
and "eventually".
- It supports the following logical operators: "not", "or", "and".
- Propositions are assume to be one char.
- Negations are always followed by a proposition.
- true and false are encoded as "True" and "False"
"""
# from sympy import *
from textworld.logic import Proposition
from logic import Proposition, Variable
import spot
"""
This module contains functions to progress co-safe LTL formulas such as:
(
'and',
('until','True', ('and', 'd', ('until','True','c'))),
('until','True', ('and', 'a', ('until','True', ('and', 'b', ('until',
'True','c')))))
)
"""
def _is_prop_formula(f):
# returns True if the formula does not contains temporal operators
return 'next' not in str(f) and 'until' not in str(f)
def _subsume_until(f1, f2):
if str(f1) not in str(f2):
return False
while type(f2) != str:
if f1 == f2:
return True
if f2[0] == 'until':
f2 = f2[2]
elif f2[0] == 'and':
if _is_prop_formula(f2[1]) and not _is_prop_formula(f2[2]):
f2 = f2[2]
elif not _is_prop_formula(f2[1]) and _is_prop_formula(f2[2]):
f2 = f2[1]
else:
return False
else:
return False
return False
def _subsume_or(f1, f2):
if str(f1) not in str(f2):
return False
while type(f2) != str:
if f1 == f2:
return True
if f2[0] == 'until':
f2 = f2[2]
elif f2[0] == 'and':
if _is_prop_formula(f2[1]) and not _is_prop_formula(f2[2]):
f2 = f2[2]
elif not _is_prop_formula(f2[1]) and _is_prop_formula(f2[2]):
f2 = f2[1]
else:
return False
else:
return False
return False
def standardize(ltl_formula):
f = _get_spot_format(ltl_formula)
f = spot.formula(f)
f = spot.simplify(f)
f = f.__format__('l')
f, r = _get_std_format(f.split(' '))
assert len(r) == 0, "Format error" + str(f) + " " + str(r)
if f == 'None':
import pdb
pdb.set_trace()
return f
def progress_and_clean(ltl_formula, truth_assignment):
ltl = progress(ltl_formula, truth_assignment)
# I am using spot to simplify the resulting ltl formula
ltl_spot = _get_spot_format(ltl)
f = spot.formula(ltl_spot)
f = spot.simplify(f)
ltl_spot = f.__format__("l")
ltl_std, r = _get_std_format(ltl_spot.split(' '))
assert len(r) == 0, "Format error" + str(ltl_std) + " " + str(r)
if ltl_std == 'None':
import pdb
pdb.set_trace()
return ltl_std
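# Behaviour sketch of progress() below (no spot simplification involved):
#   progress(('until', 'True', 'c'), 'c')  -> 'True'
#   progress(('eventually', 'b'), 'a')     -> ('or', ('eventually', 'b'), 'False')
# progress_and_clean additionally runs the result through spot.simplify, so the
# second example would be cleaned back to ('eventually', 'b').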
def spotify(ltl_formula):
ltl_spot = _get_spot_format(ltl_formula)
f = spot.formula(ltl_spot)
f = spot.simplify(f)
ltl_spot = f.__format__("l")
# return ltl_spot
return f # .to_str('latex')
def _get_spot_format(ltl_std):
ltl_spot = str(ltl_std).replace("(", "").replace(")", "").replace(",", "")
ltl_spot = ltl_spot.replace("'until'", "U").replace(
"'not'", "!").replace("'or'", "|").replace("'and'", "&")
ltl_spot = ltl_spot.replace("'next'", "X").replace("'eventually'",
"F").replace(
"'always'", "G").replace("'True'", "t").replace("'False'",
"f").replace("\'",
"\"")
return ltl_spot
def _get_std_format(ltl_spot):
s = ltl_spot[0]
r = ltl_spot[1:]
# if s in ["X", "U", "&", "|"]:
if s in ["U", "&", "|"]:
v1, r1 = _get_std_format(r)
v2, r2 = _get_std_format(r1)
if s == "X":
op = 'next'
if s == "U":
op = 'until'
if s == "&":
op = 'and'
if s == "|":
op = 'or'
return (op, v1, v2), r2
if s in ["X", "F", "G", "!"]:
v1, r1 = _get_std_format(r)
if s == "X":
op = 'next'
if s == "F":
op = 'eventually'
if s == "G":
op = 'always'
if s == "!":
op = 'not'
return (op, v1), r1
if s == "f":
return 'False', r
if s == "t":
return 'True', r
if s[0] == '"':
return s.replace('"', ''), r
assert False, "Format error in spot2std"
def progress(ltl_formula, truth_assignment):
if isinstance(ltl_formula, str):
# True, False, or proposition
# ltl_formula is a proposition
if ltl_formula in truth_assignment:
return 'True'
else:
return 'False'
return ltl_formula
if ltl_formula[0] == 'not':
# negations should be over propositions only according to the cosafe
# ltl syntactic restriction
result = progress(ltl_formula[1], truth_assignment)
if result == 'True':
return 'False'
elif result == 'False':
return 'True'
else:
raise NotImplementedError(
"The following formula doesn't follow the cosafe syntactic " +
"restriction: " + str(ltl_formula))
if ltl_formula[0] == 'and':
res1 = progress(ltl_formula[1], truth_assignment)
res2 = progress(ltl_formula[2], truth_assignment)
if res1 == 'True' and res2 == 'True':
return 'True'
if res1 == 'False' or res2 == 'False':
return 'False'
if res1 == 'True':
return res2
if res2 == 'True':
return res1
if res1 == res2:
return res1
# if _subsume_until(res1, res2): return res2
# if _subsume_until(res2, res1): return res1
return ('and', res1, res2)
if ltl_formula[0] == 'or':
res1 = progress(ltl_formula[1], truth_assignment)
res2 = progress(ltl_formula[2], truth_assignment)
if res1 == 'True' or res2 == 'True':
return 'True'
if res1 == 'False' and res2 == 'False':
return 'False'
if res1 == 'False':
return res2
if res2 == 'False':
return res1
if res1 == res2:
return res1
# if _subsume_until(res1, res2): return res1
# if _subsume_until(res2, res1): return res2
return ('or', res1, res2)
if ltl_formula[0] == 'next':
return progress(ltl_formula[1], truth_assignment)
# NOTE: What about release and other temporal operators?
if ltl_formula[0] == 'eventually':
res = progress(ltl_formula[1], truth_assignment)
return ("or", ltl_formula, res)
if ltl_formula[0] == 'always':
res = progress(ltl_formula[1], truth_assignment)
return ("and", ltl_formula, res)
if ltl_formula[0] == 'until':
res1 = progress(ltl_formula[1], truth_assignment)
res2 = progress(ltl_formula[2], truth_assignment)
if res1 == 'False':
f1 = 'False'
elif res1 == 'True':
f1 = ('until', ltl_formula[1], ltl_formula[2])
else:
f1 = ('and', res1, ('until', ltl_formula[1], ltl_formula[2]))
if res2 == 'True':
return 'True'
if res2 == 'False':
return f1
# Returning ('or', res2, f1)
# if _subsume_until(f1, res2): return f1
# if _subsume_until(res2, f1): return res2
return ('or', res2, f1)
import pdb
pdb.set_trace()
if __name__ == '__main__':
# ltl = ('and',('eventually','a'),('and',('eventually','b'),('eventually',
# 'c')))
# ltl = ('and',('eventually','a'),('eventually',('and','b',('eventually',
# 'c'))))
# ltl = ('until',('not','a'),('and', 'b', ('eventually','d')))
ltl = ('until', ('not', 'a'), ('or', 'b', ('until', ('not', 'c'), 'd')))
while True:
print(ltl)
props = input()
ltl = progress_and_clean(ltl, props)
| 8,526 | 29.453571 | 78 |
py
|
LTL-GATA
|
LTL-GATA-main/src/ltl/translator.py
|
import re
from logic import Proposition, Variable, proposition_from_textworld_logic
import pdb
prep_map = {
'fry': 'fried',
'roast': 'roasted',
'grill': 'grilled',
'chop': 'chopped',
'dice': 'diced',
'slice': 'sliced',
}
ingredients = {
'banana', 'pork chop', 'carrot', 'parsley',
'chicken leg', 'yellow bell pepper',
'red potato', 'chicken wing', 'purple potato',
'yellow potato', 'black pepper', 'block of cheese',
'flour', 'cilantro', 'white onion', 'olive oil',
'orange bell pepper', 'water', 'red hot pepper',
'salt', 'red onion', 'red apple'
}
class Translator:
def __init__(self, level: int) -> None:
self.level = level
self.formulas = list()
self.first = True
self.cookbook_read = False
self.violated = False
def generate_ltl(self, obs: str) -> None:
if self.cookbook_read or self.violated:
return
if 'check the cookbook' in obs and self.first: # first obs
self.first = False
if self.level in {5, 9, 'mixed'}:
at_kitchen = Proposition(
name='at',
arguments=[Variable('player', type='I'),
Variable('kitchen', type='r')],
seperator='_')
self.formulas.append(('eventually', str(at_kitchen)))
examined_cookbook = Proposition(
name='examined',
arguments=[Variable('cookbook', type='o')],
seperator='_')
# self.formulas.append(('next', str(examined_cookbook)))
consumed = Proposition(
name='consumed',
arguments=[Variable('meal', type='meal')],
seperator='_')
self.formulas.append(('and', ('next', str(examined_cookbook)),
('eventually', str(consumed))))
# self.formulas.append(('eventually', str(consumed)))
elif not self.cookbook_read and \
'ingredients :' in obs and 'directions :' in obs:
self.cookbook_read = True
if 'gather' in obs and 'follow the directions' in obs:
split_ing = obs.split('ingredients : ')[-1]
split = split_ing.split(' directions :')
ings = [x for x in ingredients if x in split[0].strip()]
string = split[0].strip()
for i, ing in enumerate(ings):
string = string.replace(ing, f"xx{i}")
indices = list()
for x in string.split('xx'):
try:
x = int(x)
indices.append(x)
except Exception:
pass
ings = [ings[i] for i in indices]
prepare = 'prepare meal' in obs
preps = list()
if prepare:
# ' dice the orange bell pepper roast the orange bell pepper chop the purple potato fry the purple potato '
# string = split[1].replace('prepare meal', '').replace(
# f'the {ing}', '').strip()
string = split[1].replace(
'prepare meal', '').replace('the', '').strip()
                string = re.sub(r'\s+', ' ', string)
content = string.split(' ')
action = None
actionable_ing = None
if content == [""]:
content = list()
prev_word = ''
for word in content:
if word in prep_map.keys() and not (prev_word == 'pork' and word == 'chop'):
if action is not None:
                            actionable_ing = re.sub(
                                r'\s+', ' ', actionable_ing)
actionable_ing = actionable_ing.strip()
preps.append((action, actionable_ing))
action = word
actionable_ing = ''
else:
actionable_ing += f' {word}'
prev_word = word
if action is not None:
                        actionable_ing = re.sub(
                            r'\s+', ' ', actionable_ing)
actionable_ing = actionable_ing.strip()
preps.append((action, actionable_ing))
# preps = string.split(' ')
if preps == ['']:
preps = list()
props = [
('eventually', str(Proposition(
name='in',
arguments=[
Variable(ing, type='f'),
Variable('player', type='I'), ],
seperator='_'))) for ing in ings]
for p, ing in preps:
try:
method = prep_map[p]
                except KeyError:
pdb.set_trace()
props.append(('eventually', str(Proposition(
name=method,
arguments=[
Variable(ing, type='f'),
],
seperator='_'))))
if prepare:
props.append(('eventually', str(Proposition(
name='in',
arguments=[
Variable('meal', type='meal'),
Variable('player', type='I'), ],
seperator='_'))))
# props.append(self.formulas[-1])
curr_form = None
prev_form = None
for i, prop in enumerate(reversed(props)):
rel = 'and'
if prev_form is None:
curr_form = prop
else:
curr_form = (rel, prop, prev_form)
prev_form = curr_form
ltl = curr_form
self.formulas.append(tuple(ltl))
| 6,265 | 41.053691 | 127 |
py
|
LTL-GATA
|
LTL-GATA-main/src/model/features.py
|
from typing import List, Tuple
from argparse import Namespace
import pdb
from torch.autograd import Variable
import torch
from model.layers import Embedding, EncoderBlock, SelfAttention
from utils import max_len, to_pt, pad_sequences
class SimpleMLP(torch.nn.Module):
def __init__(self,
word_embedding_size: int,
action_net_hidden_size: int, **kwargs):
super(SimpleMLP, self).__init__(**kwargs)
self.layers = torch.nn.Sequential(
torch.nn.Linear(word_embedding_size, word_embedding_size),
torch.nn.ReLU(),
torch.nn.Linear(word_embedding_size, action_net_hidden_size),
)
def forward(self, inputs: torch.Tensor,) -> torch.Tensor:
out = self.layers(inputs)
return out
class SimpleLSTM(torch.nn.Module):
def __init__(self,
word_embedding_size: int,
hidden_size: int,
num_layers: int,
action_net_hidden_size: int,
**kwargs):
super(SimpleLSTM, self).__init__(**kwargs)
self.num_layers = num_layers
self.hidden_size = hidden_size
self.lstm = torch.nn.LSTM(
input_size=word_embedding_size,
hidden_size=hidden_size,
num_layers=num_layers, batch_first=True)
self.head = torch.nn.Sequential(
torch.nn.ReLU(),
torch.nn.Linear(hidden_size, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, action_net_hidden_size)
)
self._dummy = torch.nn.Parameter(torch.empty(0))
@property
def device(self) -> str:
return self._dummy.device
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
batch_size, length, hidden = inputs.shape
h_0 = Variable(torch.zeros(self.num_layers, batch_size,
self.hidden_size)).to(device=self.device)
c_0 = Variable(torch.zeros(self.num_layers, batch_size,
self.hidden_size)).to(device=self.device)
output, (hn, cn) = self.lstm(inputs, (h_0, c_0))
hn = hn.view(-1, self.hidden_size)
out = self.head(hn)
return out
class TextEncoder(torch.nn.Module):
def __init__(self,
config: Namespace,
vocab: List[str],
**kwargs) -> None:
super(TextEncoder, self).__init__(**kwargs)
self._dummy = torch.nn.Parameter(torch.empty(0))
for k, v in vars(config).items():
setattr(self, k, v)
self.load_vocab(vocab)
if self.use_pretrained_lm:
from model.pretrained_lm import get_model_tokenizer
self.lm, self.tokenizer = get_model_tokenizer(
self.pretrained_lm_name,
self.pretrained_lm_checkpoint)
# ----- Add Tokens -----
vocab += list(self.tokenizer.special_tokens_map.values())
# tokens = list(dict.fromkeys(tokens))
add_tokens = [token for token in vocab.tokens if token not in
self.tokenizer.vocab]
self.tokenizer.add_tokens(add_tokens)
self.lm.resize_token_embeddings(len(self.tokenizer))
if self.pretrained_lm_name == 'bert':
embedding_dim = 768
self.encoder = torch.nn.Linear(embedding_dim,
self.action_net_hidden_size)
# ----- Add Tokens -----
# ----- Delete Unused Tokens -----
# count = 0
# word_embedding_idxs = list()
# vocab = list(self.tokenizer.get_vocab().items())
# for tok, idx in vocab:
# if tok not in tokens:
# del self.tokenizer.vocab[tok]
# else:
# self.tokenizer.vocab[tok] = count
# word_embedding_idxs.append(idx)
# count += 1
# self.tokenizer.added_tokens_encoder.clear()
# self.tokenizer.added_tokens_decoder.clear()
# assert len(self.tokenizer) == len(tokens)
# word_embeddings = self.lm.embeddings.word_embeddings.weight[
# word_embedding_idxs]
# self.lm.resize_token_embeddings(len(self.tokenizer))
# self.lm.embeddings.word_embeddings.weight.data = word_embeddings
# ----- Delete Unused Tokens -----
else:
self.word_embedding = Embedding(
embedding_size=self.word_embedding_size,
vocab_size=len(vocab),
id2word=vocab.tokens,
dropout_rate=0.,
load_pretrained=True,
trainable=False,
embedding_oov_init="random" if not
self.one_hot_encoding else 'onehot',
pretrained_embedding_path=self.pretrained_embedding_path)
self.word_embedding_prj = torch.nn.Linear(
self.word_embedding_size, self.action_net_hidden_size,
bias=False)
if self.self_attention:
self.self_attention = SelfAttention(
self.action_net_hidden_size, self.n_heads, 0.)
else:
self.self_attention = None
if config.lstm_backbone:
self.encoder = SimpleLSTM(
word_embedding_size=self.word_embedding_size,
hidden_size=128,
num_layers=self.num_encoders,
action_net_hidden_size=self.action_net_hidden_size)
elif config.mlp_backbone:
self.encoder = SimpleMLP(
word_embedding_size=self.word_embedding_size,
action_net_hidden_size=self.action_net_hidden_size)
else:
self.encoder = torch.nn.ModuleList([
EncoderBlock(
conv_num=self.encoder_conv_num,
ch_num=self.action_net_hidden_size, k=5,
block_hidden_dim=self.action_net_hidden_size,
n_head=self.n_heads, dropout=0.,)
for _ in range(self.num_encoders)])
self.num_encoders = self.num_encoders
if not self.trainable:
for param in self.parameters():
param.requires_grad = False
if self.mlm_loss:
self.mlm_head = torch.nn.Sequential(
torch.nn.Linear(self.action_net_hidden_size,
self.action_net_hidden_size, bias=True),
torch.nn.LayerNorm(self.action_net_hidden_size,
eps=1e-12, elementwise_affine=True),
torch.nn.Linear(self.action_net_hidden_size,
len(self.vocab), bias=True),
)
if False:
self.inverse_dynamics = torch.nn.Sequential(
torch.nn.Linear(self.action_net_hidden_size * 2,
self.action_net_hidden_size * 2),
torch.nn.ReLU(),
torch.nn.Linear(self.action_net_hidden_size * 2,
self.action_net_hidden_size)
)
def load_vocab(self, vocab) -> None:
self.vocab = vocab
@property
def device(self) -> str:
return self._dummy.device
def compute_inverse_dynamics_loss(self, obs: List[str],
next_obs: List[str],
actions: List[str],
hat: bool = True):
encoded_obs, obs_mask = self.forward(obs)
encoded_next_obs, next_obs_mask = self.forward(next_obs)
encoded_obs = torch.sum(encoded_obs, dim=1)
encoded_next_obs = torch.sum(encoded_next_obs, dim=1)
actions_inv = self.inverse_dynamics(torch.cat((
encoded_obs, encoded_next_obs - encoded_obs), dim=1))
return None
def compute_mlm_loss(self, text: List[str]):
if not self.mlm_loss:
return None
inputs = self.tokenize(text)
labels = torch.clone(inputs)
rand = torch.rand(inputs.shape, device=self.device)
masking_mask = (rand < 0.15) * (inputs != self.vocab.pad_token_id)
inputs[torch.where(masking_mask)] = self.vocab.mask_token_id
sequence_output, _ = self.forward(inputs, compute_word_ids=False)
predictions = self.mlm_head(sequence_output)
# print([self.vocab.tokens[idx]
# for idx in inputs[0] if idx != self.vocab.pad_token_id])
# print([self.vocab.tokens[idx]
# for idx in torch.argmax(predictions[0], dim=1)
# if idx != self.vocab.pad_token_id])
loss_fcn = torch.nn.CrossEntropyLoss()
labels[torch.where(labels == self.vocab.pad_token_id)] = -100
return loss_fcn(predictions.view((-1, len(self.vocab))),
labels.view(-1))
def tokenize(self, text: List[str]) -> torch.Tensor:
word_list = [item.split() for item in text]
word_id_list = [[self.vocab[tok] for tok in tokens]
for tokens in word_list]
input_word = pad_sequences(
word_id_list, maxlen=max_len(word_id_list)).astype('int32')
input_word_ids = to_pt(input_word, True)
return input_word_ids
def forward(self, text: List[str],
compute_word_ids: bool = True,
position_encoding_method: str = 'cossine',
trees=None) -> Tuple[torch.Tensor, torch.Tensor]:
"""
@arguments:
text: list of strings of shape [batch-size]
            compute_word_ids: when False, `text` is already a tensor of
                word ids and is not re-tokenized here
@returns:
encodings: encoded text, Tensor, size
[batch-size, sequence-length, embed-size]
mask: mask of size
[batch-size, sequence-length]
"""
if self.use_pretrained_lm:
inputs = self.tokenizer.batch_encode_plus(
text, padding=True, add_special_tokens=True,
return_tensors='pt')
for k, v in inputs.items():
inputs[k] = v.to(self.device)
outputs = self.lm(**inputs)
outputs = outputs.last_hidden_state
outputs = self.encoder(outputs)
return outputs, inputs.attention_mask
else:
if compute_word_ids:
input_word_ids = self.tokenize(text)
else:
input_word_ids = text
embeddings, mask = self.word_embedding(
input_word_ids) # batch x time x emb
squared_mask = torch.bmm(mask.unsqueeze(-1), mask.unsqueeze(1))
position_encoding = None
if position_encoding_method == 'tree':
raise NotImplementedError
if self.lstm_backbone:
encoded_text = embeddings
encoded_text = self.encoder(encoded_text)
elif self.mlp_backbone:
encoded_text = torch.sum(embeddings, dim=1)
encoded_text = self.encoder(encoded_text)
else:
embeddings = self.word_embedding_prj(embeddings)
embeddings = embeddings * \
mask.unsqueeze(-1) # batch x time x hid
encoded_text = embeddings
for i in range(self.num_encoders):
# batch x time x enc
encoded_text = self.encoder[i](
encoded_text, squared_mask, i * (
self.encoder_conv_num + 2) + 1, self.num_encoders,
position_encoding_method=position_encoding_method,
position_encoding=position_encoding)
if self.self_attention is not None:
mask_squared = torch.bmm(mask.unsqueeze(dim=-1),
mask.unsqueeze(dim=1))
encoded_text, _ = self.self_attention(
encoded_text, mask_squared, encoded_text, encoded_text)
return encoded_text, mask
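# Shape sketch (assumes a TextEncoder `enc` already built with the repo's
# config and vocab; the input strings are illustrative):
# encoded, mask = enc(["open the fridge", "take the knife"])
# encoded.shape -> [2, seq_len, action_net_hidden_size]
# mask.shape    -> [2, seq_len]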
class BagOfWords(torch.nn.Module):
def __init__(self,
config: Namespace,
vocab: List[str],
**kwargs) -> None:
super(BagOfWords, self).__init__(**kwargs)
self._dummy = torch.nn.Parameter(torch.empty(0))
for k, v in vars(config).items():
setattr(self, k, v)
self.load_vocab(vocab)
def load_vocab(self, vocab) -> None:
self.vocab = vocab
@property
def device(self) -> str:
return self._dummy.device
def tokenize(self, text: List[str]) -> torch.Tensor:
word_list = [item.split() for item in text]
word_id_list = [[self.vocab[tok] for tok in tokens]
for tokens in word_list]
input_word = pad_sequences(
word_id_list, maxlen=max_len(word_id_list)).astype('int32')
input_word_ids = to_pt(input_word, True)
return input_word_ids
def forward(self, text: List[str],
compute_word_ids: bool = True,
position_encoding_method: str = 'cossine',
trees=None) -> Tuple[torch.Tensor, torch.Tensor]:
...
| 13,426 | 41.090909 | 78 |
py
|
LTL-GATA
|
LTL-GATA-main/src/model/utils.py
|
import torch
import math
def get_timing_signal(length: int, channels: int,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4) -> torch.Tensor:
position = torch.arange(length).type(torch.float32)
num_timescales = channels // 2
log_timescale_increment = (math.log(
float(max_timescale) / float(min_timescale)) /
(float(num_timescales)-1))
inv_timescales = min_timescale * torch.exp(
torch.arange(num_timescales).type(torch.float32
) * -log_timescale_increment)
scaled_time = position.unsqueeze(1) * inv_timescales.unsqueeze(0)
signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1)
m = torch.nn.ZeroPad2d((0, (channels % 2), 0, 0))
signal = m(signal)
signal = signal.view(1, length, channels)
return signal
def PosEncoder(x: torch.Tensor,
min_timescale: float = 1.0,
max_timescale: float = 1.0e4):
length = x.size(1)
channels = x.size(2)
signal = get_timing_signal(length, channels, min_timescale, max_timescale)
return x + (signal.cuda() if x.is_cuda else signal)
def TreePosEncoder(x):
...
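# Usage sketch: PosEncoder adds sinusoidal position information to a batch of
# sequence encodings without changing its shape (values are illustrative).
# x = torch.zeros(2, 7, 16)
# y = PosEncoder(x)
# y.shape -> torch.Size([2, 7, 16])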
| 1,224 | 34 | 79 |
py
|
LTL-GATA
|
LTL-GATA-main/src/model/layers.py
|
import torch.nn.functional as F
import numpy as np
import torch
import h5py
from model.utils import PosEncoder, TreePosEncoder
class H5EmbeddingManager(object):
def __init__(self, h5_path):
f = h5py.File(h5_path, 'r')
self.W = np.array(f['embedding'])
# print("embedding data type=%s, shape=%s" %
# (type(self.W), self.W.shape))
self.id2word = f['words_flatten'][0].split(b'\n')
self.id2word = [item.decode("utf-8") for item in self.id2word]
self.word2id = dict(zip(self.id2word, range(len(self.id2word))))
def __getitem__(self, item):
item_type = type(item)
if item_type is str:
index = self.word2id[item]
embs = self.W[index]
return embs
else:
raise RuntimeError("don't support type: %s" % type(item))
def word_embedding_initialize(self, words_list, dim_size=300, scale=0.1,
oov_init='random'):
shape = (len(words_list), dim_size)
self.rng = np.random.RandomState(42)
if 'zero' == oov_init:
W2V = np.zeros(shape, dtype='float32')
elif 'one' == oov_init:
W2V = np.ones(shape, dtype='float32')
elif 'onehot' == oov_init:
if len(words_list) > dim_size:
raise ValueError("Can't one-hot encode vocab size > dim size")
W2V = np.zeros((dim_size, dim_size))
np.fill_diagonal(W2V, 1)
assert (np.diag(W2V) == np.ones(dim_size)).all()
return W2V
else:
W2V = self.rng.uniform(
low=-scale, high=scale, size=shape).astype('float32')
        W2V[0, :] = 0  # index 0 is reserved for padding
        in_vocab = np.ones(shape[0], dtype=bool)
word_ids = list()
for i, word in enumerate(words_list):
if '_' in word:
ids = [self.word2id[w]
if w in self.word2id else None for w in word.split('_')]
if not any(ids):
in_vocab[i] = False
else:
word_ids.append(ids)
elif word in self.word2id:
word_ids.append(self.word2id[word])
else:
in_vocab[i] = False
for i, (add, ids) in enumerate(zip(in_vocab, word_ids)):
if add:
if isinstance(ids, list):
W2V[i] = np.mean([self.W[x] for x in ids], axis=0)
else:
W2V[i] = self.W[ids]
# W2V[in_vocab] = self.W[np.array(word_ids, dtype='int32')][:, :dim_size]
return W2V
class Embedding(torch.nn.Module):
'''
inputs: x: batch x ...
outputs:embedding: batch x ... x emb
mask: batch x ...
'''
def __init__(self, embedding_size, vocab_size, dropout_rate=0.0,
trainable=True, id2word=None,
embedding_oov_init='random', load_pretrained=False,
pretrained_embedding_path=None):
super(Embedding, self).__init__()
self.embedding_size = embedding_size
self.vocab_size = vocab_size
self.id2word = id2word
self.dropout_rate = dropout_rate
self.load_pretrained = load_pretrained
self.embedding_oov_init = embedding_oov_init
self.pretrained_embedding_path = pretrained_embedding_path
self.trainable = trainable
self.embedding_layer = torch.nn.Embedding(
self.vocab_size, self.embedding_size, padding_idx=0)
self.init_weights()
def init_weights(self):
init_embedding_matrix = self.embedding_init()
if self.embedding_layer.weight.is_cuda:
init_embedding_matrix = init_embedding_matrix.cuda()
self.embedding_layer.weight = torch.nn.Parameter(init_embedding_matrix)
if not self.trainable:
self.embedding_layer.weight.requires_grad = False
def embedding_init(self):
# Embeddings
if self.load_pretrained is False:
word_embedding_init = np.random.uniform(
low=-0.05, high=0.05, size=(self.vocab_size,
self.embedding_size))
word_embedding_init[0, :] = 0
else:
embedding_initr = H5EmbeddingManager(
self.pretrained_embedding_path)
word_embedding_init = embedding_initr.word_embedding_initialize(
self.id2word,
dim_size=self.embedding_size,
oov_init=self.embedding_oov_init)
del embedding_initr
word_embedding_init = torch.from_numpy(word_embedding_init).float()
return word_embedding_init
def compute_mask(self, x):
mask = torch.ne(x, 0).float()
if x.is_cuda:
mask = mask.cuda()
return mask
def forward(self, x):
embeddings = self.embedding_layer(x) # batch x time x emb
embeddings = F.dropout(
embeddings, p=self.dropout_rate, training=self.training)
mask = self.compute_mask(x) # batch x time
return embeddings, mask
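# Illustrative usage sketch, not part of the original file: the Embedding
# module returns the looked-up vectors together with a padding mask derived
# from id 0. The vocabulary size and ids below are assumptions.
def _embedding_example():
    emb = Embedding(embedding_size=8, vocab_size=10, dropout_rate=0.0)
    ids = torch.tensor([[1, 2, 3, 0, 0]])  # 0 is the padding index
    vectors, mask = emb(ids)
    assert vectors.shape == (1, 5, 8)
    assert mask.tolist() == [[1.0, 1.0, 1.0, 0.0, 0.0]]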
class ScaledDotProductAttention(torch.nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = torch.nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
attn = masked_softmax(attn, mask, 2)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
class SelfAttention(torch.nn.Module):
''' From Multi-Head Attention module
https://github.com/jadore801120/attention-is-all-you-need-pytorch'''
def __init__(self, block_hidden_dim, n_head, dropout=0.1):
super().__init__()
self.n_head = n_head
self.block_hidden_dim = block_hidden_dim
self.w_qs = torch.nn.Linear(
block_hidden_dim, n_head * block_hidden_dim, bias=False)
self.w_ks = torch.nn.Linear(
block_hidden_dim, n_head * block_hidden_dim, bias=False)
self.w_vs = torch.nn.Linear(
block_hidden_dim, n_head * block_hidden_dim, bias=False)
torch.nn.init.normal_(self.w_qs.weight, mean=0,
std=np.sqrt(2.0 / (block_hidden_dim * 2)))
torch.nn.init.normal_(self.w_ks.weight, mean=0,
std=np.sqrt(2.0 / (block_hidden_dim * 2)))
torch.nn.init.normal_(self.w_vs.weight, mean=0,
std=np.sqrt(2.0 / (block_hidden_dim * 2)))
self.attention = ScaledDotProductAttention(
temperature=np.power(block_hidden_dim, 0.5))
self.fc = torch.nn.Linear(n_head * block_hidden_dim, block_hidden_dim)
self.layer_norm = torch.nn.LayerNorm(self.block_hidden_dim)
torch.nn.init.xavier_normal_(self.fc.weight)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, q, mask, k, v):
# q: batch x len_q x hid
# k: batch x len_k x hid
# v: batch x len_v x hid
# mask: batch x len_q x len_k
batch_size, len_q = q.size(0), q.size(1)
len_k, len_v = k.size(1), v.size(1)
assert mask.size(1) == len_q
assert mask.size(2) == len_k
residual = q
q = self.w_qs(q).view(batch_size, len_q,
self.n_head, self.block_hidden_dim)
k = self.w_ks(k).view(batch_size, len_k,
self.n_head, self.block_hidden_dim)
v = self.w_vs(v).view(batch_size, len_v,
self.n_head, self.block_hidden_dim)
q = q.permute(2, 0, 1, 3).contiguous().view(
-1, len_q, self.block_hidden_dim) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(
-1, len_k, self.block_hidden_dim) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(
-1, len_v, self.block_hidden_dim) # (n*b) x lv x dv
mask = mask.repeat(self.n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(self.n_head, batch_size,
len_q, self.block_hidden_dim)
output = output.permute(1, 2, 0, 3).contiguous().view(
batch_size, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
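# Illustrative usage sketch, not part of the original file: SelfAttention
# takes query/key/value of shape batch x len x hidden plus a
# batch x len_q x len_k mask and returns a tensor shaped like the query;
# the attention weights are stacked per head. Sizes are assumptions.
def _self_attention_example():
    batch, length, hid, heads = 2, 5, 16, 4
    attention = SelfAttention(block_hidden_dim=hid, n_head=heads, dropout=0.0)
    x = torch.rand(batch, length, hid)
    mask = torch.ones(batch, length, length)
    out, weights = attention(x, mask, x, x)
    assert out.shape == (batch, length, hid)
    assert weights.shape == (heads * batch, length, length)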
class DepthwiseSeparableConv(torch.nn.Module):
def __init__(self, in_ch, out_ch, k, bias=True):
super().__init__()
self.depthwise_conv = torch.nn.Conv1d(
in_channels=in_ch, out_channels=in_ch,
kernel_size=k, groups=in_ch, padding=k // 2, bias=False)
self.pointwise_conv = torch.nn.Conv1d(
in_channels=in_ch, out_channels=out_ch,
kernel_size=1, padding=0, bias=bias)
def forward(self, x):
x = x.transpose(1, 2)
res = torch.relu(self.pointwise_conv(self.depthwise_conv(x)))
res = res.transpose(1, 2)
return res
class EncoderBlock(torch.nn.Module):
def __init__(self, conv_num, ch_num, k, block_hidden_dim, n_head, dropout):
super().__init__()
self.dropout = dropout
self.convs = torch.nn.ModuleList(
[DepthwiseSeparableConv(ch_num, ch_num, k) for _ in range(
conv_num)])
self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
self.norm_C = torch.nn.ModuleList(
[torch.nn.LayerNorm(block_hidden_dim) for _ in range(conv_num)])
self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
self.conv_num = conv_num
def forward(self, x, mask, layer, blks,
position_encoding_method: str = 'cossine',
position_encoding: torch.Tensor = None):
total_layers = (self.conv_num + 2) * blks
# conv layers
if position_encoding is not None:
out = x + position_encoding
else:
if position_encoding_method == 'cossine':
position_encoder = PosEncoder
else:
raise ValueError("Unkown position encoding method " +
f"{position_encoding_method}")
out = position_encoder(x)
for i, conv in enumerate(self.convs):
res = out
out = self.norm_C[i](out)
            if i % 2 == 0:
out = F.dropout(out, p=self.dropout, training=self.training)
out = conv(out)
out = self.layer_dropout(
out, res, self.dropout * float(layer) / total_layers)
layer += 1
res = out
out = self.norm_1(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# self attention
out, _ = self.self_att(out, mask, out, out)
out = self.layer_dropout(
out, res, self.dropout * float(layer) / total_layers)
layer += 1
res = out
out = self.norm_2(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# fully connected layers
out = self.FFN_1(out)
out = torch.relu(out)
out = self.FFN_2(out)
out = self.layer_dropout(
out, res, self.dropout * float(layer) / total_layers)
layer += 1
return out
def layer_dropout(self, inputs, residual, dropout):
if self.training is True:
pred = torch.empty(1).uniform_(0, 1) < dropout
if pred:
return residual
else:
return F.dropout(inputs, dropout,
training=self.training) + residual
else:
return inputs + residual
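# Illustrative usage sketch, not part of the original file: a single
# EncoderBlock pass over already-embedded sequences; `layer` and `blks` only
# scale the stochastic layer-dropout rate. Sizes are assumptions.
def _encoder_block_example():
    batch, length, hid = 2, 6, 16
    block = EncoderBlock(conv_num=2, ch_num=hid, k=3,
                         block_hidden_dim=hid, n_head=2, dropout=0.0)
    block.eval()  # disable stochastic layer dropout for a deterministic pass
    x = torch.rand(batch, length, hid)
    mask = torch.ones(batch, length, length)
    out = block(x, mask, layer=1, blks=1)
    assert out.shape == (batch, length, hid)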
class DecoderBlock(torch.nn.Module):
def __init__(self, ch_num, k, block_hidden_dim, n_head, dropout):
super().__init__()
self.dropout = dropout
self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.obs_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.node_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.FFN_0 = torch.nn.Linear(block_hidden_dim * 2, block_hidden_dim)
self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
def forward(self, x, mask, self_att_mask, obs_enc_representations,
obs_mask, node_enc_representations, node_mask, layer, blks):
total_layers = blks * 3
# conv layers
out = PosEncoder(x)
res = out
# self attention
out, _ = self.self_att(out, self_att_mask, out, out)
out_self = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out_self, res, self.dropout * float(layer) / total_layers)
layer += 1
res = out
out = self.norm_1(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# attention with encoder outputs
out_obs, obs_attention = self.obs_att(
out, obs_mask, obs_enc_representations, obs_enc_representations)
out_node, _ = self.node_att(
out, node_mask, node_enc_representations, node_enc_representations)
out = torch.cat([out_obs, out_node], -1)
out = self.FFN_0(out)
out = torch.relu(out)
out = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out, res, self.dropout * float(layer) / total_layers)
layer += 1
res = out
out = self.norm_2(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# Fully connected layers
out = self.FFN_1(out)
out = torch.relu(out)
out = self.FFN_2(out)
out = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out, res, self.dropout * float(layer) / total_layers)
layer += 1
return out, out_self, out_obs, obs_attention
def layer_dropout(self, inputs, residual, dropout):
if self.training is True:
pred = torch.empty(1).uniform_(0, 1) < dropout
if pred:
return residual
else:
return F.dropout(inputs, dropout,
training=self.training) + residual
else:
return inputs + residual
class PointerSoftmax(torch.nn.Module):
def __init__(self, input_dim, hidden_dim):
super().__init__()
self.pointer_softmax_context = torch.nn.Linear(input_dim, hidden_dim)
self.pointer_softmax_target = torch.nn.Linear(input_dim, hidden_dim)
self.pointer_softmax_squash = torch.nn.Linear(hidden_dim, 1)
def forward(self, target_target_representations,
target_source_representations, trg_decoder_output,
target_mask, target_source_attention, source_mask,
input_source):
# target_target_representations: batch x target_len x hid
# target_source_representations: batch x target_len x hid
# trg_decoder_output: batch x target len x vocab
# target mask: batch x target len
# target_source_attention: batch x target len x source len
# source mask: batch x source len
# input source: batch x source len
batch_size = target_source_attention.size(0)
target_len = target_source_attention.size(1)
source_len = target_source_attention.size(2)
switch = self.pointer_softmax_context(
target_source_representations) # batch x trg_len x hid
# batch x trg_len x hid
switch = switch + \
self.pointer_softmax_target(target_target_representations)
switch = torch.tanh(switch)
switch = switch * target_mask.unsqueeze(-1)
switch = self.pointer_softmax_squash(
switch).squeeze(-1) # batch x trg_len
switch = torch.sigmoid(switch)
switch = switch * target_mask # batch x target len
switch = switch.unsqueeze(-1) # batch x target len x 1
target_source_attention = target_source_attention * \
source_mask.unsqueeze(1)
from_vocab = trg_decoder_output # batch x target len x vocab
from_source = torch.autograd.Variable(torch.zeros(
batch_size * target_len,
from_vocab.size(-1))) # batch x target len x vocab
if from_vocab.is_cuda:
from_source = from_source.cuda()
input_source = input_source.unsqueeze(1).expand(
batch_size, target_len, source_len)
input_source = input_source.contiguous().view(
batch_size * target_len, -1) # batch*target_len x source_len
from_source = from_source.scatter_add_(
1, input_source, target_source_attention.view(
batch_size * target_len, -1))
# batch x target_len x vocab
from_source = from_source.view(batch_size, target_len, -1)
# batch x target_len x vocab
merged = switch * from_vocab + (1.0 - switch) * from_source
merged = merged * target_mask.unsqueeze(-1)
return merged
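# Illustrative usage sketch, not part of the original file: PointerSoftmax
# mixes the decoder's vocabulary distribution with a copy distribution built
# from the source attention. All sizes and tensors below are assumptions.
def _pointer_softmax_example():
    batch, trg_len, src_len, hid, vocab = 2, 3, 4, 8, 10
    ps = PointerSoftmax(input_dim=hid, hidden_dim=hid)
    trg_trg = torch.rand(batch, trg_len, hid)
    trg_src = torch.rand(batch, trg_len, hid)
    decoder_output = torch.softmax(torch.rand(batch, trg_len, vocab), dim=-1)
    trg_mask = torch.ones(batch, trg_len)
    src_mask = torch.ones(batch, src_len)
    trg_src_attention = torch.softmax(torch.rand(batch, trg_len, src_len), -1)
    input_source = torch.randint(0, vocab, (batch, src_len))
    merged = ps(trg_trg, trg_src, decoder_output, trg_mask,
                trg_src_attention, src_mask, input_source)
    assert merged.shape == (batch, trg_len, vocab)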
def masked_softmax(x: torch.Tensor, m: torch.Tensor = None,
axis: int = -1) -> torch.Tensor:
'''
Softmax with mask (optional)
'''
x = torch.clamp(x, min=-15.0, max=15.0)
if m is not None:
m = m.float()
x = x * m
e_x = torch.exp(x - torch.max(x, dim=axis, keepdim=True)[0])
if m is not None:
e_x = e_x * m
softmax = e_x / (torch.sum(e_x, dim=axis, keepdim=True) + 1e-6)
return softmax
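# Illustrative usage sketch, not part of the original file: masked positions
# get zero probability and the remaining weights still sum to roughly one.
def _masked_softmax_example():
    scores = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    mask = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
    probs = masked_softmax(scores, mask, axis=-1)
    assert probs[0, 2].item() == 0.0 and probs[0, 3].item() == 0.0
    assert abs(probs[0, :2].sum().item() - 1.0) < 1e-3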
class CQAttention(torch.nn.Module):
def __init__(self, block_hidden_dim: int, dropout: float = 0.):
super().__init__()
self.dropout = dropout
w4C = torch.empty(block_hidden_dim, 1)
w4Q = torch.empty(block_hidden_dim, 1)
w4mlu = torch.empty(1, 1, block_hidden_dim)
torch.nn.init.xavier_uniform_(w4C)
torch.nn.init.xavier_uniform_(w4Q)
torch.nn.init.xavier_uniform_(w4mlu)
self.w4C = torch.nn.Parameter(w4C)
self.w4Q = torch.nn.Parameter(w4Q)
self.w4mlu = torch.nn.Parameter(w4mlu)
bias = torch.empty(1)
torch.nn.init.constant_(bias, 0)
self.bias = torch.nn.Parameter(bias)
def forward(self, C: torch.Tensor, Q: torch.Tensor,
Cmask: torch.Tensor, Qmask: torch.Tensor) -> torch.Tensor:
S = self.trilinear_for_attention(C, Q)
Cmask = Cmask.unsqueeze(-1)
Qmask = Qmask.unsqueeze(1)
S1 = masked_softmax(S, Qmask, axis=2)
S2 = masked_softmax(S, Cmask, axis=1)
A = torch.bmm(S1, Q)
B = torch.bmm(torch.bmm(S1, S2.transpose(1, 2)), C)
out = torch.cat([C, A, torch.mul(C, A), torch.mul(C, B)], dim=2)
return out
def trilinear_for_attention(self, C: torch.Tensor,
Q: torch.Tensor) -> torch.Tensor:
C = F.dropout(C, p=self.dropout, training=self.training)
Q = F.dropout(Q, p=self.dropout, training=self.training)
max_q_len = Q.size(-2)
max_context_len = C.size(-2)
subres0 = torch.matmul(C, self.w4C).expand([-1, -1, max_q_len])
subres1 = torch.matmul(Q, self.w4Q).transpose(
1, 2).expand([-1, max_context_len, -1])
subres2 = torch.matmul(C * self.w4mlu, Q.transpose(1, 2))
res = subres0 + subres1 + subres2
res += self.bias
return res
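# Illustrative usage sketch, not part of the original file: CQAttention fuses
# a context C with a query Q and returns batch x context_len x 4*hidden
# (the concatenation of C, A, C*A and C*B). Sizes are assumptions.
def _cq_attention_example():
    batch, c_len, q_len, hid = 2, 7, 4, 16
    cq = CQAttention(block_hidden_dim=hid, dropout=0.0)
    C = torch.rand(batch, c_len, hid)
    Q = torch.rand(batch, q_len, hid)
    out = cq(C, Q, torch.ones(batch, c_len), torch.ones(batch, q_len))
    assert out.shape == (batch, c_len, 4 * hid)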
class LSTMCell(torch.nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size, use_bias=True):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(LSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.pre_act_linear = torch.nn.Linear(
input_size + hidden_size, 4 * hidden_size, bias=False)
if use_bias:
self.bias_f = torch.nn.Parameter(torch.FloatTensor(hidden_size))
self.bias_iog = torch.nn.Parameter(
torch.FloatTensor(3 * hidden_size))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.pre_act_linear.weight.data)
if self.use_bias:
self.bias_f.data.fill_(1.0)
self.bias_iog.data.fill_(0.0)
def get_init_hidden(self, bsz, use_cuda):
h_0 = torch.autograd.Variable(
torch.FloatTensor(bsz, self.hidden_size).zero_())
c_0 = torch.autograd.Variable(
torch.FloatTensor(bsz, self.hidden_size).zero_())
if use_cuda:
h_0, c_0 = h_0.cuda(), c_0.cuda()
return h_0, c_0
def forward(self, input_, mask_=None, h_0=None, c_0=None):
"""
Args:
input_: A (batch, input_size) tensor containing input features.
mask_: (batch)
            h_0, c_0: Tensors containing the initial hidden and cell
                state, each of size (batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
if h_0 is None or c_0 is None:
h_init, c_init = self.get_init_hidden(
input_.size(0), use_cuda=input_.is_cuda)
if h_0 is None:
h_0 = h_init
if c_0 is None:
c_0 = c_init
if mask_ is None:
mask_ = torch.ones_like(torch.sum(input_, -1))
if input_.is_cuda:
mask_ = mask_.cuda()
pre_act = self.pre_act_linear(
torch.cat([input_, h_0], -1)) # batch x 4*hid
if self.use_bias:
pre_act = pre_act + \
torch.cat([self.bias_f, self.bias_iog]).unsqueeze(0)
f, i, o, g = torch.split(
pre_act, split_size_or_sections=self.hidden_size, dim=1)
expand_mask_ = mask_.unsqueeze(1) # batch x 1
c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
c_1 = c_1 * expand_mask_ + c_0 * (1 - expand_mask_)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
h_1 = h_1 * expand_mask_ + h_0 * (1 - expand_mask_)
return h_1, c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
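# Illustrative usage sketch, not part of the original file: one step of the
# masked LSTM cell; rows whose mask is 0 keep their previous hidden state.
# Sizes are assumptions.
def _lstm_cell_example():
    cell = LSTMCell(input_size=8, hidden_size=16)
    x = torch.rand(4, 8)
    mask = torch.tensor([1.0, 1.0, 1.0, 0.0])
    h0, c0 = cell.get_init_hidden(4, use_cuda=False)
    h1, c1 = cell(x, mask_=mask, h_0=h0, c_0=c0)
    assert h1.shape == (4, 16)
    assert torch.equal(h1[-1], h0[-1])  # masked row is carried over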
def masked_mean(x, m=None, dim=1):
"""
    mean pooling when there are paddings
input: tensor: batch x time x h
mask: batch x time
output: tensor: batch x h
"""
if m is None:
return torch.mean(x, dim=dim)
x = x * m.unsqueeze(-1)
mask_sum = torch.sum(m, dim=-1) # batch
tmp = torch.eq(mask_sum, 0).float()
if x.is_cuda:
tmp = tmp.cuda()
mask_sum = mask_sum + tmp
res = torch.sum(x, dim=dim) # batch x h
res = res / mask_sum.unsqueeze(-1)
return res
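# Illustrative usage sketch, not part of the original file: masked_mean
# averages only over unmasked time steps, so padding does not dilute the mean.
def _masked_mean_example():
    x = torch.ones(1, 4, 3)
    m = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
    pooled = masked_mean(x, m)
    assert pooled.shape == (1, 3)
    assert torch.allclose(pooled, torch.ones(1, 3))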
class ObservationDiscriminator(torch.nn.Module):
def __init__(self, n_h):
super(ObservationDiscriminator, self).__init__()
self.f_k = torch.nn.Bilinear(2 * n_h, n_h, 1)
for m in self.modules():
self.weights_init(m)
def weights_init(self, m):
if isinstance(m, torch.nn.Bilinear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, c, h_p, p_mask, h_n, n_mask, s_bias1=None, s_bias2=None):
masked_ave_hp = masked_mean(h_p, p_mask)
masked_ave_hn = masked_mean(h_n, n_mask)
sc_1 = self.f_k(c, masked_ave_hp)
sc_2 = self.f_k(c, masked_ave_hn)
logits = torch.cat([sc_1, sc_2], dim=0)
return logits
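# Illustrative usage sketch, not part of the original file: the discriminator
# scores a summary vector against positive and negative observation encodings
# and stacks the two scores along the batch dimension. Sizes are assumptions.
def _observation_discriminator_example():
    n_h = 16
    disc = ObservationDiscriminator(n_h)
    context = torch.rand(2, 2 * n_h)
    h_pos = torch.rand(2, 5, n_h)
    h_neg = torch.rand(2, 5, n_h)
    mask = torch.ones(2, 5)
    logits = disc(context, h_pos, mask, h_neg, mask)
    assert logits.shape == (4, 1)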
class DecoderBlockForObsGen(torch.nn.Module):
def __init__(self, ch_num, k, block_hidden_dim, n_head, dropout):
super().__init__()
self.dropout = dropout
self.self_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.obs_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.node_att = SelfAttention(block_hidden_dim, n_head, dropout)
self.FFN_0 = torch.nn.Linear(block_hidden_dim * 2, block_hidden_dim)
self.FFN_1 = torch.nn.Linear(ch_num, ch_num)
self.FFN_2 = torch.nn.Linear(ch_num, ch_num)
self.norm_1 = torch.nn.LayerNorm(block_hidden_dim)
self.norm_2 = torch.nn.LayerNorm(block_hidden_dim)
    def forward(self, x, mask, self_att_mask, prev_action_enc_representations,
                prev_action_mask, node_enc_representations, node_mask, l,
                blks):
total_layers = blks * 3
# conv layers
out = PosEncoder(x)
res = out
# self attention
out, _ = self.self_att(out, self_att_mask, out, out)
out_self = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out_self, res, self.dropout * float(l) / total_layers)
l += 1
res = out
out = self.norm_1(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# attention with encoder outputs
out_obs, obs_attention = self.obs_att(
out, prev_action_mask, prev_action_enc_representations, prev_action_enc_representations)
out_node, _ = self.node_att(
out, node_mask, node_enc_representations, node_enc_representations)
out = torch.cat([out_obs, out_node], -1)
out = self.FFN_0(out)
out = torch.relu(out)
out = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out, res, self.dropout * float(l) / total_layers)
l += 1
res = out
out = self.norm_2(out)
out = F.dropout(out, p=self.dropout, training=self.training)
# Fully connected layers
out = self.FFN_1(out)
out = torch.relu(out)
out = self.FFN_2(out)
out = out * mask.unsqueeze(-1)
out = self.layer_dropout(
out, res, self.dropout * float(l) / total_layers)
l += 1
return out, out_self # , out_obs, obs_attention
def layer_dropout(self, inputs, residual, dropout):
        if self.training is True:
            pred = torch.empty(1).uniform_(0, 1) < dropout
            if pred:
                return residual
            else:
                return F.dropout(inputs, dropout,
                                 training=self.training) + residual
else:
return inputs + residual
| 27,037 | 38.761765 | 143 |
py
|
LTL-GATA
|
LTL-GATA-main/src/model/pretrained_lm.py
|
from pathlib import Path
from transformers import DistilBertModel, DistilBertTokenizer
PRETRAINED_LANGUAGE_MODEL = None
TOKENIZER = None
def get_model_tokenizer(model: str, checkpoint: Path = None):
global PRETRAINED_LANGUAGE_MODEL
global TOKENIZER
if PRETRAINED_LANGUAGE_MODEL is None:
if model == 'bert':
if checkpoint is None:
checkpoint = 'distilbert-base-uncased'
PRETRAINED_LANGUAGE_MODEL = DistilBertModel.from_pretrained(
checkpoint)
for param in PRETRAINED_LANGUAGE_MODEL.parameters():
param.requires_grad = False
TOKENIZER = DistilBertTokenizer.from_pretrained(
'distilbert-base-uncased')
return PRETRAINED_LANGUAGE_MODEL, TOKENIZER
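# Illustrative usage sketch, not part of the original file: the module caches
# one frozen DistilBERT instance, so repeated calls return the same objects.
# Running this requires downloading the pretrained weights from the Hub.
def _pretrained_lm_example():
    model, tokenizer = get_model_tokenizer('bert')
    model_again, _ = get_model_tokenizer('bert')
    assert model is model_again
    tokens = tokenizer("go to the kitchen", return_tensors="pt")
    hidden = model(**tokens).last_hidden_state  # 1 x seq_len x hidden
    assert hidden.size(-1) == model.config.dim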
| 784 | 31.708333 | 72 |
py
|