prompt (string, 94 to 42.6k chars) | completion (string, 6 to 120 chars) | api (string, 14 to 68 chars)
---|---|---|
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
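# pre-allocate a pool of GPU memory for MegEngine's allocator; this trims
# allocation overhead and fragmentation during training (see
# megengine.device.set_prealloc_config for the meaning of each argument)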
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
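# note: this overrides any --save_path given on the command line; checkpoints
# are always written to the freshly created log directory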
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
| mge.dtr.enable() | megengine.dtr.enable |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
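# DTR (Dynamic Tensor Rematerialization) trades compute for memory: once the
# threshold above is exceeded, activations are evicted and recomputed during
# the backward pass, which allows larger models/batches on a single GPU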
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
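# the teacher stays in training mode so its forward pass yields the same
# intermediate features as the student; its parameters are never attached to
# the GradManager or an optimizer, so the teacher is never updated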
# model_tea.training = True # run in train mode
state_dict_tea = | mge.load(args.teacher_weight_file) | megengine.load |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
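# a checkpoint may be a raw state dict or wrap the weights under a
# "state_dict" key (the format this training script itself saves)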
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
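# collect only trainable parameters: backbone stages frozen via
# cfg.backbone_freeze_at are skipped so the optimizer never touches them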
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
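# the base learning rate is scaled linearly with the effective global batch
# size (per-GPU batch size times the number of workers)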
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = | GradManager() | megengine.autodiff.GradManager |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = | dist.launcher(worker, n_gpus=args.devices) | megengine.distributed.launcher |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
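# the GradManager records gradients for both the detector and the distiller
# parameters; with more than one worker, the allreduce callback averages
# gradients across GPUs so every rank applies the same update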
if args.weight_file is not None:
weights = | mge.load(args.weight_file) | megengine.load |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading parameters (besides the backbone) from the teacher model.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
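# "inherit" the teacher: copy every non-backbone parameter (FPN and heads)
# from the teacher into the student; strict=False tolerates mismatched keys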
if | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading parameters (besides the backbone) from the teacher model.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
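# broadcast parameters and buffers so every worker starts from identical
# copies of the student, the distiller and their running statistics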
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync buffers
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading parameters (besides the backbone) from the teacher model.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
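# the forward hooks stash the backbone feature maps of the teacher (holder[0])
# and the student (holder[1]); the distiller consumes these intermediate
# features at every training step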
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading parameters (besides the backbone) from the teacher model.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
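# one call to train_func is one optimization step: the teacher runs first,
# outside the GradManager scope (so no gradients are recorded for it), the
# distillation losses are added to the detector losses, the distiller's
# gradients are clipped to a max norm of 0.01, and both optimizers step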
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading parameters (besides the backbone) from the teacher model.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
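# adjust_learning_rate / adjust_learning_rate_cos are presumably defined later
# in this file (not shown in this excerpt); the names suggest the detector LR
# follows the cfg schedule while the distiller LR decays along a cosine curve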
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
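# cur_step / full_step are forwarded to train_func via **args; cur_step gates
# the distiller's distill_flag so distillation only kicks in after 1000
# warm-up iterations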
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if | dist.get_rank() | megengine.distributed.get_rank |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading Prameters Besides from Backbones.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys] + ["{}:%f".format(loss) for loss in distiller.loss_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate_cos(optimizer, cur_iter, total_iter):
base_lr = 1e-4
# Warm up
lr = 0.5 * base_lr * (1 + math.cos(cur_iter / total_iter * math.pi))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading Prameters Besides from Backbones.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys] + ["{}:%f".format(loss) for loss in distiller.loss_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate_cos(optimizer, cur_iter, total_iter):
base_lr = 1e-4
# Warm up
lr = 0.5 * base_lr * (1 + math.cos(cur_iter / total_iter * math.pi))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * dist.get_world_size() * (
cfg.lr_decay_rate
** bisect.bisect_right(cfg.lr_decay_stages, epoch)
)
)
# Warm up
lr_factor = 1.0
if epoch == 0 and step < cfg.warm_iters:
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(dataset_dir, cfg):
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(dataset_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(dataset_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite( | RandomSampler(train_dataset, batch_size, drop_last=True) | megengine.data.RandomSampler |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * | dist.get_world_size() | megengine.distributed.get_world_size |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading Prameters Besides from Backbones.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image= | mge.tensor(mini_batch["data"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading Prameters Besides from Backbones.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info= | mge.tensor(mini_batch["im_info"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
print('Loading Prameters Besides from Backbones.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
dist.bcast_list_(distiller.buffers()) # sync parameters
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes= | mge.tensor(mini_batch["gt_boxes"]) | megengine.tensor |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
mge.dtr.eviction_threshold = "6GB" # set the eviction threshold to 5 GB
mge.dtr.enable() # enable the DTR optimization
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
# model_tea.training = True # run in train mod
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[ | dist.make_allreduce_cb("mean", dist.WORLD) | megengine.distributed.make_allreduce_cb |
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
    mge.dtr.eviction_threshold = "6GB"  # set the DTR eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
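    # DTR (Dynamic Tensor Rematerialization) trades extra recomputation for GPU memory: once usage exceeds the
    # threshold above, activations are evicted and recomputed on demand during the backward pass.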
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
    # model_tea.training = True  # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
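    # The teacher is fully initialized from this checkpoint and is never updated: its parameters are not
    # attached to the GradManager or any optimizer.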
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
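    # Collect the student parameters that will be trained, skipping backbone stages frozen via cfg.backbone_freeze_at.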
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
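    # Two optimizers: SGD with a stepwise schedule (adjust_learning_rate) for the student,
    # AdamW with a cosine schedule (adjust_learning_rate_cos) for the distiller.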
params_with_grad.extend(distiller.parameters())
gm = GradManager()
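    # Both student and distiller parameters are attached to one GradManager; with more than one worker,
    # gradients are averaged across ranks via the allreduce callback.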
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
        print('Loading parameters from the teacher, except the backbone.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
        dist.bcast_list_(distiller.buffers())  # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
holder = [None, None]
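    # holder[0] caches the teacher's backbone output and holder[1] the student's; the forward hooks below
    # refresh them on every forward pass so the distiller can compare the two.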
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
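        # Note: **args here only carries cur_step/full_step passed by the training loop and shadows the outer
        # argparse namespace, which is not used inside this closure.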
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
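            # distill_flag stays 0 for the first 1000 iterations and switches to 1 afterwards,
            # presumably a warm-up period before the full distillation loss is applied.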
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
mge.optimizer.clip_grad_norm(
distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys] + ["{}:%f".format(loss) for loss in distiller.loss_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate_cos(optimizer, cur_iter, total_iter):
base_lr = 1e-4
    # Cosine annealing: decay the distiller's LR from base_lr down to 0 over the whole run
lr = 0.5 * base_lr * (1 + math.cos(cur_iter / total_iter * math.pi))
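    # e.g. with base_lr = 1e-4: lr is 1e-4 at iteration 0, ~5e-5 at the halfway point, and approaches 0 at the end.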
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * dist.get_world_size() * (
cfg.lr_decay_rate
** bisect.bisect_right(cfg.lr_decay_stages, epoch)
)
)
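    # e.g. with hypothetical lr_decay_stages = [8, 11] and lr_decay_rate = 0.1, the base LR is scaled
    # by 0.1 from epoch 8 onward and by 0.01 from epoch 11 onward.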
# Warm up
lr_factor = 1.0
if epoch == 0 and step < cfg.warm_iters:
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(dataset_dir, cfg):
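    # Build the training dataset described by cfg.train_dataset: the "name" field selects the dataset class
    # via data_mapper, and the root/ann_file paths are re-rooted under dataset_dir.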
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(dataset_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(dataset_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
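    # Bucket images by aspect ratio (with the default bins, ratio below vs. at least 1) so each batch holds
    # similarly shaped images and padding is reduced; an empty aspect_grouping falls back to a plain RandomSampler.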
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
                T.RandomHorizontalFlip(),
# -*- coding: utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import copy
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
from megengine.optimizer import AdamW, SGD
import math
from megengine.core._imperative_rt.utils import _set_defrag
_set_defrag(True)
from layers.tools.data_mapper import data_mapper
from layers.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 256 * 1024 * 1024, 4.0)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f", "--file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-df", "--distill_file", default="distill_configs/ICD.py", type=str, help="distill description file"
)
parser.add_argument(
"-tf", "--teacher_file", default="net.py", type=str, help="net description file"
)
parser.add_argument(
"-w", "--weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-tw", "--teacher_weight_file", default=None, type=str, help="weights file",
)
parser.add_argument(
"-n", "--devices", default=1, type=int, help="total number of gpus for training",
)
parser.add_argument(
"-b", "--batch_size", default=2, type=int, help="batch size for training",
)
parser.add_argument(
"-d", "--dataset_dir", default="/data/datasets", type=str,
)
parser.add_argument(
"-l", "--load_head", action='store_true'
)
parser.add_argument(
"-sp", "--save_path", type=str,
)
return parser
def main():
parser = make_parser()
args = parser.parse_args()
# ------------------------ begin training -------------------------- #
logger.info("Device Count = %d", args.devices)
from datetime import datetime
now = datetime.now()
log_dir = "log-of-ICD-{}-{}".format(os.path.basename(args.file).split(".")[0], now.strftime('%H:%M') + ('-Inherent' if args.load_head else ''))
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
args.save_path = log_dir
if args.devices > 1:
trainer = dist.launcher(worker, n_gpus=args.devices)
trainer(args)
else:
worker(args)
def worker(args):
    mge.dtr.eviction_threshold = "6GB"  # set the eviction threshold to 6 GB
mge.dtr.enable() # enable the DTR optimization
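    # DTR (dynamic tensor rematerialization) evicts activations beyond the threshold and recomputes them during backward, trading compute for GPU memory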
current_network = import_from_file(args.file)
model = current_network.Net(current_network.Cfg())
model.train()
########### BUILD TEACHER MODEL ############
current_teacher = import_from_file(args.teacher_file)
cfg_tea = current_teacher.Cfg()
cfg_tea.backbone_pretrained = False
model_tea = current_teacher.Net(cfg_tea)
model_tea.train()
    # model_tea.training = True  # run in train mode
state_dict_tea = mge.load(args.teacher_weight_file)
if "state_dict" in state_dict_tea:
state_dict_tea = state_dict_tea["state_dict"]
model_tea.load_state_dict(state_dict_tea)
############### LOADED ####################
################## DISTILLER ##############
distiller_cfg = import_from_file(args.distill_file)
distiller = distiller_cfg.Net()
############### END ####################
if dist.get_rank() == 0:
logger.info(get_config_info(model.cfg))
logger.info(repr(model))
params_with_grad = []
for name, param in model.named_parameters():
if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
continue
if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
continue
params_with_grad.append(param)
opt = SGD(
params_with_grad,
lr=model.cfg.basic_lr * args.batch_size * dist.get_world_size(),
momentum=model.cfg.momentum,
weight_decay=model.cfg.weight_decay,
)
opt_d = AdamW(
distiller.parameters(),
lr=1e-4,
weight_decay=1e-4,
)
params_with_grad.extend(distiller.parameters())
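    # a single GradManager tracks both detector and distiller parameters, so one backward pass fills gradients for both optimizers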
gm = GradManager()
if dist.get_world_size() > 1:
gm.attach(
params_with_grad,
callbacks=[dist.make_allreduce_cb("mean", dist.WORLD)]
)
else:
gm.attach(params_with_grad)
if args.weight_file is not None:
weights = mge.load(args.weight_file)
model.backbone.bottom_up.load_state_dict(weights, strict=False)
if args.load_head:
        print('Loading teacher parameters except the backbone.')
res = model.load_state_dict({k:v for k, v in model_tea.state_dict().items() if 'bottom_up' not in k}, strict=False)
if dist.get_world_size() > 1:
dist.bcast_list_(model.parameters()) # sync parameters
dist.bcast_list_(distiller.parameters()) # sync parameters
dist.bcast_list_(model.buffers()) # sync buffers
        dist.bcast_list_(distiller.buffers())  # sync buffers
if dist.get_rank() == 0:
logger.info("Prepare dataset")
train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
############## REGISTER ###############
# To get intermediate representations
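    # holder[0] caches the teacher backbone output and holder[1] the student's, refreshed by the forward hooks registered below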
holder = [None, None]
def register_tea_hooker(m, i, o):
holder[0] = o
return
model_tea.backbone.register_forward_hook(register_tea_hooker)
def register_stu_hooker(m, i, o):
holder[1] = o
return
model.backbone.register_forward_hook(register_stu_hooker)
for epoch in range(model.cfg.max_epoch):
train_one_epoch(model, model_tea, distiller, holder, train_loader, opt, opt_d, gm, epoch, args)
if dist.get_rank() == 0:
save_path = "{}/epoch_{}.pkl".format(
args.save_path, epoch
)
mge.save(
{"epoch": epoch, "state_dict": model.state_dict()}, save_path,
)
logger.info("dump weights to %s", save_path)
def train_one_epoch(model, model_tea, distiller, feat_holder, data_queue, opt, opt_d, gm, epoch, args):
def train_func(image, im_info, gt_boxes, **args):
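        # NOTE: **args here is a plain kwargs dict carrying cur_step/full_step from the call site; it shadows the outer argparse namespace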
model_tea(image=image, im_info=im_info, gt_boxes=gt_boxes)
with gm:
loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
loss_distill = distiller(feat_holder[0], feat_holder[1], image, gt_boxes, im_info, distill_flag=0 if args['cur_step'] < 1000 else 1)
loss_dict.update(loss_distill)
loss_dict["total_loss"] = loss_dict["total_loss"] + sum(loss_distill.values())
gm.backward(loss_dict["total_loss"])
loss_list = list(loss_dict.values())
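        # clip only the distiller's gradients (max norm 0.01) before both optimizers step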
        mge.optimizer.clip_grad_norm(distiller.parameters(), 0.01)
opt.step().clear_grad()
opt_d.step().clear_grad()
return loss_list
meter = AverageMeter(record_len=model.cfg.num_losses + distiller.num_losses)
time_meter = AverageMeter(record_len=2)
log_interval = model.cfg.log_interval
tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
full_step = tot_step * model.cfg.max_epoch
for step in range(tot_step):
cur_step = tot_step * epoch + step
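        # the detector optimizer follows step decay with warmup; the distiller optimizer follows a cosine schedule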
adjust_learning_rate(opt, epoch, step, model.cfg, args)
adjust_learning_rate_cos(opt_d, cur_step, full_step)
data_tik = time.time()
mini_batch = next(data_queue)
data_tok = time.time()
tik = time.time()
loss_list = train_func(
image=mge.tensor(mini_batch["data"]),
im_info=mge.tensor(mini_batch["im_info"]),
gt_boxes=mge.tensor(mini_batch["gt_boxes"]),
cur_step=cur_step,
full_step=full_step
)
tok = time.time()
time_meter.update([tok - tik, data_tok - data_tik])
if dist.get_rank() == 0:
info_str = "e%d, %d/%d, lr:%f, "
loss_str = ", ".join(
["{}:%f".format(loss) for loss in model.cfg.losses_keys] + ["{}:%f".format(loss) for loss in distiller.loss_keys]
)
time_str = ", train_time:%.3fs, data_time:%.3fs"
log_info_str = info_str + loss_str + time_str
meter.update([loss.numpy() for loss in loss_list])
if step % log_interval == 0:
logger.info(
log_info_str,
epoch,
step,
tot_step,
opt.param_groups[0]["lr"],
*meter.average(),
*time_meter.average()
)
meter.reset()
time_meter.reset()
def adjust_learning_rate_cos(optimizer, cur_iter, total_iter):
base_lr = 1e-4
    # cosine annealing: lr decays from base_lr to 0 over the full schedule
lr = 0.5 * base_lr * (1 + math.cos(cur_iter / total_iter * math.pi))
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
base_lr = (
cfg.basic_lr * args.batch_size * dist.get_world_size() * (
cfg.lr_decay_rate
** bisect.bisect_right(cfg.lr_decay_stages, epoch)
)
)
# Warm up
lr_factor = 1.0
if epoch == 0 and step < cfg.warm_iters:
lr_factor = (step + 1.0) / cfg.warm_iters
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_factor
def build_dataset(dataset_dir, cfg):
data_cfg = copy.deepcopy(cfg.train_dataset)
data_name = data_cfg.pop("name")
data_cfg["root"] = os.path.join(dataset_dir, data_name, data_cfg["root"])
if "ann_file" in data_cfg:
data_cfg["ann_file"] = os.path.join(dataset_dir, data_name, data_cfg["ann_file"])
data_cfg["order"] = ["image", "boxes", "boxes_category", "info"]
return data_mapper[data_name](**data_cfg)
# pylint: disable=dangerous-default-value
def build_sampler(train_dataset, batch_size, aspect_grouping=[1]):
def _compute_aspect_ratios(dataset):
aspect_ratios = []
for i in range(len(dataset)):
info = dataset.get_img_info(i)
aspect_ratios.append(info["height"] / info["width"])
return aspect_ratios
def _quantize(x, bins):
return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))
if len(aspect_grouping) == 0:
return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))
aspect_ratios = _compute_aspect_ratios(train_dataset)
group_ids = _quantize(aspect_ratios, aspect_grouping)
return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg):
train_dataset = build_dataset(dataset_dir, cfg)
train_sampler = build_sampler(train_dataset, batch_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
transform=T.Compose(
transforms=[
T.ShortestEdgeResize(
cfg.train_image_short_size,
cfg.train_image_max_size,
sample_style="choice",
),
T.RandomHorizontalFlip(),
| T.ToMode() | megengine.data.transform.ToMode |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = | tensor(av) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = | tensor(bv) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = | tensor(av) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = | tensor(bv) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = | tensor(av) | megengine.core.tensor |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
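    # straight-through estimator: forward quantizes onto a scaled integer grid, backward passes the incoming gradient through unchanged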
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
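    # save_for_backward stores tensors on the Function instance during forward; backward reads them back via self.saved_tensors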
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = | F.sigmoid(a) | megengine.functional.sigmoid |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = | trace(run_saved_context, symbolic=symbolic) | megengine.jit.trace |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose( | F.grad(c, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose( | F.grad(c, b, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose( | F.grad(c, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose( | F.grad(c, b, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = | F.maximum(maxv, -minv) | megengine.functional.maximum |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return | F.round(x / scale) | megengine.functional.round |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
| F.grad(q_2, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
F.grad(s, a, use_virtual_grad=False).numpy(),
F.grad(s2, a, use_virtual_grad=False).numpy(),
)
run(False, False)
run(True, False)
run(True, True)
def test_none_in_out_grad():
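    # only the first output is differentiated below, so backward receives grad_b=None for the unused output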
class Test(Function):
def forward(self, a, b):
return a, b
def backward(self, grad_a, grad_b):
assert grad_b is None
return (grad_a, 0)
a = tensor(np.array([1.0], dtype=np.float32))
b = tensor(np.array([2.0], dtype=np.float32))
aa, bb = Test()(a, b)
assertTensorClose(
| F.grad(aa, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
F.grad(s, a, use_virtual_grad=False).numpy(),
F.grad(s2, a, use_virtual_grad=False).numpy(),
)
run(False, False)
run(True, False)
run(True, True)
def test_none_in_out_grad():
class Test(Function):
def forward(self, a, b):
return a, b
def backward(self, grad_a, grad_b):
assert grad_b is None
return (grad_a, 0)
a = tensor(np.array([1.0], dtype=np.float32))
b = tensor(np.array([2.0], dtype=np.float32))
aa, bb = Test()(a, b)
assertTensorClose(
F.grad(aa, a, use_virtual_grad=False).numpy(), np.array([1.0], dtype=np.float32)
)
assertTensorClose(
| F.grad(aa, b, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
F.grad(s, a, use_virtual_grad=False).numpy(),
F.grad(s2, a, use_virtual_grad=False).numpy(),
)
run(False, False)
run(True, False)
run(True, True)
def test_none_in_out_grad():
class Test(Function):
def forward(self, a, b):
return a, b
def backward(self, grad_a, grad_b):
assert grad_b is None
return (grad_a, 0)
a = tensor(np.array([1.0], dtype=np.float32))
b = tensor(np.array([2.0], dtype=np.float32))
aa, bb = Test()(a, b)
assertTensorClose(
F.grad(aa, a, use_virtual_grad=False).numpy(), np.array([1.0], dtype=np.float32)
)
assertTensorClose(
F.grad(aa, b, use_virtual_grad=False).numpy(), np.array([0.0], dtype=np.float32)
)
def test_zero_grad():
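    # StopGradient's backward returns None, so no gradient flows through the b = a * 3.0 branch; only c = a * 4.0 contributes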
class StopGradient(Function):
def forward(self, a):
return a
def backward(self, *_):
return None
a = tensor(np.array([1.0], dtype=np.float32))
b = a * 3.0
c = a * 4.0
loss = StopGradient()(b) + c
assertTensorClose(
| F.grad(loss, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + | F.exp(-x) | megengine.functional.exp |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + | F.exp(-x) | megengine.functional.exp |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
| F.grad(s, a, use_virtual_grad=False) | megengine.functional.grad |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import copy
import numpy as np
import megengine.functional as F
from megengine.core import Function, tensor
from megengine.jit import trace
from megengine.test import assertTensorClose
def test_a_plus_b():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
class MulFunc(Function):
def forward(self, a, b):
return a * b
def backward(self, grad_o):
return (grad_o * b * 2, grad_o * a * 3)
c = MulFunc()(a, b).sum()
assertTensorClose(c.numpy(), (av * bv).sum())
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), bv * 2)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), av * 3)
def test_skip_invalid_grad():
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
bv = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
b = tensor(bv)
cookie = tensor(np.random.random(data_shape).astype(np.float32))
class EqWithFakeGrad(Function):
def forward(self, a, b):
return a == b
def backward(self, grad_o):
_ = grad_o
return cookie, cookie
c = EqWithFakeGrad()(a, b).sum()
assertTensorClose(c.numpy(), (av == bv).sum().astype(np.float32))
assertTensorClose(F.grad(c, a, use_virtual_grad=False).numpy(), cookie)
assertTensorClose(F.grad(c, b, use_virtual_grad=False).numpy(), cookie)
def test_ste():
class STE(Function):
def forward(self, x):
maxv, minv = x.max(), x.min()
scale = F.maximum(maxv, -minv) / 127
return F.round(x / scale) * scale
def backward(self, grad_y):
return grad_y
data_shape = (1, 9, 2, 6)
av = np.random.random(data_shape).astype(np.float32)
a = tensor(av)
q = STE()(a)
q_2 = (q * 2.0).sum()
assertTensorClose(
F.grad(q_2, a, use_virtual_grad=False).numpy(),
np.broadcast_to(np.array([2.0], dtype=np.float32), data_shape),
)
def test_deepcopy():
class Sigmoid(Function):
def __init__(self, param):
super().__init__()
self.param = param
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
origin = Sigmoid(0)
new = copy.deepcopy(Sigmoid(0))
assert new.param == origin.param
def test_save_context():
class Sigmoid(Function):
def forward(self, x):
y = 1 / (1 + F.exp(-x))
self.save_for_backward(y)
return y
def backward(self, grad_y):
(y,) = self.saved_tensors
return grad_y * y * (1 - y)
def run_saved_context(a, net=None):
return net(a)
def run(use_trace, symbolic):
a = tensor(np.array([1926.0817], dtype=np.float32))
net = Sigmoid()
func_run = run_saved_context
if use_trace:
func_run = trace(run_saved_context, symbolic=symbolic)
s = func_run(a, net=net)
s2 = F.sigmoid(a)
assertTensorClose(s.numpy(), s2.numpy())
assertTensorClose(
F.grad(s, a, use_virtual_grad=False).numpy(),
| F.grad(s2, a, use_virtual_grad=False) | megengine.functional.grad |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
        meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
| mge._full_sync() | megengine._full_sync |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
| mge._full_sync() | megengine._full_sync |
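# --- Editor's note (illustrative sketch, not the trainer's code): MegEngine launches
# kernels asynchronously, so the perf_counter() pairs above only measure real work because
# mge._full_sync() drains the launch queue before each timestamp is taken. For example:
import time
import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.Tensor(np.random.randn(1024, 1024).astype("float32"))
tik = time.perf_counter()
y = F.matmul(x, x)      # enqueued asynchronously
mge._full_sync()        # wait for the device to finish before reading the clock
tok = time.perf_counter()
print("matmul time: %.6fs" % (tok - tik))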
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = | jit.trace(self.model_step, symbolic=True) | megengine.jit.trace |
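# --- Editor's note (illustrative sketch): the completion above wraps the training step
# with jit.trace(symbolic=True); the same pattern works for any plain function, e.g.:
import numpy as np
import megengine as mge
import megengine.functional as F
from megengine import jit

def _step(x):
    return F.relu(x) * 2.0

step = jit.trace(_step, symbolic=True)   # same call pattern as the completion above
print(step(mge.Tensor(np.ones((2, 3), dtype="float32"))).numpy())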
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with amp.autocast(enabled=self.cfg.amp.enabled):
outputs = self.model(samples)
losses = self.loss(outputs, targets)
if isinstance(losses, mge.Tensor):
total_loss = losses
elif isinstance(losses, dict):
if "total_loss" in losses:
total_loss = losses["total_loss"]
else:
# only keys containing "loss" are summed into the total loss.
total_loss = sum([v for k, v in losses.items() if "loss" in k])
losses["total_loss"] = total_loss
else:
# list or tuple
total_loss = sum(losses)
total_loss = total_loss / self.cfg.solver.accumulation_steps
# convert one-hot labels to class indices so top-k accuracy can be computed
if targets.ndim == 2:
targets = F.argmax(targets, axis=1)
accs = | F.metric.topk_accuracy(outputs, targets, (1, 5)) | megengine.functional.metric.topk_accuracy |
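# --- Editor's note (illustrative sketch, assuming only the call signature shown above:
# logits, integer class targets, and a tuple of k values):
import numpy as np
import megengine as mge
import megengine.functional as F

logits = mge.Tensor(np.random.randn(8, 10).astype("float32"))             # 8 samples, 10 classes
labels = mge.Tensor(np.random.randint(0, 10, size=(8,)).astype("int32"))  # integer class indices
top1, top5 = F.metric.topk_accuracy(logits, labels, (1, 5))
print(top1.item(), top5.item())   # fractions in [0, 1]; the trainer scales them by 100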
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with | amp.autocast(enabled=self.cfg.amp.enabled) | megengine.amp.autocast |
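# --- Editor's note (illustrative sketch, not the trainer's config): amp.autocast is a
# context manager whose `enabled` flag toggles mixed-precision casting on or off.
import numpy as np
import megengine as mge
import megengine.amp as amp
import megengine.functional as F

x = mge.Tensor(np.random.randn(4, 8).astype("float32"))
w = mge.Tensor(np.random.randn(8, 8).astype("float32"))
with amp.autocast(enabled=True):
    y = F.matmul(x, w)   # eligible ops may run in float16 while autocast is enabled
print(y.dtype)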
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with amp.autocast(enabled=self.cfg.amp.enabled):
outputs = self.model(samples)
losses = self.loss(outputs, targets)
if isinstance(losses, mge.Tensor):
total_loss = losses
elif isinstance(losses, dict):
if "total_loss" in losses:
total_loss = losses["total_loss"]
else:
# only keys containing "loss" are summed into the total loss.
total_loss = sum([v for k, v in losses.items() if "loss" in k])
losses["total_loss"] = total_loss
else:
# list or tuple
total_loss = sum(losses)
total_loss = total_loss / self.cfg.solver.accumulation_steps
# convert one-hot labels to class indices so top-k accuracy can be computed
if targets.ndim == 2:
targets = | F.argmax(targets, axis=1) | megengine.functional.argmax |
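# --- Editor's note (illustrative only): the branch above collapses one-hot targets
# (ndim == 2) to class indices so that top-k accuracy can be computed against the logits.
import numpy as np
import megengine as mge
import megengine.functional as F

one_hot = mge.Tensor(np.eye(5, dtype="float32")[[2, 0, 4]])   # three one-hot rows
indices = F.argmax(one_hot, axis=1)
print(indices.numpy())   # [2 0 4]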
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with amp.autocast(enabled=self.cfg.amp.enabled):
outputs = self.model(samples)
losses = self.loss(outputs, targets)
if isinstance(losses, mge.Tensor):
total_loss = losses
elif isinstance(losses, dict):
if "total_loss" in losses:
total_loss = losses["total_loss"]
else:
# only keys containing "loss" are summed into the total loss.
total_loss = sum([v for k, v in losses.items() if "loss" in k])
losses["total_loss"] = total_loss
else:
# list or tuple
total_loss = sum(losses)
total_loss = total_loss / self.cfg.solver.accumulation_steps
# convert one-hot labels to class indices so top-k accuracy can be computed
if targets.ndim == 2:
targets = F.argmax(targets, axis=1)
accs = F.metric.topk_accuracy(outputs, targets, (1, 5))
if self.cfg.amp.enabled:
grad_scaler.backward(grad_manager, total_loss)
else:
grad_manager.backward(total_loss)
if self.progress.iter % self.cfg.solver.accumulation_steps == 0:
self.modify_grad()
optimizer.step().clear_grad()
self.model_ema_step()
return losses, accs
def modify_grad(self):
grad_cfg = self.cfg.solver.grad_clip
# TODO: support advanced params for grad clip in the future
params = self.model.parameters()
if grad_cfg.name is None:
return
elif grad_cfg.name == "norm":
| optim.clip_grad_norm(params, grad_cfg.max_norm) | megengine.optimizer.clip_grad_norm |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with amp.autocast(enabled=self.cfg.amp.enabled):
outputs = self.model(samples)
losses = self.loss(outputs, targets)
if isinstance(losses, mge.Tensor):
total_loss = losses
elif isinstance(losses, dict):
if "total_loss" in losses:
total_loss = losses["total_loss"]
else:
# only keys containing "loss" are summed into the total loss.
total_loss = sum([v for k, v in losses.items() if "loss" in k])
losses["total_loss"] = total_loss
else:
# list or tuple
total_loss = sum(losses)
total_loss = total_loss / self.cfg.solver.accumulation_steps
# convert one-hot labels to class indices so top-k accuracy can be computed
if targets.ndim == 2:
targets = F.argmax(targets, axis=1)
accs = F.metric.topk_accuracy(outputs, targets, (1, 5))
if self.cfg.amp.enabled:
grad_scaler.backward(grad_manager, total_loss)
else:
grad_manager.backward(total_loss)
if self.progress.iter % self.cfg.solver.accumulation_steps == 0:
self.modify_grad()
optimizer.step().clear_grad()
self.model_ema_step()
return losses, accs
def modify_grad(self):
grad_cfg = self.cfg.solver.grad_clip
# TODO: support advanced params for grad clip in the future
params = self.model.parameters()
if grad_cfg.name is None:
return
elif grad_cfg.name == "norm":
optim.clip_grad_norm(params, grad_cfg.max_norm)
elif grad_cfg.name == "value":
| optim.clip_grad_value(params, grad_cfg.lower, grad_cfg.upper) | megengine.optimizer.clip_grad_value |
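# --- Editor's note (illustrative sketch; the config field names below come from
# modify_grad above, the concrete values are made up). The two clipping modes can be
# exercised directly on a parameter whose gradient was produced by a GradManager;
# the trainer applies only one of them, selected by grad_clip.name.
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.optimizer as optim
from megengine.autodiff import GradManager

w = mge.Parameter(np.ones((3,), dtype="float32"))
gm = GradManager().attach([w])
with gm:
    gm.backward(F.sum(w * w) * 100.0)    # leaves a large gradient on w.grad
optim.clip_grad_norm([w], 1.0)           # grad_clip.name == "norm"  (max_norm)
optim.clip_grad_value([w], -0.1, 0.1)    # grad_clip.name == "value" (lower, upper)
print(w.grad.numpy())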
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import copy
import time
from typing import Iterable
import megengine as mge
import megengine.amp as amp
import megengine.distributed as dist
import megengine.functional as F
import megengine.module as M
import megengine.optimizer as optim
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import MeterBuffer
from megengine import jit
from basecls.data import DataLoaderType
from basecls.layers import Preprocess, build_loss
from basecls.solver import Solver
from basecls.utils import registers
__all__ = ["ClsTrainer"]
@registers.trainers.register()
class ClsTrainer(BaseTrainer):
"""Classification trainer.
Args:
cfg: config for training.
model: model for training.
dataloader: dataloader for training.
solver: solver for training.
hooks: hooks for training.
Attributes:
cfg: config for training.
model: model for training.
ema: model exponential moving average.
dataloader: dataloader for training.
solver: solver for training.
progress: object for recording training process.
loss: loss function for training.
meter: object for recording metrics.
"""
def __init__(
self,
cfg: ConfigDict,
model: M.Module,
dataloader: DataLoaderType,
solver: Solver,
hooks: Iterable[BaseHook] = None,
):
super().__init__(model, dataloader, solver, hooks)
self.cfg = cfg
self.ema = copy.deepcopy(model) if cfg.model_ema.enabled else None
self.preprocess = Preprocess(cfg.preprocess.img_mean, cfg.preprocess.img_std)
self.loss = build_loss(cfg)
self.meter = MeterBuffer(cfg.log_every_n_iter)
if cfg.trace:
# FIXME: tracing makes the training slower than before, why?
self.model_step = jit.trace(self.model_step, symbolic=True)
def train(self):
start_training_info = (1, 1)
max_iter = len(self.dataloader)
max_training_info = (self.cfg.solver.max_epoch, max_iter)
super().train(start_training_info, max_training_info)
def before_train(self):
super().before_train()
def before_epoch(self):
super().before_epoch()
self.dataloader_iter = iter(self.dataloader)
def after_epoch(self):
del self.dataloader_iter
super().after_epoch()
def train_one_iter(self):
"""Basic logic of training one iteration."""
data_tik = time.perf_counter()
data = next(self.dataloader_iter)
samples, targets = self.preprocess(data)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
data_tok = time.perf_counter()
train_tik = time.perf_counter()
losses, accs = self.model_step(samples, targets)
mge._full_sync() # use full_sync func to sync launch queue for dynamic execution
train_tok = time.perf_counter()
# TODO: stats and accs
loss_meters = {"loss": losses.item()}
stat_meters = {"stat_acc@1": accs[0].item() * 100, "stat_acc@5": accs[1].item() * 100}
time_meters = {"train_time": train_tok - train_tik, "data_time": data_tok - data_tik}
self.meter.update(**loss_meters, **stat_meters, **time_meters)
def model_step(self, samples, targets):
optimizer = self.solver.optimizer
grad_manager = self.solver.grad_manager
grad_scaler = self.solver.grad_scaler
with grad_manager:
with amp.autocast(enabled=self.cfg.amp.enabled):
outputs = self.model(samples)
losses = self.loss(outputs, targets)
if isinstance(losses, mge.Tensor):
total_loss = losses
elif isinstance(losses, dict):
if "total_loss" in losses:
total_loss = losses["total_loss"]
else:
# only keys containing "loss" are summed into the total loss.
total_loss = sum([v for k, v in losses.items() if "loss" in k])
losses["total_loss"] = total_loss
else:
# list or tuple
total_loss = sum(losses)
total_loss = total_loss / self.cfg.solver.accumulation_steps
# convert one-hot labels to class indices so top-k accuracy can be computed
if targets.ndim == 2:
targets = F.argmax(targets, axis=1)
accs = F.metric.topk_accuracy(outputs, targets, (1, 5))
if self.cfg.amp.enabled:
grad_scaler.backward(grad_manager, total_loss)
else:
grad_manager.backward(total_loss)
if self.progress.iter % self.cfg.solver.accumulation_steps == 0:
self.modify_grad()
optimizer.step().clear_grad()
self.model_ema_step()
return losses, accs
def modify_grad(self):
grad_cfg = self.cfg.solver.grad_clip
# TODO: support advanced params for grad clip in the future
params = self.model.parameters()
if grad_cfg.name is None:
return
elif grad_cfg.name == "norm":
optim.clip_grad_norm(params, grad_cfg.max_norm)
elif grad_cfg.name == "value":
optim.clip_grad_value(params, grad_cfg.lower, grad_cfg.upper)
else:
raise ValueError(f"Grad clip type '{grad_cfg.name}' not supported")
def model_ema_step(self):
"""Implement momentum based Exponential Moving Average (EMA) for model states
https://github.com/rwightman/pytorch-image-models/blob/master/timm/utils/model_ema.py
Also inspired by Pycls https://github.com/facebookresearch/pycls/pull/138/, which is more
flexible and efficient
Heuristically, one can use a momentum of 0.9999 as used by Tensorflow and 0.9998 as used
by timm, which updates model ema every iter. To be more efficient, one can set
``update_period`` to e.g. 8 or 32 to speed up your training, and decrease your momentum
at scale: set ``momentum=0.9978`` from 0.9999 (32 times) when you ``update_period=32``.
Also, to make model EMA really work (improve generalization), one should carefully tune
the momentum based on various factors, e.g. the learning rate scheduler,
the total batch size, the training epochs, e.t.c.
To initialize a momentum in Pycls style, one set ``model_ema.alpha = 1e-5`` instead.
Momentum will be calculated through ``_calculate_pycls_momentum``.
"""
if self.ema is None:
return
ema_cfg = self.cfg.model_ema
cur_iter, cur_epoch = self.progress.iter, self.progress.epoch
if cur_iter % ema_cfg.update_period == 0:
if cur_epoch > (ema_cfg.start_epoch or self.cfg.solver.warmup_epochs):
momentum = (
ema_cfg.momentum
if ema_cfg.alpha is None
else _calculate_pycls_momentum(
alpha=ema_cfg.alpha,
total_batch_size=self.cfg.batch_size * | dist.get_world_size() | megengine.distributed.get_world_size |
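# --- Editor's note (illustrative sketch): the concrete EMA update lies outside this row,
# so the function below is only an assumption about the usual form of the momentum update
# that the docstring above describes (ema <- momentum * ema + (1 - momentum) * model),
# not a copy of the trainer's implementation.
import copy
import megengine.module as M

def ema_step(ema, model, momentum=0.9999):
    ema_sd, model_sd = ema.state_dict(), model.state_dict()
    for k in ema_sd:
        ema_sd[k] = ema_sd[k] * momentum + (1.0 - momentum) * model_sd[k]
    ema.load_state_dict(ema_sd)

model = M.Linear(3, 3)
ema = copy.deepcopy(model)     # mirrors `self.ema = copy.deepcopy(model)` in __init__
ema_step(ema, model, momentum=0.9999)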
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
| AutoNaming.clear() | megengine.utils.naming.AutoNaming.clear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = | trace(func, symbolic=symbolic, capture_as_const=True) | megengine.jit.tracing.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = | cgtools.get_oprs_seq(outputs) | megengine.utils.comp_graph_tools.get_oprs_seq |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = | trace(f, symbolic=True, capture_as_const=True) | megengine.jit.tracing.trace |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
| quantize_qat(m) | megengine.quantization.quantize.quantize_qat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
| quantize(m) | megengine.quantization.quantize.quantize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
| quantize_qat(m) | megengine.quantization.quantize.quantize_qat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
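# Shared helper for the naming tests below: clear any cached auto-names, trace `func`
# with capture_as_const, dump the graph to an in-memory buffer while keeping operator
# and variable names, then reload it and return the operator sequence for assertions.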
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
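# After quantize_qat + quantize, the dumped graph should expose TypeCvt/MatrixMul/ADD
# operators named after the quant, linear and dequant submodules.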
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
| quantize(m) | megengine.quantization.quantize.quantize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
self.linear.weight.name = "user-weight"
self.linear.bias.name = "user-bias"
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
| quantize_qat(m) | megengine.quantization.quantize.quantize_qat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
self.linear.weight.name = "user-weight"
self.linear.bias.name = "user-bias"
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
| quantize(m) | megengine.quantization.quantize.quantize |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
    outputs = G.load_graph(file).output_vars_list
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
    outputs = G.load_graph(file).output_vars_list
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
    op = cgtools.get_oprs_seq(outputs)[-1]
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
            self.k = Parameter(1.0, name="k")
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
            self.k = Parameter(2.0, name="k")
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
            self.linear = M.Linear(3, 3)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
            self.linear = M.Linear(3, 3, name="x")
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
            x = F.relu(x)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
            x = F.relu(x)
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = | M.QuantStub() | megengine.module.QuantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = | M.Linear(3, 3, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = | M.DequantStub() | megengine.module.DequantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
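    # Convert the float module to a QAT module and then to a fully quantized module,
    # and switch to eval mode before tracing and dumping it.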
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = | M.QuantStub() | megengine.module.QuantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = | M.Linear(3, 3, bias=True, name="user-linear") | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = | M.DequantStub() | megengine.module.DequantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = | M.QuantStub() | megengine.module.QuantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = | M.Linear(3, 3, bias=True) | megengine.module.Linear |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [M.Linear(3, 3) for _ in range(2)]
self.l1 = tuple(self.l0)
self.l2 = dict(zip(["l2-0", "l2-1"], self.l0))
def forward(self, x):
for i in range(2):
x = self.l0[i](x)
x = self.l1[i](x)
x = self.l2["l2-%d" % i](x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].outputs[0].name == "simple.l0.1.ADD[2]"
assert ops[-1].name == "simple.l0.1.ADD[2]"
assert ops[-2].name == "simple.l0.1.MatrixMul[2]"
assert ops[-3].name == "simple.l0.1.ADD[1]"
assert ops[-4].name == "simple.l0.1.MatrixMul[1]"
assert ops[-5].name == "simple.l0.1.ADD[0]"
assert ops[-6].name == "simple.l0.1.MatrixMul[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_named_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3, name="x")
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.x.ADD"
assert ops[-2].name == "simple.x.MatrixMul"
assert ops[-1].outputs[0].name == "simple.x.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_same_operators(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
x = F.relu(x)
x = F.relu(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.RELU[1]"
assert ops[-2].name == "simple.RELU[0]"
@pytest.mark.parametrize("symbolic", [False, True])
def test_not_keep_opr_name(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic, False)[-1]
assert op.name == "MUL(x,const<2>[2])[4]"
@pytest.mark.parametrize("tensor_name, var_name", [("data", "data"), (None, "arg_0")])
def test_catch_input_name(tensor_name, var_name):
def f(x):
return 2 * x
func = trace(f, symbolic=True, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)), name=tensor_name)
func(x).numpy()
file = io.BytesIO()
func.dump(file, optimize_for_inference=False, keep_opr_name=True, keep_var_name=2)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
op = cgtools.get_oprs_seq(outputs)[-1]
assert op.inputs[0].name == var_name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.linear.MatrixMul",
"simple.linear.ADD",
"simple.linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True, name="user-linear")
self.dequant = M.DequantStub()
def forward(self, x):
out = self.quant(x)
out = self.linear(out)
out = self.dequant(out)
return out
m = Simple("simple")
quantize_qat(m)
quantize(m)
m.eval()
ops = _dump_and_load(m, symbolic)
ops_name = (
"x",
"simple.quant.TypeCvt",
"simple.user-linear.MatrixMul",
"simple.user-linear.ADD",
"simple.user-linear.TypeCvt",
"simple.dequant.TypeCvt",
)
for op, name in zip(ops, ops_name):
assert op.name == name
@pytest.mark.parametrize("symbolic", [False, True])
def test_quantized_module_user_naming_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__(name=name)
self.quant = M.QuantStub()
self.linear = M.Linear(3, 3, bias=True)
self.dequant = | M.DequantStub() | megengine.module.DequantStub |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
import numpy as np
import pytest
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine import Parameter, Tensor
from megengine.core.tensor import megbrain_graph as G
from megengine.jit.tracing import trace
from megengine.quantization.quantize import quantize, quantize_qat
from megengine.utils.naming import AutoNaming
def _dump_and_load(func, symbolic, keep_opr_name=True):
AutoNaming.clear()
func = trace(func, symbolic=symbolic, capture_as_const=True)
x = Tensor(np.ones(shape=(2, 3)))
func(x).numpy()
file = io.BytesIO()
func.dump(
file,
optimize_for_inference=False,
arg_names=("x",),
keep_opr_name=keep_opr_name,
keep_var_name=2,
)
file.seek(0)
outputs = G.load_graph(file).output_vars_list
ops = cgtools.get_oprs_seq(outputs)
return ops
@pytest.mark.parametrize("symbolic", [False, True])
def test_auto_naming(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, x):
return x + x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "simple.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_tensor(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(1.0, name="k")
def forward(self, x):
x = x + x
x.name = "o_x"
return x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "simple.ADD"
assert op.outputs[0].name == "o_x"
@pytest.mark.parametrize("symbolic", [False, True])
def test_user_named_param(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.k = Parameter(2.0, name="k")
def forward(self, x):
return self.k * x
m = Simple("simple")
op = _dump_and_load(m, symbolic)[-1]
assert op.inputs[0].name == "x"
assert op.inputs[1].name == "simple.k"
@pytest.mark.parametrize("symbolic", [False, True])
def test_without_module(symbolic):
def f(x):
return 2 * x
op = _dump_and_load(f, symbolic)[-1]
assert op.name == "MUL"
@pytest.mark.parametrize("symbolic", [False, True])
def test_ignore_top_module(symbolic):
class Simple(M.Module):
def forward(self, x):
return x + x
m = Simple()
op = _dump_and_load(m, symbolic)[-1]
assert op.name == "ADD"
assert op.outputs[0].name == "ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.linear = M.Linear(3, 3)
def forward(self, x):
x = self.linear(x)
return x
m = Simple("simple")
ops = _dump_and_load(m, symbolic)
assert ops[-1].name == "simple.linear.ADD"
assert ops[-2].name == "simple.linear.MatrixMul"
assert ops[-1].outputs[0].name == "simple.linear.ADD"
@pytest.mark.parametrize("symbolic", [False, True])
def test_with_submodule_in_container(symbolic):
class Simple(M.Module):
def __init__(self, name):
super().__init__()
self.name = name
self.l0 = [ | M.Linear(3, 3) | megengine.module.Linear |
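# Added note (hedged): _dump_and_load above traces the callable, dumps it with
# keep_opr_name/keep_var_name set, reloads the graph via G.load_graph and returns the
# operator sequence, so the tests can assert on the auto-generated or user-chosen names.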
import megengine
import megengine.module as M
import megengine.functional as F
def default_init_weights(module, scale=1, nonlinearity="relu"):
"""
nonlinearity: leaky_relu
"""
for m in module.modules():
if isinstance(m, M.Conv2d):
| M.init.msra_normal_(m.weight, mode="fan_in", nonlinearity=nonlinearity) | megengine.module.init.msra_normal_ |
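# Hedged usage sketch (not part of the original snippet): what the loop above does for a
# single Conv2d, using the public megengine.module.init APIs.
import megengine.module as M
conv = M.Conv2d(3, 16, kernel_size=3, padding=1)
M.init.msra_normal_(conv.weight, mode="fan_in", nonlinearity="relu")
conv.weight *= 0.5  # `scale` shrinks the freshly initialized weights
if conv.bias is not None:
    M.init.zeros_(conv.bias)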
import megengine
import megengine.module as M
import megengine.functional as F
def default_init_weights(module, scale=1, nonlinearity="relu"):
"""
nonlinearity: leaky_relu
"""
for m in module.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_in", nonlinearity=nonlinearity)
m.weight *= scale
if m.bias is not None:
| M.init.zeros_(m.bias) | megengine.module.init.zeros_ |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
from megengine import Parameter
from .init import ones_, zeros_
from .module import Module
class GroupNorm(Module):
"""
    Simple implementation of GroupNorm. Only supports 4D tensors for now.
Reference: https://arxiv.org/pdf/1803.08494.pdf.
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True, **kwargs):
super().__init__(**kwargs)
assert num_channels % num_groups == 0
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, self.num_groups, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x * x).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / | F.sqrt(var + self.eps) | megengine.functional.sqrt |
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
from megengine import Parameter
from .init import ones_, zeros_
from .module import Module
class GroupNorm(Module):
"""
    Simple implementation of GroupNorm. Only supports 4D tensors for now.
Reference: https://arxiv.org/pdf/1803.08494.pdf.
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True, **kwargs):
super().__init__(**kwargs)
assert num_channels % num_groups == 0
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, self.num_groups, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x * x).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(N, C, H, W)
if self.affine:
x = self.weight.reshape(1, -1, 1, 1) * x + self.bias.reshape(1, -1, 1, 1)
return x
def _module_info_string(self) -> str:
s = (
"groups={num_groups}, channels={num_channels}, "
"eps={eps}, affine={affine}"
)
return s.format(**self.__dict__)
class InstanceNorm(Module):
"""
    Simple implementation of InstanceNorm. Only supports 4D tensors for now.
    Reference: https://arxiv.org/abs/1607.08022.
    Note that InstanceNorm is equivalent to GroupNorm with num_groups=num_channels.
"""
def __init__(self, num_channels, eps=1e-05, affine=True, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype="float32"))
self.bias = Parameter(np.zeros(num_channels, dtype="float32"))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, C, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x ** 2).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / | F.sqrt(var + self.eps) | megengine.functional.sqrt |
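# Hedged sanity-check sketch (not part of the original file; assumes the GroupNorm class
# defined above is in scope): after grouping, each group should be ~zero-mean / unit-variance.
import numpy as np
from megengine import tensor
gn = GroupNorm(num_groups=2, num_channels=4, affine=False)
x = tensor(np.random.randn(2, 4, 8, 8).astype("float32"))
y = gn(x).numpy().reshape(2, 2, -1)  # (N, num_groups, elements per group)
assert np.allclose(y.mean(axis=2), 0, atol=1e-4)
assert np.allclose(y.var(axis=2), 1, atol=1e-2)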
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import megengine.functional as F
from megengine import Parameter
from .init import ones_, zeros_
from .module import Module
class GroupNorm(Module):
"""
    Simple implementation of GroupNorm. Only supports 4D tensors for now.
Reference: https://arxiv.org/pdf/1803.08494.pdf.
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True, **kwargs):
super().__init__(**kwargs)
assert num_channels % num_groups == 0
self.num_groups = num_groups
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype=np.float32))
self.bias = Parameter(np.zeros(num_channels, dtype=np.float32))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, self.num_groups, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x * x).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(N, C, H, W)
if self.affine:
x = self.weight.reshape(1, -1, 1, 1) * x + self.bias.reshape(1, -1, 1, 1)
return x
def _module_info_string(self) -> str:
s = (
"groups={num_groups}, channels={num_channels}, "
"eps={eps}, affine={affine}"
)
return s.format(**self.__dict__)
class InstanceNorm(Module):
"""
Simple implementation of InstanceNorm. Only support 4d tensor now.
Reference: https://arxiv.org/abs/1607.08022.
Note that InstanceNorm equals using GroupNome with num_groups=num_channels.
"""
def __init__(self, num_channels, eps=1e-05, affine=True, **kwargs):
super().__init__(**kwargs)
self.num_channels = num_channels
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(num_channels, dtype="float32"))
self.bias = Parameter(np.zeros(num_channels, dtype="float32"))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
N, C, H, W = x.shape
assert C == self.num_channels
x = x.reshape(N, C, -1)
mean = x.mean(axis=2, keepdims=True)
var = (x ** 2).mean(axis=2, keepdims=True) - mean * mean
x = (x - mean) / F.sqrt(var + self.eps)
x = x.reshape(N, C, H, W)
if self.affine:
x = self.weight.reshape(1, -1, 1, 1) * x + self.bias.reshape(1, -1, 1, 1)
return x
def _module_info_string(self) -> str:
s = "channels={num_channels}, eps={eps}, affine={affine}"
return s.format(**self.__dict__)
class LayerNorm(Module):
"""
    Simple implementation of LayerNorm. Supports tensors of any shape as input.
    Reference: https://arxiv.org/abs/1607.06450.
"""
def __init__(self, normalized_shape, eps=1e-05, affine=True, **kwargs):
super().__init__(**kwargs)
if isinstance(normalized_shape, int):
normalized_shape = (normalized_shape,)
self.normalized_shape = tuple(normalized_shape)
self.eps = eps
self.affine = affine
if self.affine:
self.weight = Parameter(np.ones(self.normalized_shape, dtype="float32"))
self.bias = Parameter(np.zeros(self.normalized_shape, dtype="float32"))
else:
self.weight = None
self.bias = None
self.reset_parameters()
def reset_parameters(self):
if self.affine:
ones_(self.weight)
zeros_(self.bias)
def forward(self, x):
x_shape = x.shape
dim_delta = len(x_shape) - len(self.normalized_shape)
non_flatten_shape = x_shape[:dim_delta]
x = x.reshape(*non_flatten_shape, -1)
mean = x.mean(axis=-1, keepdims=True)
var = (x ** 2).mean(axis=-1, keepdims=True) - mean * mean
x = (x - mean) / | F.sqrt(var + self.eps) | megengine.functional.sqrt |
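# Added note (hedged): the LayerNorm above flattens the trailing `normalized_shape` dims and
# normalizes over them, so statistics are computed per sample over the last dimensions only.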
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
| set_symbolic_shape(True) | megengine.core._trace_option.set_symbolic_shape |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
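# Added note (hedged): set_symbolic_shape(True) makes Tensor.shape evaluate symbolically
# while tracing (a tensor rather than a static tuple), which PreProcess below relies on
# because it reshapes by the runtime batch size N = quad.shape[0].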
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = F.warp_perspective(data, M, (48, 160)) # (N, 3, 48, 160)
return {"data": new_data}
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, quad):
x = self.pre_process(data, quad)
x = self.traced_module(x)
return x
def test_preprocess():
batch_size = 2
module = Main()
data = mge.tensor(
np.random.randint(0, 256, size=(batch_size, 3, 48, 160)), dtype=np.float32
)
traced_module = | trace_module(module, {"data": data}) | megengine.traced_module.trace_module |
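    # Added note (hedged): trace_module(module, *args) runs the module once on the given
    # example inputs and records its structure; Main.forward takes a dict, so a
    # {"data": Tensor} example input is passed here.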
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = F.warp_perspective(data, M, (48, 160)) # (N, 3, 48, 160)
return {"data": new_data}
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, quad):
x = self.pre_process(data, quad)
x = self.traced_module(x)
return x
def test_preprocess():
batch_size = 2
module = Main()
data = mge.tensor(
np.random.randint(0, 256, size=(batch_size, 3, 48, 160)), dtype=np.float32
)
traced_module = trace_module(module, {"data": data})
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
quad = mge.tensor(np.random.normal(size=(batch_size, 4, 2)), dtype=np.float32)
expect = module(data, quad)
traced_module = | trace_module(module, data, quad) | megengine.traced_module.trace_module |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = F.warp_perspective(data, M, (48, 160)) # (N, 3, 48, 160)
return {"data": new_data}
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, quad):
x = self.pre_process(data, quad)
x = self.traced_module(x)
return x
def test_preprocess():
batch_size = 2
module = Main()
data = mge.tensor(
np.random.randint(0, 256, size=(batch_size, 3, 48, 160)), dtype=np.float32
)
traced_module = trace_module(module, {"data": data})
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
quad = mge.tensor(np.random.normal(size=(batch_size, 4, 2)), dtype=np.float32)
expect = module(data, quad)
traced_module = trace_module(module, data, quad)
actual = traced_module(data, quad)
for i, j in zip(expect, actual):
np.testing.assert_array_equal(i, j)
func = | trace(traced_module, capture_as_const=True) | megengine.jit.trace |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = F.warp_perspective(data, M, (48, 160)) # (N, 3, 48, 160)
return {"data": new_data}
class Net(M.Module):
def __init__(self, traced_module):
super().__init__()
self.pre_process = PreProcess()
self.traced_module = traced_module
def forward(self, data, quad):
x = self.pre_process(data, quad)
x = self.traced_module(x)
return x
def test_preprocess():
batch_size = 2
module = Main()
data = mge.tensor(
np.random.randint(0, 256, size=(batch_size, 3, 48, 160)), dtype=np.float32
)
traced_module = trace_module(module, {"data": data})
obj = pickle.dumps(traced_module)
traced_module = pickle.loads(obj)
module = Net(traced_module)
module.eval()
quad = mge.tensor(np.random.normal(size=(batch_size, 4, 2)), dtype=np.float32)
expect = module(data, quad)
traced_module = trace_module(module, data, quad)
actual = traced_module(data, quad)
for i, j in zip(expect, actual):
np.testing.assert_array_equal(i, j)
func = trace(traced_module, capture_as_const=True)
actual = func(data, quad)
for i, j in zip(expect, actual):
np.testing.assert_array_equal(i, j)
model = io.BytesIO()
func.dump(model, arg_names=("data", "quad"))
model.seek(0)
infer_cg = | cgtools.GraphInference(model) | megengine.utils.comp_graph_tools.GraphInference |
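    # Hedged continuation sketch (assumed, not from the original test): the dumped graph can
    # be fed numpy inputs keyed by the arg_names given to func.dump().
    out = infer_cg.run(inp_dict={"data": data.numpy(), "quad": quad.numpy()})
    np.testing.assert_allclose(list(out.values())[0], expect.numpy(), atol=1e-4)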
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = | F.zeros((1,)) | megengine.functional.zeros |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = | F.ones((1,)) | megengine.functional.ones |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = | F.broadcast_to(self.I, quad.shape) | megengine.functional.broadcast_to |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = | F.broadcast_to(self.A, (N, 8, 8)) | megengine.functional.broadcast_to |
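# Hedged mini-example (assumed, not from the original snippet): F.broadcast_to expands
# singleton dimensions of a tensor to the requested target shape, as used for A and I above.
import numpy as np
import megengine.functional as F
from megengine import tensor
a = F.broadcast_to(tensor(np.zeros((1,), dtype="float32")), (4, 8, 8))
assert a.numpy().shape == (4, 8, 8)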
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul(F.matinv(A), B)[:, :, 0], I[:, 0:1, 0]], axis=1).reshape(
-1, 3, 3
)
new_data = | F.warp_perspective(data, M, (48, 160)) | megengine.functional.warp_perspective |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = | F.repeat(self.bb_out, N, axis=0) | megengine.functional.repeat |
import io
import pickle
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import megengine.utils.comp_graph_tools as cgtools
from megengine.core._trace_option import set_symbolic_shape
from megengine.jit import trace
from megengine.traced_module import trace_module
set_symbolic_shape(True)
class Main(M.Module):
def forward(self, x):
return x["data"]
class PreProcess(M.Module):
def __init__(self):
super().__init__()
self.A = F.zeros((1,))
self.I = F.ones((1,))
self.bb_out = mge.tensor(
np.array([[[0, 0], [160, 0], [160, 48], [0, 48]]], dtype="float32")
)
def forward(self, data, quad):
"""
data: (1, 3, 48, 160)
quad: (1, 4, 2)
"""
N = quad.shape[0]
dst = F.repeat(self.bb_out, N, axis=0).reshape(-1, 4, 2)
I = F.broadcast_to(self.I, quad.shape)
A = F.broadcast_to(self.A, (N, 8, 8))
A[:, 0:4, 0:2] = quad
A[:, 4:8, 5:6] = I[:, :, 0:1]
A[:, 0:4, 6:8] = -quad * dst[:, :, 0:1]
A[:, 4:8, 3:5] = quad
A[:, 0:4, 2:3] = I[:, :, 0:1]
A[:, 4:8, 6:8] = -quad * dst[:, :, 1:2]
B = dst.transpose(0, 2, 1).reshape(-1, 8, 1)
M = F.concat([F.matmul( | F.matinv(A) | megengine.functional.matinv |
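        # Added explanation (hedged): the 8x8 system A @ h = B above solves for a homography
        # h = (h0..h7); with M = [[h0, h1, h2], [h3, h4, h5], [h6, h7, 1]], each corner (x, y)
        # of `quad` maps to the matching corner (u, v) of `dst`:
        #   u = (h0*x + h1*y + h2) / (h6*x + h7*y + 1)
        #   v = (h3*x + h4*y + h5) / (h6*x + h7*y + 1)
        # F.matmul(F.matinv(A), B) recovers h, a constant 1 is appended, and F.warp_perspective
        # uses the resulting (N, 3, 3) matrices to resample `data` into the (48, 160) output.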
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = | tensor(x) | megengine.tensor |
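# Hedged mini-example (assumed, not from the original file): Elemwise modes can be addressed
# by string or by enum member, which is what test_elemwise_mode_string above verifies.
from megengine.functional.elemwise import Elemwise
assert Elemwise(mode="ADD") == Elemwise(mode=Elemwise.Mode.ADD)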
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = | tensor(y) | megengine.tensor |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest
import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise
from megengine.jit import trace
def test_abs():
np.testing.assert_allclose(
F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
)
np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
def test_elemwise_mode_string():
for key, mode in vars(Elemwise.Mode).items():
if isinstance(mode, Elemwise.Mode):
assert key == mode
assert Elemwise(mode=key) == Elemwise(mode=mode)
def test_multiply():
np.testing.assert_allclose(
F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
)
np.testing.assert_allclose(
F.mul(4.0, tensor([3.0, 4.0])).numpy(),
np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
)
np.testing.assert_allclose(
F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
np.multiply(
np.array([3.0, 4.0], dtype=np.float32),
np.array([3.0, 4.0], dtype=np.float32),
),
)
def test_div():
np.testing.assert_allclose(
F.div(tensor([3.0, 4.0]), 2).numpy(),
np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
)
np.testing.assert_allclose(
F.floor_div(tensor([-5.0, -7.0]), 2).numpy(),
np.floor_divide(np.array([-5.0, -7.0], dtype=np.float32), 2),
)
np.testing.assert_allclose(
(tensor([-5, -7]) // 2).numpy(),
np.floor_divide(np.array([-5, -7], dtype=np.int32), 2),
)
def test_clamp():
"""Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
`F.clip` will fall into wrong conditions unexpectedly.
"""
x = np.linspace(-6, 6, dtype="float32")
np.testing.assert_allclose(
F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
)
np.testing.assert_allclose(
F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
)
def test_isnan():
for case in [[1, float("nan"), 0]]:
np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
def test_isinf():
for case in [[1, float("inf"), 0]]:
np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
def test_sign():
for case in [[1, -1, 0]]:
x = tensor(case)
np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
def test_cosh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.cosh(x)
y_mge = F.cosh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_sinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.sinh(x)
y_mge = F.sinh(tensor(x)).numpy()
np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
def test_asinh():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.arcsinh(x)
y_mge = F.asinh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_acosh():
x = np.arange(0, 10000).astype("float32") / 100 + 1
y_np = np.arccosh(x)
y_mge = F.acosh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_atanh():
np.random.seed(42)
x = np.random.rand(100).astype("float32") * 2 - 1
y_np = np.arctanh(x)
y_mge = F.atanh(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
def test_hswish():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hswish(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_silu():
x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
y_np = x / (1 + np.exp(-x))
y_mge = F.silu(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_hsigmoid():
np.random.seed(42)
x = np.random.randn(100).astype("float32")
y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
y_mge = F.hsigmoid(tensor(x)).numpy()
np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
def test_logical_oprs():
x = np.array([[True, False], [False, True]])
y = np.array([[True, True], [False, False]])
xx = tensor(x)
yy = tensor(y)
np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
def test_logaddexp():
x = np.random.randn(2, 100)
y = np.random.randn(2, 100)
xx = | tensor(x) | megengine.tensor |