# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import wandb
import mlflow
import time
import torch
import torch.cuda.profiler as profiler
from typing import Union, Dict, Tuple
from modulus.distributed import DistributedManager, gather_loss
from .wandb import alert
from .console import PythonLogger
class LaunchLogger(object):
"""Modulus Launch logger
    An abstracted logger class that handles several fundamental logging functions.
    This class should first be initialized and then used via a context manager, which
    will automatically compute epoch metrics. This is the standard logger for Modulus examples.
Parameters
----------
    name_space : str
        Namespace of the logger. This defines the logger's title in the console and the
        WandB group under which its metrics are plotted
epoch : int, optional
Current epoch, by default 1
    num_mini_batch : Union[int, None], optional
        Number of mini-batches used to calculate the epoch's progress, by default None
profile : bool, optional
Profile code using nvtx markers, by default False
mini_batch_log_freq : int, optional
Frequency to log mini-batch losses, by default 100
epoch_alert_freq : Union[int, None], optional
Epoch frequency to send training alert, by default None
Example
-------
>>> from modulus.launch.logging import LaunchLogger
>>> LaunchLogger.initialize()
>>> epochs = 3
>>> for i in range(epochs):
... with LaunchLogger("Train", epoch=i) as log:
... # Log 3 mini-batches manually
... log.log_minibatch({"loss": 1.0})
... log.log_minibatch({"loss": 2.0})
... log.log_minibatch({"loss": 3.0})
"""
_instances = {}
console_backend = True
wandb_backend = False
mlflow_backend = False
tensorboard_backend = False
enable_profiling = False
mlflow_run = None
mlflow_client = None
def __new__(cls, name_space, *args, **kwargs):
# If namespace already has an instance just return that
if name_space in cls._instances:
return cls._instances[name_space]
# Otherwise create new singleton instance for this namespace
self = super().__new__(cls) # don't pass remaining parameters to object.__new__
cls._instances[name_space] = self
        # Constructor set up to only be run once per namespace
self.pyLogger = PythonLogger(name_space)
self.total_iteration_index = None
# Distributed
self.root = True
if DistributedManager.is_initialized():
self.root = DistributedManager().rank == 0
# Profiler utils
if torch.cuda.is_available():
self.profiler = torch.autograd.profiler.emit_nvtx(
enabled=cls.enable_profiling
)
self.start_event = torch.cuda.Event(enable_timing=True)
self.end_event = torch.cuda.Event(enable_timing=True)
else:
self.profiler = None
return self
def __init__(
self,
name_space: str,
epoch: int = 1,
num_mini_batch: Union[int, None] = None,
profile: bool = False,
mini_batch_log_freq: int = 100,
epoch_alert_freq: Union[int, None] = None,
):
self.name_space = name_space
self.mini_batch_index = 0
self.minibatch_losses = {}
self.epoch_losses = {}
self.mini_batch_log_freq = mini_batch_log_freq
self.epoch_alert_freq = epoch_alert_freq
self.epoch = epoch
self.num_mini_batch = num_mini_batch
self.profile = profile
# Init initial iteration based on current epoch
if self.total_iteration_index is None:
if num_mini_batch is not None:
self.total_iteration_index = (epoch - 1) * num_mini_batch
else:
self.total_iteration_index = 0
# Set x axis metric to epoch for this namespace
if self.wandb_backend:
wandb.define_metric(name_space + "/mini_batch_*", step_metric="iter")
wandb.define_metric(name_space + "/*", step_metric="epoch")
def log_minibatch(self, losses: Dict[str, float]):
"""Logs metrics for a mini-batch epoch
This function should be called every mini-batch iteration. It will accumulate
loss values over a datapipe. At the end of a epoch the average of these losses
from each mini-batch will get calculated.
Parameters
----------
losses : Dict[str, float]
Dictionary of metrics/loss values to log
"""
self.mini_batch_index += 1
self.total_iteration_index += 1
for name, value in losses.items():
if name not in self.minibatch_losses:
self.minibatch_losses[name] = 0
self.minibatch_losses[name] += value
# Log of mini-batch loss
if self.mini_batch_index % self.mini_batch_log_freq == 0:
# Backend Logging
mini_batch_metrics = {}
for name, value in losses.items():
mini_batch_metrics[f"{self.name_space}/mini_batch_{name}"] = value
self._log_backends(
mini_batch_metrics, step=("iter", self.total_iteration_index)
)
# Console
if self.root:
message = f"Mini-Batch Losses:"
for name, value in losses.items():
message += f" {name} = {value:10.3e},"
message = message[:-1]
# If we have datapipe length we can get a percent complete
if self.num_mini_batch:
mbp = 100 * (float(self.mini_batch_index) / self.num_mini_batch)
message = f"[{mbp:.02f}%] " + message
self.pyLogger.log(message)
def log_epoch(self, losses: Dict[str, float]):
"""Logs metrics for a single epoch
Parameters
----------
losses : Dict[str, float]
Dictionary of metrics/loss values to log
"""
for name, value in losses.items():
self.epoch_losses[name] = value
def __enter__(self):
self.mini_batch_index = 0
self.minibatch_losses = {}
self.epoch_losses = {}
# Trigger profiling
if self.profile and self.profiler:
self.logger.warning(f"Starting profile for epoch {self.epoch}")
self.profiler.__enter__()
profiler.start()
# Timing stuff
if torch.cuda.is_available():
self.start_event.record()
else:
self.start_event = time.time()
if self.mlflow_backend:
self.mlflow_client.update_run(self.mlflow_run.info.run_id, "RUNNING")
return self
def __exit__(self, exc_type, exc_value, exc_tb):
        # Abnormal exit, don't log
if exc_type is not None:
if self.mlflow_backend:
self.mlflow_client.set_terminated(
self.mlflow_run.info.run_id, status="KILLED"
)
return
# Gather mini-batch losses
for name, value in self.minibatch_losses.items():
process_loss = value / self.mini_batch_index
self.epoch_losses[name] = process_loss
# Compute global loss
if DistributedManager.is_initialized() and DistributedManager().distributed:
self.epoch_losses[f"Global {name}"] = gather_loss(process_loss)
if self.root:
# Console printing
# TODO: add out of total epochs progress
message = f"Epoch {self.epoch} Metrics:"
for name, value in self.epoch_losses.items():
message += f" {name} = {value:10.3e},"
message = message[:-1]
self.pyLogger.info(message)
metrics = {
f"{self.name_space}/{key}": value
for key, value in self.epoch_losses.items()
}
# Exit profiling
if self.profile and self.profiler:
self.logger.warning("Ending profile")
self.profiler.__exit__()
profiler.end()
# Timing stuff, TODO: histograms not line plots
if torch.cuda.is_available():
self.end_event.record()
torch.cuda.synchronize()
# Returns milliseconds
# https://pytorch.org/docs/stable/generated/torch.cuda.Event.html#torch.cuda.Event.elapsed_time
epoch_time = self.start_event.elapsed_time(self.end_event) / 1000.0
else:
end_event = time.time()
epoch_time = end_event - self.start_event
# Return MS for time / iter
time_per_iter = 1000 * epoch_time / max([1, self.mini_batch_index])
if self.root:
message = f"Epoch Execution Time: {epoch_time:10.3e}s"
message += f", Time/Iter: {time_per_iter:10.3e}ms"
self.pyLogger.info(message)
metrics[f"{self.name_space}/Epoch Time (s)"] = epoch_time
metrics[f"{self.name_space}/Time per iter (ms)"] = time_per_iter
self._log_backends(metrics, step=("epoch", self.epoch))
# TODO this should be in some on delete method / clean up
if self.mlflow_backend:
self.mlflow_client.set_terminated(
self.mlflow_run.info.run_id, status="FINISHED"
)
# Alert
if (
self.epoch_alert_freq
and self.root
and self.epoch % self.epoch_alert_freq == 0
):
if self.wandb_backend:
# TODO: Make this a little more informative?
alert(
title=f"{sys.argv[0]} training progress report",
text=f"Run {wandb.run.name} is at epoch {self.epoch}.",
)
def _log_backends(
self,
metric_dict: Dict[str, float],
step: Tuple[str, int] = None,
print: bool = False,
):
"""Logs a dictionary of metrics to different supported backends
Parameters
----------
metric_dict : Dict[str, float]
Metric dictionary
step : Tuple[str, int], optional
Tuple containing (step name, step index), by default None
print : bool, optional
Print metrics, by default False
"""
# MLFlow Logging
if self.mlflow_backend:
for key, value in metric_dict.items():
# If value is None just skip
if value is None:
continue
# Keys only allow alpha numeric, ., -, /, _ and spaces
                key = re.sub(r"[^a-zA-Z0-9\.\-\s\/\_]+", "", key)
self.mlflow_client.log_metric(
self.mlflow_run.info.run_id, key, value, step=step[1]
)
# WandB Logging
if self.wandb_backend:
            # For WandB, send the step in as a metric; the step argument of the log
            # function does not work with multiple log calls at different intervals
metric_dict[step[0]] = step[1]
wandb.log(metric_dict)
@classmethod
def toggle_wandb(cls, value: bool):
"""Toggle WandB logging
Parameters
----------
value : bool
Use WandB logging
"""
cls.wandb_backend = value
@classmethod
def toggle_mlflow(cls, value: bool):
"""Toggle MLFlow logging
Parameters
----------
value : bool
Use MLFlow logging
"""
cls.mlflow_backend = value
@staticmethod
def initialize(use_wandb: bool = False, use_mlflow: bool = False):
"""Initialize logging singleton
Parameters
----------
use_wandb : bool, optional
Use WandB logging, by default False
use_mlflow : bool, optional
Use MLFlow logging, by default False
"""
if wandb.run is None and use_wandb:
PythonLogger().warning("WandB not initialized, turning off")
use_wandb = False
if LaunchLogger.mlflow_run is None and use_mlflow:
PythonLogger().warning("MLFlow not initialized, turning off")
use_mlflow = False
if use_wandb:
LaunchLogger.toggle_wandb(True)
wandb.define_metric("epoch")
wandb.define_metric("iter")
if use_mlflow:
LaunchLogger.toggle_mlflow(True)
# Source file: modulus/launch/logging/launch.py (repo: modulus-launch-main)
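# Usage sketch (hypothetical, not part of the source file above): a fuller LaunchLogger
# loop that also records an epoch-level metric. No backend toggles are passed, so this
# logs to the console only; the namespace "Train" and the metric names are illustrative.
from modulus.launch.logging import LaunchLogger

LaunchLogger.initialize()  # console only; pass use_wandb/use_mlflow to enable backends
for epoch in range(1, 4):
    with LaunchLogger("Train", epoch=epoch, num_mini_batch=10) as log:
        for _ in range(10):
            log.log_minibatch({"loss": 1.0})
        log.log_epoch({"Learning Rate": 1e-3})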
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from datetime import datetime
from modulus.distributed import DistributedManager
def create_ddp_group_tag(group_name: str = None) -> str:
"""Creates a common group tag for logging
    Note: this does not currently work with multi-node runs; there appears to be a bug in
    PyTorch when a distributed utility is used before DDP is initialized
Parameters
----------
group_name : str, optional
Optional group name prefix. If None will use "DDP_Group_", by default None
Returns
-------
str
Group tag
"""
dist = DistributedManager()
if dist.rank == 0:
# Store time stamp as int tensor for broadcasting
tint = lambda x: int(datetime.now().strftime(f"%{x}"))
time_index = torch.IntTensor(
[tint(x) for x in ["m", "d", "y", "H", "M", "S"]]
).to(dist.device)
else:
time_index = torch.IntTensor([0, 0, 0, 0, 0, 0]).to(dist.device)
if torch.distributed.is_available():
# Broadcast group ID to all processes
torch.distributed.broadcast(time_index, src=0)
time_string = f"{time_index[0]}/{time_index[1]}/{time_index[2]}_\
{time_index[3]}-{time_index[4]}-{time_index[5]}"
if group_name is None:
group_name = "DDP_Group"
return group_name + "_" + time_string
# Source file: modulus/launch/logging/utils.py (repo: modulus-launch-main)
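# Usage sketch (hypothetical): create_ddp_group_tag is typically used to give every rank
# the same WandB group name. The import path is inferred from the file location above, and
# DistributedManager is assumed to be initialized first.
from modulus.distributed import DistributedManager
from modulus.launch.logging.utils import create_ddp_group_tag

DistributedManager.initialize()
group_tag = create_ddp_group_tag("DLWP")  # e.g. "DLWP_<month>/<day>/<year>_<H>-<M>-<S>"
print(group_tag)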
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
import shutil
import torch.nn as nn
import modulus
from typing import Callable
from modulus.models.mlp import FullyConnected
from modulus.launch.utils import save_checkpoint, load_checkpoint
@pytest.fixture()
def checkpoint_folder() -> str:
return "./checkpoints"
@pytest.fixture(params=["modulus", "pytorch"])
def model_generator(request) -> Callable:
# Create fully-connected NN generator function
if request.param == "modulus":
generator = lambda x: FullyConnected(
in_features=x,
out_features=x,
num_layers=2,
layer_size=8,
)
else:
generator = lambda x: nn.Sequential(
nn.Linear(x, 8),
nn.ReLU(),
nn.Linear(8, x),
)
return generator
@pytest.mark.parametrize("device", ["cuda:0", "cpu"])
def test_model_checkpointing(
device, model_generator, checkpoint_folder, rtol: float = 1e-3, atol: float = 1e-3
):
"""Test checkpointing util for model"""
mlp_model_1 = model_generator(8).to(device)
mlp_model_2 = model_generator(4).to(device)
input_1 = torch.randn(4, 8).to(device)
input_2 = torch.randn(4, 4).to(device)
output_1 = mlp_model_1(input_1)
output_2 = mlp_model_2(input_2)
# Save model weights to checkpoint
save_checkpoint(checkpoint_folder, models=[mlp_model_1, mlp_model_2])
# Load twin set of models for importing weights
mlp_model_1 = model_generator(8).to(device)
mlp_model_2 = model_generator(4).to(device)
new_output_1 = mlp_model_1(input_1)
new_output_2 = mlp_model_2(input_2)
# Assert models are now different
assert not torch.allclose(output_1, new_output_1, rtol, atol)
assert not torch.allclose(output_2, new_output_2, rtol, atol)
# Load model weights from checkpoint
load_checkpoint(checkpoint_folder, models=[mlp_model_1, mlp_model_2], device=device)
new_output_1 = mlp_model_1(input_1)
new_output_2 = mlp_model_2(input_2)
assert torch.allclose(output_1, new_output_1, rtol, atol)
assert torch.allclose(output_2, new_output_2, rtol, atol)
# Clean up
shutil.rmtree(checkpoint_folder)
# Source file: test/utils/test_checkpoint.py (repo: modulus-launch-main)
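# Usage sketch (hypothetical): save_checkpoint/load_checkpoint as exercised by the test
# above, but also carrying an optimizer and an epoch counter. The keyword names mirror
# those used elsewhere in this repo (models, optimizer, epoch, device).
import torch
from modulus.models.mlp import FullyConnected
from modulus.launch.utils import save_checkpoint, load_checkpoint

model = FullyConnected(in_features=8, out_features=8, num_layers=2, layer_size=8)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
save_checkpoint("./checkpoints", models=model, optimizer=optimizer, epoch=1)
loaded_epoch = load_checkpoint("./checkpoints", models=model, optimizer=optimizer, device="cpu")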
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that copyright headers exists"""
import argparse
import itertools
import re
import sys
import json
from datetime import datetime
from pathlib import Path
def get_top_comments(_data):
"""
Get all lines where comments should exist
"""
lines_to_extract = []
for i, line in enumerate(_data):
# If empty line, skip
if line in ["", "\n", "", "\r", "\r\n"]:
continue
# If it is a comment line, we should get it
if line.startswith("#"):
lines_to_extract.append(i)
# Assume all copyright headers occur before any import or from statements
# and not enclosed in a comment block
elif "import" in line:
break
elif "from" in line:
break
comments = []
for line in lines_to_extract:
comments.append(_data[line])
return comments
def get_gitignore_files():
gitignore_path = Path(__file__).parent.parent.parent.resolve() / Path(".gitignore")
gitignore_files = []
if gitignore_path.exists():
with open(gitignore_path, "r") as gitignore_file:
for line in gitignore_file:
line = line.strip()
if line and not line.startswith("#"):
gitignore_files.append(line)
return gitignore_files
def main():
with open(Path(__file__).parent.resolve() / Path("config.json")) as f:
config = json.loads(f.read())
print(f"License check config:")
print(json.dumps(config, sort_keys=True, indent=4))
current_year = int(datetime.today().year)
starting_year = 2023
python_header_path = Path(__file__).parent.resolve() / Path(
config["copyright_file"]
)
working_path = Path(__file__).parent.resolve() / Path(config["dir"])
exts = config["include-ext"]
with open(python_header_path, "r", encoding="utf-8") as original:
pyheader = original.read().split("\n")
pyheader_lines = len(pyheader)
# Build list of files to check
exclude_paths = [
(Path(__file__).parent / Path(path)).resolve().rglob("*")
for path in config["exclude-dir"]
]
all_exclude_paths = itertools.chain.from_iterable(exclude_paths)
exclude_filenames = [p for p in all_exclude_paths if p.suffix in exts]
filenames = [p for p in working_path.resolve().rglob("*") if p.suffix in exts]
filenames = [
filename for filename in filenames if filename not in exclude_filenames
]
problematic_files = []
gpl_files = []
ignored_files = get_gitignore_files()
for filename in filenames:
# Skip files listed in .gitignore
# TODO need a more robust pattern matching
ignored_files = [s.replace("*", "") for s in ignored_files]
if any(pattern in str(filename) for pattern in ignored_files):
continue
with open(str(filename), "r", encoding="utf-8") as original:
data = original.readlines()
data = get_top_comments(data)
if len(data) < pyheader_lines - 1:
if data and "# ignore_header_test" in data[0]:
continue
print(f"{filename} has less header lines than the copyright template")
problematic_files.append(filename)
continue
found = False
for i, line in enumerate(data):
if re.search(re.compile("Copyright.*NVIDIA.*", re.IGNORECASE), line):
found = True
# Check 1st line manually
year_good = False
for year in range(starting_year, current_year + 1):
year_line = pyheader[0].format(CURRENT_YEAR=year)
if year_line in data[i]:
year_good = True
break
year_line_aff = year_line.split(".")
year_line_aff = (
year_line_aff[0] + " & AFFILIATES." + year_line_aff[1]
)
if year_line_aff in data[i]:
year_good = True
break
if not year_good:
problematic_files.append(filename)
print(f"{filename} had an error with the year")
break
# while "opyright" in data[i]:
# i += 1
# for j in range(1, pyheader_lines):
# if pyheader[j] not in data[i + j - 1]:
# problematic_files.append(filename)
# print(f"{filename} missed the line: {pyheader[j]}")
# break
if found:
break
if not found:
print(f"{filename} did not match the regex: `Copyright.*NVIDIA.*`")
problematic_files.append(filename)
# test if GPL license exists
for lines in data:
if "gpl" in lines.lower():
gpl_files.append(filename)
break
if len(problematic_files) > 0:
print(
"test_header.py found the following files that might not have a copyright header:"
)
for _file in problematic_files:
print(_file)
if len(gpl_files) > 0:
print("test_header.py found the following files that might have GPL copyright:")
for _file in gpl_files:
print(_file)
assert len(problematic_files) == 0, "header test failed!"
assert len(gpl_files) == 0, "found gpl license, header test failed!"
print("Success: File headers look good!")
if __name__ == "__main__":
main()
# Source file: test/ci_tests/header_check.py (repo: modulus-launch-main)
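# Illustrative shape of the config.json read by the checker above (the values here are
# hypothetical; the real file lives next to the script). The keys match those accessed in
# main(): the copyright template file, the directory to scan, the file extensions to
# include, and directories to exclude.
EXAMPLE_CONFIG = {
    "copyright_file": "copyright.txt",
    "dir": "../../",
    "include-ext": [".py"],
    "exclude-dir": ["../../docs"],
}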
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sphinx_rtd_theme
from modulus.launch import __version__ as version
project = "NVIDIA Modulus Launch"
copyright = "2023, NVIDIA Modulus Team"
author = "NVIDIA Modulus Team"
release = version
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
"recommonmark",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"nbsphinx",
]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
pdf_documents = [
("index", "rst2pdf", "Sample rst2pdf doc", "Your Name"),
]
napoleon_custom_sections = ["Variable Shape"]
# -- Options for HTML output -------------------------------------------------
# HTML theme options
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"logo_only": True,
"display_version": True,
"prev_next_buttons_location": "bottom",
"style_external_links": False,
"style_nav_header_background": "#000000",
# Toc options
"collapse_navigation": False,
"sticky_navigation": False,
# 'navigation_depth': 10,
"sidebarwidth": 12,
"includehidden": True,
"titles_only": False,
}
# Additional html options
html_static_path = ["_static"]
html_css_files = [
"css/nvidia_styles.css",
]
html_js_files = ["js/pk_scripts.js"]
# html_last_updated_fmt = ''
# Additional sphinx switches
math_number_all = True
todo_include_todos = True
numfig = True
_PREAMBLE = r"""
\usepackage{amsmath}
\usepackage{esint}
\usepackage{mathtools}
\usepackage{stmaryrd}
"""
latex_elements = {
"preamble": _PREAMBLE,
# other settings go here
}
latex_preamble = [
(
"\\usepackage{amssymb}",
"\\usepackage{amsmath}",
"\\usepackage{amsxtra}",
"\\usepackage{bm}",
"\\usepackage{esint}",
"\\usepackage{mathtools}",
"\\usepackage{stmaryrd}",
),
]
autosectionlabel_maxdepth = 1
templates_path = ["_templates"]
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"README.md",
"CONTRIBUTING.md",
"LICENSE.txt",
]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
pdf_documents = [
("index", "rst2pdf", "Sample rst2pdf doc", "Your Name"),
]
napoleon_custom_sections = ["Variable Shape"]
# Source file: docs/conf.py (repo: modulus-launch-main)
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import numpy as np
import torch
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as dali_pth
except ImportError:
raise ImportError(
"DALI dataset requires NVIDIA DALI package to be installed. "
+ "The package can be installed at:\n"
+ "https://docs.nvidia.com/deeplearning/dali/user-guide/docs/installation.html"
)
from dataclasses import dataclass
from typing import Iterable, List, Union, Tuple
from pathlib import Path
from torch.utils.data import Dataset
from modulus.datapipes.datapipe import Datapipe
from modulus.datapipes.meta import DatapipeMetaData
Tensor = torch.Tensor
@dataclass
class MetaData(DatapipeMetaData):
name: str = "ERA5HDF5"
# Optimization
auto_device: bool = True
cuda_graphs: bool = True
# Parallel
ddp_sharding: bool = True
class ERA5HDF5Datapipe(Datapipe):
"""ERA5 DALI data pipeline for HDF5 files
Parameters
----------
data_dir : str
Directory where ERA5 data is stored
stats_dir : Union[str, None], optional
Directory to data statistic numpy files for normalization, if None, no normalization
will be used, by default None
channels : Union[List[int], None], optional
Defines which ERA5 variables to load, if None will use all in HDF5 file, by default None
batch_size : int, optional
Batch size, by default 1
stride : int, optional
Number of steps between input and output variables. For example, if the dataset
contains data at every 6 hours, a stride 1 = 6 hour delta t and
stride 2 = 12 hours delta t, by default 1
    num_input_steps : int, optional
        Number of timesteps included in the input variables, by default 1
    num_output_steps : int, optional
        Number of timesteps included in the output variables, by default 1
grid_type : str, optional
Type of grid in the input NetCDF file. Must be one of the following: "latlon",
"cubesphere", by default "latlon"
patch_size : Union[Tuple[int, int], int, None], optional
If specified, crops input and output variables so image dimensions are
divisible by patch_size, by default None
num_samples_per_year : int, optional
        Number of samples randomly taken from each year. If None, all will be used, by default None
shuffle : bool, optional
Shuffle dataset, by default True
num_workers : int, optional
Number of workers, by default 1
device: Union[str, torch.device], optional
Device for DALI pipeline to run on, by default cuda
process_rank : int, optional
Rank ID of local process, by default 0
world_size : int, optional
Number of training processes, by default 1
"""
def __init__(
self,
data_dir: str,
stats_dir: Union[str, None] = None,
channels: Union[List[int], None] = None,
batch_size: int = 1,
num_input_steps: int = 1,
num_output_steps: int = 1,
grid_type: str = "latlon",
stride: int = 1,
patch_size: Union[Tuple[int, int], int, None] = None,
num_samples_per_year: Union[int, None] = None,
shuffle: bool = True,
num_workers: int = 1,
device: Union[str, torch.device] = "cuda",
process_rank: int = 0,
world_size: int = 1,
):
super().__init__(meta=MetaData())
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.data_dir = Path(data_dir)
        self.stats_dir = Path(stats_dir) if stats_dir is not None else None
self.channels = channels
self.stride = stride
self.num_input_steps = num_input_steps
self.num_output_steps = num_output_steps
self.grid_type = grid_type
self.num_samples_per_year = num_samples_per_year
self.process_rank = process_rank
self.world_size = world_size
if isinstance(patch_size, int):
patch_size = (patch_size, patch_size)
self.patch_size = patch_size
# Set up device, needed for pipeline
if isinstance(device, str):
device = torch.device(device)
        # Need an index ID if CUDA
        if device.type == "cuda" and device.index is None:
device = torch.device("cuda:0")
self.device = device
# check root directory exists
if not self.data_dir.is_dir():
raise IOError(f"Error, data directory {self.data_dir} does not exist")
        if self.stats_dir is not None and not self.stats_dir.is_dir():
raise IOError(f"Error, stats directory {self.stats_dir} does not exist")
# check valid grid type
self.allowed_grid_types = ["latlon", "cubesphere"]
if self.grid_type not in self.allowed_grid_types:
raise ValueError(
f"Invalid grid type. Must be one of: {', '.join(self.allowed_grid_types)}"
)
self.parse_dataset_files()
self.load_statistics()
self.pipe = self._create_pipeline()
def parse_dataset_files(self) -> None:
"""Parses the data directory for valid HDF5 files and determines training samples
Raises
------
ValueError
            If the channels specified or the number of samples per year is not valid
"""
# get all input data files
self.data_paths = sorted(self.data_dir.glob("????.h5"))
for data_path in self.data_paths:
self.logger.info(f"ERA5 file found: {data_path}")
self.n_years = len(self.data_paths)
self.logger.info(f"Number of years: {self.n_years}")
# get total number of examples and image shape from the first file,
# assuming other files have exactly the same format.
self.logger.info(f"Getting file stats from {self.data_paths[0]}")
with h5py.File(self.data_paths[0], "r") as f:
# truncate the dataset to avoid out-of-range sampling
data_samples_per_year = (
f["fields"].shape[0]
- self.num_input_steps * self.stride
- self.num_output_steps * self.stride
)
self.img_shape = f["fields"].shape[2:]
# If channels not provided, use all of them
if self.channels is None:
self.channels = [i for i in range(f["fields"].shape[1])]
            # If num_samples_per_year is None, use all available samples
if self.num_samples_per_year is None:
self.num_samples_per_year = data_samples_per_year
# Adjust image shape if patch_size defined
if self.grid_type == "latlon":
if self.patch_size is not None:
self.img_shape = [
s - s % self.patch_size[i] for i, s in enumerate(self.img_shape)
]
self.logger.info(f"Input image shape: {self.img_shape}")
# Get total length
self.total_length = self.n_years * self.num_samples_per_year
self.length = self.total_length
# Sanity checks
if max(self.channels) >= f["fields"].shape[1]:
raise ValueError(
f"Provided channel has indexes greater than the number \
of fields {f['fields'].shape[1]}"
)
if self.num_samples_per_year > data_samples_per_year:
raise ValueError(
f"num_samples_per_year ({self.num_samples_per_year}) > number of \
samples available ({data_samples_per_year})!"
)
self.logger.info(f"Number of samples/year: {self.num_samples_per_year}")
self.logger.info(f"Number of channels available: {f['fields'].shape[1]}")
def load_statistics(self) -> None:
"""Loads ERA5 statistics from pre-computed numpy files
        The statistic files should be named global_means.npy and global_stds.npy, each
        with a shape of [1, C, 1, 1], and located in the stats_dir.
Raises
------
IOError
If mean or std numpy files are not found
AssertionError
If loaded numpy arrays are not of correct size
"""
# If no stats dir we just skip loading the stats
if self.stats_dir is None:
self.mu = None
            self.sd = None
return
# load normalisation values
mean_stat_file = self.stats_dir / Path("global_means.npy")
std_stat_file = self.stats_dir / Path("global_stds.npy")
if not mean_stat_file.exists():
raise IOError(f"Mean statistics file {mean_stat_file} not found")
if not std_stat_file.exists():
raise IOError(f"Std statistics file {std_stat_file} not found")
if self.grid_type == "latlon":
# has shape [1, C, 1, 1]
self.mu = np.load(str(mean_stat_file))[:, self.channels]
# has shape [1, C, 1, 1]
self.sd = np.load(str(std_stat_file))[:, self.channels]
if not self.mu.shape == self.sd.shape == (1, len(self.channels), 1, 1):
raise AssertionError("Error, normalisation arrays have wrong shape")
else: # cubed sphere
# has shape [1, C, 1, 1, 1]
self.mu = np.load(str(mean_stat_file))[:, self.channels]
self.mu = np.expand_dims(self.mu, -1)
# has shape [1, C, 1, 1, 1]
self.sd = np.load(str(std_stat_file))[:, self.channels]
self.sd = np.expand_dims(self.sd, -1)
if not self.mu.shape == self.sd.shape == (1, len(self.channels), 1, 1, 1):
raise AssertionError("Error, normalisation arrays have wrong shape")
def _create_pipeline(self) -> dali.Pipeline:
"""Create DALI pipeline
Returns
-------
dali.Pipeline
HDF5 DALI pipeline
"""
pipe = dali.Pipeline(
batch_size=self.batch_size,
num_threads=2,
prefetch_queue_depth=2,
py_num_workers=self.num_workers,
device_id=self.device.index,
py_start_method="spawn",
)
with pipe:
source = ERA5DaliExternalSource(
data_paths=self.data_paths,
num_samples=self.total_length,
channels=self.channels,
stride=self.stride,
num_input_steps=self.num_input_steps,
num_output_steps=self.num_output_steps,
num_samples_per_year=self.num_samples_per_year,
batch_size=self.batch_size,
shuffle=self.shuffle,
process_rank=self.process_rank,
world_size=self.world_size,
)
# Update length of dataset
self.length = len(source) // self.batch_size
# Read current batch.
invar, outvar, invar_idx, outvar_idx, year_idx = dali.fn.external_source(
source,
num_outputs=5,
parallel=False,
batch=False,
)
# if self.device.type == "cuda":
# # Move tensors to GPU as external_source won't do that.
# invar = invar.gpu()
# outvar = outvar.gpu()
# Crop.
if self.grid_type == "latlon":
h, w = self.img_shape
invar = invar[:, :h, :w]
outvar = outvar[:, :, :h, :w]
# Standardize.
            if self.stats_dir is not None:
invar = dali.fn.normalize(invar, mean=self.mu, stddev=self.sd)
outvar = dali.fn.normalize(outvar, mean=self.mu, stddev=self.sd)
# Set outputs.
pipe.set_outputs(invar, outvar, invar_idx, outvar_idx, year_idx)
return pipe
def __iter__(self):
# Reset the pipeline before creating an iterator to enable epochs.
self.pipe.reset()
# Create DALI PyTorch iterator.
return dali_pth.DALIGenericIterator(
[self.pipe], ["invar", "outvar", "invar_idx", "outvar_idx", "year_idx"]
)
def __len__(self):
return self.length
class ERA5DaliExternalSource:
"""DALI Source for lazy-loading the HDF5 ERA5 files
Parameters
----------
data_paths : Iterable[str]
Directory where ERA5 data is stored
num_samples : int
Total number of training samples
channels : Iterable[int]
List representing which ERA5 variables to load
stride : int
Number of steps between input and output variables
    num_input_steps : int
        Number of timesteps included in the input variables
    num_output_steps : int
        Number of timesteps included in the output variables
num_samples_per_year : int
Number of samples randomly taken from each year
batch_size : int, optional
Batch size, by default 1
shuffle : bool, optional
Shuffle dataset, by default True
process_rank : int, optional
Rank ID of local process, by default 0
world_size : int, optional
Number of training processes, by default 1
Note
----
For more information about DALI external source operator:
https://docs.nvidia.com/deeplearning/dali/archives/dali_1_13_0/user-guide/docs/examples/general/data_loading/parallel_external_source.html
"""
def __init__(
self,
data_paths: Iterable[str],
num_samples: int,
channels: Iterable[int],
num_input_steps: int,
num_output_steps: int,
stride: int,
num_samples_per_year: int,
batch_size: int = 1,
shuffle: bool = True,
process_rank: int = 0,
world_size: int = 1,
):
self.data_paths = list(data_paths)
# Will be populated later once each worker starts running in its own process.
self.data_files = None
self.num_samples = num_samples
self.chans = list(channels)
self.num_input_steps = num_input_steps
self.num_output_steps = num_output_steps
self.stride = stride
self.num_samples_per_year = num_samples_per_year
self.batch_size = batch_size
self.shuffle = shuffle
self.last_epoch = None
self.indices = np.arange(num_samples)
# Shard from indices if running in parallel
self.indices = np.array_split(self.indices, world_size)[process_rank]
# Get number of full batches, ignore possible last incomplete batch for now.
# Also, DALI external source does not support incomplete batches in parallel mode.
self.num_batches = len(self.indices) // self.batch_size
def __call__(
self, sample_info: dali.types.SampleInfo
) -> Tuple[Tensor, Tensor, np.ndarray, np.ndarray, np.ndarray]:
if sample_info.iteration >= self.num_batches:
raise StopIteration()
if self.data_files is None:
# This will be called once per worker. Workers are persistent,
# so there is no need to explicitly close the files - this will be done
# when corresponding pipeline/dataset is destroyed.
self.data_files = [h5py.File(path, "r") for path in self.data_paths]
# Shuffle before the next epoch starts.
if self.shuffle and sample_info.epoch_idx != self.last_epoch:
# All workers use the same rng seed so the resulting
# indices are the same across workers.
np.random.default_rng(seed=sample_info.epoch_idx).shuffle(self.indices)
self.last_epoch = sample_info.epoch_idx
# Get local indices from global index.
idx = self.indices[sample_info.idx_in_epoch]
year_idx = idx // self.num_samples_per_year
in_idx = idx % self.num_samples_per_year
data = self.data_files[year_idx]["fields"]
# Has [T,C,H,W] shape.
invar_idx = []
invar = np.empty((self.num_input_steps,) + data.shape[1:], dtype=data.dtype)
for i in range(self.num_input_steps):
in_idx = in_idx + i * self.stride
invar_idx.append(in_idx)
invar[i] = data[in_idx, self.chans]
# Has [T,C,H,W] shape.
outvar_idx = []
outvar = np.empty((self.num_output_steps,) + data.shape[1:], dtype=data.dtype)
for i in range(self.num_output_steps):
out_idx = in_idx + (i + 1) * self.stride
outvar_idx.append(out_idx)
outvar[i] = data[out_idx, self.chans]
invar_idx = np.array(invar_idx)
outvar_idx = np.array(outvar_idx)
year_idx = np.array(year_idx)
return invar, outvar, invar_idx, outvar_idx, year_idx
def __len__(self):
return len(self.indices)
# Source file: examples/weather/dlwp/era5_hdf5.py (repo: modulus-launch-main)
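# Usage sketch (paths and batch size are illustrative, and NVIDIA DALI plus the HDF5 data
# must be available): iterating the datapipe defined above. Each batch is a one-element
# list of dictionaries keyed by the names passed to DALIGenericIterator.
from era5_hdf5 import ERA5HDF5Datapipe

datapipe = ERA5HDF5Datapipe(
    data_dir="/data/train/",
    stats_dir="/data/stats/",
    channels=None,          # use every channel in the HDF5 files
    num_input_steps=2,
    num_output_steps=4,
    batch_size=1,
    device="cuda",
)
for data in datapipe:
    invar = data[0]["invar"]    # [batch, num_input_steps, channels, ...]
    outvar = data[0]["outvar"]  # [batch, num_output_steps, channels, ...]
    break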
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import hydra
import matplotlib.pyplot as plt
import xarray
import datetime
from torch.nn.parallel import DistributedDataParallel
import torch.nn.functional as F
from omegaconf import DictConfig
from era5_hdf5 import ERA5HDF5Datapipe
from modulus.distributed import DistributedManager
from modulus.utils import StaticCaptureTraining, StaticCaptureEvaluateNoGrad
from modulus.models.dlwp import DLWP
from cube_sphere_plotter_no_subplots import cube_sphere_plotter
from modulus.launch.logging import LaunchLogger, PythonLogger, initialize_mlflow
from modulus.launch.utils import load_checkpoint, save_checkpoint
import modulus.utils.sfno.zenith_angle as zenith_angle
from torch.optim.lr_scheduler import ReduceLROnPlateau
from hydra.utils import to_absolute_path
Tensor = torch.Tensor
def loss_func(x, y, p=2.0):
yv = y.reshape(x.size()[0], -1)
xv = x.reshape(x.size()[0], -1)
diff_norms = torch.linalg.norm(xv - yv, ord=p, dim=1)
y_norms = torch.linalg.norm(yv, ord=p, dim=1)
return torch.mean(diff_norms / y_norms)
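# A short worked example of the relative L2 loss above (values chosen by hand): for a
# single sample with target y = [3, 4] (norm 5) and prediction x = [3, 3] (error norm 1),
# loss_func returns 1 / 5 = 0.2:
#   x = torch.tensor([[3.0, 3.0]]); y = torch.tensor([[3.0, 4.0]])
#   loss_func(x, y)  # tensor(0.2000)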
def compute_tisr(start_year, year_idx, sample_idx, longrid, latgrid):
# Compute TISR
batched_tisr = []
for i, year_id in enumerate(year_idx):
tisr = []
for id in sample_idx[i]:
year = start_year + year_id
start_date = datetime.datetime(year.item(), 1, 1, 0, 0)
time_delta = datetime.timedelta(hours=id.item() * 6)
result_time = start_date + time_delta
tisr.append(
np.maximum(
zenith_angle.cos_zenith_angle(result_time, longrid, latgrid), 0
)
- (1 / np.pi)
) # subtract mean value
batched_tisr.append(np.stack(tisr, axis=0))
batched_tisr = np.expand_dims(
np.stack(batched_tisr, axis=0), axis=2
) # add channel dimension
return batched_tisr
def prepare_input(
input_list,
tisr_list,
lsm,
topographic_height,
):
    # TODO: Add an assertion check here to ensure tisr_list has the same number of elements as input_list!
for i in range(len(input_list)):
input_list[i] = torch.cat((input_list[i], tisr_list[i]), dim=1)
input_model = torch.cat(
input_list, dim=1
) # concat the time dimension into channels
repeat_vals = (
input_list[0].shape[0],
-1,
-1,
-1,
-1,
) # repeat along batch dimension
lsm = lsm.expand(*repeat_vals)
# normalize topographic height
topographic_height = (topographic_height - 3.724e03) / 8.349e03
topographic_height = topographic_height.expand(*repeat_vals)
input_model = torch.cat((input_model, lsm, topographic_height), dim=1)
return input_model
@torch.no_grad()
def validation_and_plotting_step(
eval_step,
arch,
datapipe,
datapipe_start_year,
nr_output_channels=14,
num_input_steps=2,
lsm=None,
longrid=None,
latgrid=None,
topographic_height=None,
epoch=0,
channels=[0, 1],
plotting=False,
device=None,
):
loss_epoch = 0
num_examples = 0
# Dealing with DDP wrapper
if hasattr(arch, "module"):
arch = arch.module
arch.eval()
for i, data in enumerate(datapipe):
invar = data[0]["invar"].to(device)
outvar = data[0]["outvar"].to(device)
invar_idx = data[0]["invar_idx"]
outvar_idx = data[0]["outvar_idx"]
year_idx = data[0]["year_idx"]
invar_tisr = compute_tisr(
datapipe_start_year, year_idx, invar_idx, longrid, latgrid
)
outvar_tisr = compute_tisr(
datapipe_start_year, year_idx, outvar_idx, longrid, latgrid
)
invar_tisr_tensor = torch.tensor(invar_tisr, dtype=invar.dtype).to(invar.device)
outvar_tisr_tensor = torch.tensor(outvar_tisr, dtype=outvar.dtype).to(
invar.device
)
invar_list = torch.split(invar, 1, dim=1) # split along the time dimension
invar_list = [tensor.squeeze(dim=1) for tensor in invar_list]
tisr_list = torch.split(
invar_tisr_tensor, 1, dim=1
) # split along the time dimension
tisr_list = [tensor.squeeze(dim=1) for tensor in tisr_list]
lsm_tensor = (
torch.tensor(lsm, dtype=torch.float).to(invar.device).unsqueeze(dim=0)
)
topographic_height_tensor = (
torch.tensor(topographic_height, dtype=torch.float)
.to(invar.device)
.unsqueeze(dim=0)
)
invar_model = prepare_input(
invar_list,
tisr_list,
lsm_tensor,
topographic_height_tensor,
)
pred_outvar = torch.zeros_like(outvar)
# multi step loss.
for t in range(outvar.shape[1] // num_input_steps):
output = eval_step(arch, invar_model)
if t != outvar.shape[1] // num_input_steps - 1:
invar_model = output
invar_list = list(
torch.split(
invar_model, (nr_output_channels // num_input_steps), dim=1
)
)
tisr_list = torch.split(
outvar_tisr_tensor[
:, t * num_input_steps : (t + 1) * num_input_steps
],
1,
dim=1,
)
tisr_list = [tensor.squeeze(dim=1) for tensor in tisr_list]
invar_model = prepare_input(
invar_list,
tisr_list,
lsm_tensor,
topographic_height_tensor,
)
output_list = torch.split(
output, nr_output_channels // num_input_steps, dim=1
)
output_list = [tensor.unsqueeze(dim=1) for tensor in output_list]
output = torch.cat(output_list, dim=1)
pred_outvar[:, t * 2] = output[:, 0]
pred_outvar[:, t * 2 + 1] = output[:, 1]
loss_epoch += F.mse_loss(
outvar[:, t * num_input_steps : t * num_input_steps + num_input_steps],
output,
).detach()
num_examples += invar.shape[0]
if plotting and i == 0:
pred_outvar = pred_outvar.detach().cpu().numpy()
outvar = outvar.detach().cpu().numpy()
for chan in channels:
plt.close("all")
fig, ax = plt.subplots(
3, pred_outvar.shape[1], figsize=(4 * outvar.shape[1], 8)
)
for t in range(outvar.shape[1]):
vmin, vmax = np.min(pred_outvar[0, t, chan]), np.max(
pred_outvar[0, t, chan]
)
im = ax[0, t].imshow(
cube_sphere_plotter(pred_outvar[0, t, chan]),
vmin=vmin,
vmax=vmax,
origin="lower",
)
fig.colorbar(im, ax=ax[0, t])
im = ax[1, t].imshow(
cube_sphere_plotter(outvar[0, t, chan]),
vmin=vmin,
vmax=vmax,
origin="lower",
)
fig.colorbar(im, ax=ax[1, t])
im = ax[2, t].imshow(
cube_sphere_plotter(
pred_outvar[0, t, chan] - outvar[0, t, chan]
),
origin="lower",
)
fig.colorbar(im, ax=ax[2, t])
ax[0, t].set_xticks([])
ax[0, t].set_yticks([])
ax[1, t].set_xticks([])
ax[1, t].set_yticks([])
ax[2, t].set_xticks([])
ax[2, t].set_yticks([])
ax[0, t].set_title(f"Pred: {t}")
ax[1, t].set_title(f"True: {t}")
ax[2, t].set_title(f"Diff: {t}")
fig.savefig(f"era5_validation_channel{chan}_epoch{epoch}.png", dpi=300)
arch.train()
return loss_epoch.detach() / num_examples
@hydra.main(version_base="1.2", config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
DistributedManager.initialize()
dist = DistributedManager()
initialize_mlflow(
experiment_name="Modulus-Launch-Dev",
experiment_desc="Modulus launch development",
run_name="DLWP-Training",
run_desc="DLWP ERA5 Training",
user_name="Modulus User",
mode="offline",
)
LaunchLogger.initialize(use_mlflow=True)
logger = PythonLogger("main") # General python logger
nr_input_channels = cfg.nr_input_channels
nr_output_channels = cfg.nr_output_channels
num_input_steps = 2
num_output_steps = 4
arch = DLWP(
nr_input_channels=nr_input_channels, nr_output_channels=nr_output_channels
).to(dist.device)
# Distributed learning
if dist.world_size > 1:
ddps = torch.cuda.Stream()
with torch.cuda.stream(ddps):
arch = DistributedDataParallel(
arch,
device_ids=[dist.local_rank],
output_device=dist.device,
broadcast_buffers=dist.broadcast_buffers,
find_unused_parameters=dist.find_unused_parameters,
)
torch.cuda.current_stream().wait_stream(ddps)
# load static datasets
lsm = xarray.open_dataset(
to_absolute_path("./static_datasets/land_sea_mask_rs_cs.nc")
)["lsm"].values
topographic_height = xarray.open_dataset(
to_absolute_path("./static_datasets/geopotential_rs_cs.nc")
)["z"].values
latlon_grids = xarray.open_dataset(
to_absolute_path("./static_datasets/latlon_grid_field_rs_cs.nc")
)
latgrid, longrid = latlon_grids["latgrid"].values, latlon_grids["longrid"].values
# convert static datasets to tensors
lsm_tensor = torch.tensor(lsm, dtype=torch.float).to(dist.device).unsqueeze(dim=0)
topographic_height_tensor = (
torch.tensor(topographic_height, dtype=torch.float)
.to(dist.device)
.unsqueeze(dim=0)
)
optimizer = torch.optim.Adam(
arch.parameters(),
betas=(0.9, 0.999),
lr=0.001,
weight_decay=0.0,
)
scheduler = ReduceLROnPlateau(
optimizer, mode="min", factor=0.2, patience=20, min_lr=1e-6, verbose=True
)
datapipe = ERA5HDF5Datapipe(
data_dir="/data/train/",
stats_dir="/data/stats/",
channels=None,
num_samples_per_year=1460
- num_input_steps
- num_output_steps, # Need better shard fix
# num_samples_per_year=1408, # Need better shard fix
num_input_steps=num_input_steps,
num_output_steps=num_output_steps,
batch_size=cfg.batch_size.train,
grid_type="cubesphere",
patch_size=None,
device=dist.device,
num_workers=1,
shuffle=True,
process_rank=dist.rank,
world_size=dist.world_size,
)
# if dist.rank == 0:
val_datapipe = ERA5HDF5Datapipe(
data_dir="/data/test/",
stats_dir="/data/stats/",
channels=None,
num_samples_per_year=1460
- num_input_steps
- num_output_steps, # Need better shard fix
# num_samples_per_year=1408, # Need better shard fix
num_input_steps=num_input_steps,
num_output_steps=num_output_steps,
batch_size=cfg.batch_size.validate,
grid_type="cubesphere",
patch_size=None,
device=dist.device,
num_workers=1,
shuffle=False,
process_rank=dist.rank,
world_size=dist.world_size,
)
if dist.rank == 0:
out_of_sample_datapipe = ERA5HDF5Datapipe(
data_dir="/data/out_of_sample/",
stats_dir="/data/stats/",
channels=None,
num_samples_per_year=4, # Need better shard fix
num_input_steps=num_input_steps,
num_output_steps=16,
batch_size=cfg.batch_size.out_of_sample,
grid_type="cubesphere",
patch_size=None,
device=dist.device,
num_workers=1,
shuffle=False,
)
loaded_epoch = load_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
device=dist.device,
)
@StaticCaptureEvaluateNoGrad(
model=arch, logger=logger, use_graphs=False, use_amp=False
)
def eval_step_forward(arch, invar):
return arch(invar)
@StaticCaptureTraining(
model=arch, optim=optimizer, logger=logger, use_graphs=True, use_amp=False
)
def train_step_forward(
arch, invar, outvar, invar_tisr, outvar_tisr, lsm, topographic_height
):
invar_list = torch.split(invar, 1, dim=1) # split along the time dimension
invar_list = [tensor.squeeze(dim=1) for tensor in invar_list]
tisr_list = torch.split(invar_tisr, 1, dim=1) # split along the time dimension
tisr_list = [tensor.squeeze(dim=1) for tensor in tisr_list]
invar_model = prepare_input(
invar_list,
tisr_list,
lsm,
topographic_height,
)
# multi step loss.
loss = 0.0
for t in range(outvar.shape[1] // num_input_steps):
output = arch(invar_model)
if t != outvar.shape[1] // num_input_steps - 1:
invar_model = output
invar_list = list(
torch.split(
invar_model, (nr_output_channels // num_input_steps), dim=1
)
)
tisr_list = torch.split(
outvar_tisr[:, t * num_input_steps : (t + 1) * num_input_steps],
1,
dim=1,
)
tisr_list = [tensor.squeeze(dim=1) for tensor in tisr_list]
invar_model = prepare_input(
invar_list,
tisr_list,
lsm,
topographic_height,
)
output_list = torch.split(
output, nr_output_channels // num_input_steps, dim=1
)
output_list = [tensor.unsqueeze(dim=1) for tensor in output_list]
output = torch.cat(output_list, dim=1)
loss += F.mse_loss(
outvar[:, t * num_input_steps : t * num_input_steps + num_input_steps],
output,
)
return loss
# Create static tensors for training
for i, data in enumerate(datapipe):
if i < 1:
static_invar = data[0]["invar"].to(dist.device)
static_outvar = data[0]["outvar"].to(dist.device)
invar_idx = data[0]["invar_idx"]
outvar_idx = data[0]["outvar_idx"]
year_idx = data[0]["year_idx"]
invar_tisr = compute_tisr(1980, year_idx, invar_idx, longrid, latgrid)
outvar_tisr = compute_tisr(1980, year_idx, outvar_idx, longrid, latgrid)
static_invar_tisr_tensor = torch.tensor(invar_tisr, dtype=torch.float).to(
dist.device
)
static_outvar_tisr_tensor = torch.tensor(outvar_tisr, dtype=torch.float).to(
dist.device
)
else:
break
# Main training loop
max_epoch = cfg.max_epoch
for epoch in range(max(1, loaded_epoch + 1), max_epoch + 1):
with LaunchLogger(
"train", epoch=epoch, num_mini_batch=len(datapipe), epoch_alert_freq=1
) as log:
for data in datapipe:
invar = data[0]["invar"].to(dist.device)
outvar = data[0]["outvar"].to(dist.device)
invar_idx = data[0]["invar_idx"]
outvar_idx = data[0]["outvar_idx"]
year_idx = data[0]["year_idx"]
invar_tisr = compute_tisr(1980, year_idx, invar_idx, longrid, latgrid)
outvar_tisr = compute_tisr(1980, year_idx, outvar_idx, longrid, latgrid)
invar_tisr_tensor = torch.tensor(invar_tisr, dtype=invar.dtype).to(
dist.device
)
outvar_tisr_tensor = torch.tensor(outvar_tisr, dtype=outvar.dtype).to(
dist.device
)
static_invar.copy_(invar)
static_outvar.copy_(outvar)
static_invar_tisr_tensor.copy_(invar_tisr_tensor)
static_outvar_tisr_tensor.copy_(outvar_tisr_tensor)
loss = train_step_forward(
arch,
static_invar,
static_outvar,
static_invar_tisr_tensor,
static_outvar_tisr_tensor,
lsm_tensor,
topographic_height_tensor,
)
log.log_minibatch({"Mini-batch loss": loss.detach()})
log.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})
with LaunchLogger("valid", epoch=epoch) as log:
val_loss = validation_and_plotting_step(
eval_step_forward,
arch,
val_datapipe,
2016,
nr_output_channels,
num_input_steps,
lsm,
longrid,
latgrid,
topographic_height,
epoch=epoch,
channels=[0, 1, 2, 3, 4, 5, 6],
plotting=False,
device=dist.device,
)
log.log_epoch({"Val loss": val_loss})
if dist.rank == 0:
# plot the data on out of sample dataset
out_of_sample_loss = validation_and_plotting_step(
eval_step_forward,
arch,
out_of_sample_datapipe,
2018,
nr_output_channels,
num_input_steps,
lsm,
longrid,
latgrid,
topographic_height,
epoch=epoch,
channels=[0, 1, 2, 3, 4, 5, 6],
plotting=True,
device=dist.device,
)
if dist.world_size > 1:
torch.distributed.barrier()
# scheduler step
scheduler.step(val_loss)
if epoch % 2 == 0 and dist.rank == 0:
save_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
epoch=epoch,
)
if __name__ == "__main__":
main()
# Source file: examples/weather/dlwp/train_dlwp.py (repo: modulus-launch-main)
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
cross_plot_map = {
0: (1, 0),
1: (1, 1),
2: (1, 2),
3: (1, 3),
4: (0, 0),
5: (2, 0),
}
rotations = {
0: 0,
1: 0,
2: 0,
3: 0,
4: 0,
5: 0,
}
def rearrange_to_cross(data, cross_plot_map=cross_plot_map, rotations=rotations):
cross_data = {}
data_min, data_max = np.min(data), np.max(data)
for tile in range(6):
row, col = cross_plot_map[tile]
rotated_data = np.rot90(data[tile], k=rotations[tile])
cross_data[(row, col)] = rotated_data
return cross_data, data_min, data_max
def plot_cross_subplot(data, data_min, data_max):
data_total = np.empty((64 * 3, 64 * 4))
data_total[:] = np.nan
for (row, col), face_data in data.items():
data_total[row * 64 : (row + 1) * 64, col * 64 : (col + 1) * 64] = face_data
return data_total
def cube_sphere_plotter(data):
cross_data, data_min, data_max = rearrange_to_cross(data)
return plot_cross_subplot(cross_data, data_min, data_max)
# Source file: examples/weather/dlwp/cube_sphere_plotter_no_subplots.py (repo: modulus-launch-main)
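# Usage sketch (random data): plotting one cube-sphere field of shape [6, 64, 64] with the
# helper above. Unfilled cells of the cross layout are NaN and render blank in imshow.
import numpy as np
import matplotlib.pyplot as plt
from cube_sphere_plotter_no_subplots import cube_sphere_plotter

field = np.random.rand(6, 64, 64)
plt.imshow(cube_sphere_plotter(field), origin="lower")
plt.colorbar()
plt.savefig("cube_sphere_example.png", dpi=150)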
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cdsapi
def download_data(var, year, save_path):
c = cdsapi.Client()
if var[0] == "single_level":
config = {
"product_type": "reanalysis",
"format": "netcdf",
"variable": var[1],
"year": year,
"month": "01",
"day": ["01", "02", "03"],
"time": ["00:00", "06:00", "12:00", "18:00"],
}
c.retrieve(
"reanalysis-era5-single-levels",
config,
save_path,
)
elif var[0] == "pressure_level":
config = {
"product_type": "reanalysis",
"format": "netcdf",
"variable": var[1],
"pressure_level": var[2],
"year": year,
"month": "01",
"day": ["01", "02", "03"],
"time": ["00:00", "06:00", "12:00", "18:00"],
}
c.retrieve(
"reanalysis-era5-pressure-levels",
config,
save_path,
)
var_list = [
("pressure_level", "temperature", "850"),
("pressure_level", "geopotential", "1000"),
("pressure_level", "geopotential", "700"),
("pressure_level", "geopotential", "500"),
("pressure_level", "geopotential", "300"),
("single_level", "total_column_water"),
("single_level", "2m_temperature"),
]
for i, var in enumerate(var_list):
if not os.path.exists("data/train_temp/"):
os.makedirs("data/train_temp/")
download_data(var, "1979", "./data/train_temp/1979_" + str(i) + ".nc")
for i, var in enumerate(var_list):
if not os.path.exists("data/test_temp/"):
os.makedirs("data/test_temp/")
download_data(var, "2017", "./data/test_temp/2017_" + str(i) + ".nc")
for i, var in enumerate(var_list):
if not os.path.exists("data/out_of_sample_temp/"):
os.makedirs("data/out_of_sample_temp/")
download_data(var, "2018", "./data/out_of_sample_temp/2018_" + str(i) + ".nc")
# Source file: examples/weather/dlwp/data_curation/data_download_simple.py (repo: modulus-launch-main)
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import numpy as np
import xarray as xr
import h5py
from scipy.sparse import coo_matrix
from collections import defaultdict
def processor(files, dest):
# group files by year
files_by_year = defaultdict(list)
for file in files:
basename = os.path.basename(file)
year = basename.rsplit("_", 1)[0]
files_by_year[year].append(file)
input_map_wts = xr.open_dataset("./map_LL721x1440_CS64.nc")
i = input_map_wts.row.values - 1
j = input_map_wts.col.values - 1
data = input_map_wts.S.values
M = coo_matrix((data, (i, j)))
results = {}
# process files year by year
for year, filenames in files_by_year.items():
result_arrays = []
filenames = sorted(filenames, key=lambda x: x[-4])
for filename in filenames:
with xr.open_dataset(filename) as ds:
data_var_name = list(ds.data_vars)[0]
# read the data variable and multiply by the matrix
data = ds[data_var_name].values
num_time = data.shape[0]
result = np.reshape(
np.reshape(data, (num_time, -1)) * M.T, (num_time, 6, 64, 64)
)
result_arrays.append(result.astype(np.float32))
# concatenate the arrays
result_stack = np.stack(result_arrays, axis=1)
results[year] = result_stack
for year, result in results.items():
print(year, result.shape)
if not os.path.exists(dest):
os.makedirs(dest)
output_filename = dest + f"{year}.h5"
print(output_filename)
# store result in a HDF5 file
with h5py.File(output_filename, "w") as hf:
hf.create_dataset("fields", data=result)
processor(glob.glob("./data/train_temp/*.nc"), "./data/train/")
processor(glob.glob("./data/test_temp/*.nc"), "./data/test/")
processor(glob.glob("./data/out_of_sample_temp/*.nc"), "./data/out_of_sample/")
| modulus-launch-main | examples/weather/dlwp/data_curation/post_processing.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def count_trainable_params(model: torch.nn.Module) -> int:
"""Count the number of trainable parameters in a model.
Args:
model (torch.nn.Module): Model to count parameters of.
Returns:
int: Number of trainable parameters.
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
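if __name__ == "__main__":
    # Usage sketch (illustration only, not part of the GraphCast pipeline):
    # a Linear(4, 2) layer has 4 * 2 weights + 2 biases = 10 trainable parameters.
    example_model = torch.nn.Linear(4, 2)
    assert count_trainable_params(example_model) == 10
    print(count_trainable_params(example_model))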
| modulus-launch-main | examples/weather/graphcast/train_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from pydantic import BaseModel
from typing import Tuple, Optional
class Constants(BaseModel):
"""GraphCast constants"""
processor_layers: int = 16
hidden_dim: int = 64 # 512
segments: int = 1
force_single_checkpoint: bool = False
checkpoint_encoder: bool = True
checkpoint_processor: bool = False
checkpoint_decoder: bool = False
force_single_checkpoint_finetune: bool = False
checkpoint_encoder_finetune: bool = True
checkpoint_processor_finetune: bool = True
checkpoint_decoder_finetune: bool = True
concat_trick: bool = True
cugraphops_encoder: bool = False
cugraphops_processor: bool = False
cugraphops_decoder: bool = False
recompute_activation: bool = False
wb_mode: str = "disabled"
dataset_path: str = "/data"
static_dataset_path: str = "datasets/static"
latlon_res: Tuple[int, int] = (721, 1440)
num_workers: int = 0 # 8
num_channels: int = 3 # 34
num_channels_val: int = 3
num_val_steps: int = 8
num_val_spy: int = 1 # SPY: Samples Per Year
grad_clip_norm: Optional[float] = 32.0
jit: bool = False
amp: bool = False
amp_dtype: str = "bfloat16"
full_bf16: bool = True
watch_model: bool = False
lr: float = 1e-3
lr_step3: float = 3e-7
    num_iters_step1: int = 1000
    num_iters_step2: int = 299000
    num_iters_step3: int = 11000
    step_change_freq: int = 1000
save_freq: int = 1 # 500
val_freq: int = 1 # 1000
ckpt_path: str = "checkpoints_34var"
val_dir: str = "validation_34var"
ckpt_name: str = "model_34var.pt"
pyt_profiler: bool = False
profile: bool = False
profile_range: Tuple = (90, 110)
icospheres_path: str = os.path.join(
Path(__file__).parent.resolve(), "icospheres.json"
)
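if __name__ == "__main__":
    # Usage sketch: fields can be overridden at construction time and the
    # resulting configuration serialized to JSON (pydantic v1 API assumed,
    # matching the C.json(indent=4) call in train_graphcast.py).
    example = Constants(hidden_dim=512, processor_layers=16)
    print(example.json(indent=4))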
| modulus-launch-main | examples/weather/graphcast/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-launch-main | examples/weather/graphcast/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from contextlib import nullcontext
from torch.cuda.amp import GradScaler
from torch.nn.parallel import DistributedDataParallel
import numpy as np
import time
import wandb as wb
import torch.cuda.profiler as profiler
from torch.optim.lr_scheduler import SequentialLR, LinearLR, CosineAnnealingLR, LambdaLR
# import modules
import os
from modulus.models.graphcast.graph_cast_net import GraphCastNet
from modulus.utils.graphcast.loss import CellAreaWeightedLossFunction
from modulus.launch.logging import (
PythonLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
from modulus.launch.utils import load_checkpoint, save_checkpoint
from train_utils import count_trainable_params
from loss.utils import grid_cell_area
from train_base import BaseTrainer
from validation import Validation
from constants import Constants
from modulus.datapipes.climate import ERA5HDF5Datapipe
from modulus.distributed import DistributedManager
try:
import apex
except ImportError:
pass
# Instantiate constants, and save to JSON file
C = Constants()
if C.cugraphops_encoder or C.cugraphops_processor or C.cugraphops_decoder:
try:
import pylibcugraphops
    except ImportError:
        raise ImportError(
            "pylibcugraphops is not installed. Refer to the Dockerfile for "
            + "instructions on how to install this package."
)
class GraphCastTrainer(BaseTrainer):
    """GraphCast trainer: builds the model, datapipes, loss, optimizer, learning-rate
    schedule, and checkpoint state on top of BaseTrainer."""
def __init__(self, wb, dist, rank_zero_logger):
super().__init__()
self.dist = dist
self.dtype = torch.bfloat16 if C.full_bf16 else torch.float32
self.enable_scaler = False
self.amp_dtype = None
if C.full_bf16:
assert torch.cuda.is_bf16_supported()
rank_zero_logger.info(f"Using {str(self.dtype)} dtype")
if C.amp:
raise ValueError("Full bfloat16 training is enabled, switch off C.amp")
if C.amp:
rank_zero_logger.info(f"Using C.amp with dtype {C.amp_dtype}")
            if C.amp_dtype == "float16" or C.amp_dtype == "fp16":
                self.amp_dtype = torch.float16
                self.enable_scaler = True
            elif C.amp_dtype == "bfloat16" or C.amp_dtype == "bf16":
                self.amp_dtype = torch.bfloat16
else:
raise ValueError("Invalid dtype for C.amp")
# instantiate the model
self.model = GraphCastNet(
meshgraph_path=C.icospheres_path,
static_dataset_path=C.static_dataset_path,
input_dim_grid_nodes=C.num_channels,
input_dim_mesh_nodes=3,
input_dim_edges=4,
output_dim_grid_nodes=C.num_channels,
processor_layers=C.processor_layers,
hidden_dim=C.hidden_dim,
do_concat_trick=C.concat_trick,
use_cugraphops_encoder=C.cugraphops_encoder,
use_cugraphops_processor=C.cugraphops_processor,
use_cugraphops_decoder=C.cugraphops_decoder,
recompute_activation=C.recompute_activation,
)
# set gradient checkpointing
if C.force_single_checkpoint:
self.model.set_checkpoint_model(True)
if C.checkpoint_encoder:
self.model.set_checkpoint_encoder(True)
if C.checkpoint_processor:
self.model.set_checkpoint_processor(C.segments)
if C.checkpoint_decoder:
self.model.set_checkpoint_decoder(True)
# JIT compile the model, and specify the device and dtype
if C.jit:
            self.model = (
                torch.jit.script(self.model).to(dtype=self.dtype).to(device=dist.device)
            )
rank_zero_logger.success("JIT compiled the model")
else:
self.model = self.model.to(dtype=self.dtype).to(device=dist.device)
if C.watch_model and not C.jit and dist.rank == 0:
wb.watch(self.model)
# distributed data parallel for multi-node training
if dist.world_size > 1:
self.model = DistributedDataParallel(
self.model,
device_ids=[dist.local_rank],
output_device=dist.device,
broadcast_buffers=dist.broadcast_buffers,
find_unused_parameters=dist.find_unused_parameters,
gradient_as_bucket_view=True,
static_graph=True,
)
rank_zero_logger.info(
f"Model parameter count is {count_trainable_params(self.model)}"
)
# instantiate the training datapipe
self.datapipe = ERA5HDF5Datapipe(
data_dir=os.path.join(C.dataset_path, "train"),
stats_dir=os.path.join(C.dataset_path, "stats"),
channels=[i for i in range(C.num_channels)],
num_steps=1,
batch_size=1,
num_workers=C.num_workers,
device=dist.device,
process_rank=dist.rank,
world_size=dist.world_size,
)
rank_zero_logger.success(
f"Loaded training datapipe of size {len(self.datapipe)}"
)
# instantiate the validation
if dist.rank == 0:
self.validation = Validation(self.model, self.dtype, self.dist, wb)
# enable train mode
self.model.train()
# get area
if hasattr(self.model, "module"):
self.area = grid_cell_area(
self.model.module.lat_lon_grid[:, :, 0], unit="deg"
)
else:
self.area = grid_cell_area(self.model.lat_lon_grid[:, :, 0], unit="deg")
self.area = self.area.to(dtype=self.dtype).to(device=dist.device)
# instantiate loss, optimizer, and scheduler
self.criterion = CellAreaWeightedLossFunction(self.area)
try:
self.optimizer = apex.optimizers.FusedAdam(
self.model.parameters(), lr=C.lr, betas=(0.9, 0.95), weight_decay=0.1
)
rank_zero_logger.info("Using FusedAdam optimizer")
        except (NameError, AttributeError):
            # apex is unavailable, fall back to the standard Adam optimizer
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=C.lr)
scheduler1 = LinearLR(
self.optimizer,
start_factor=1e-3,
end_factor=1.0,
total_iters=C.num_iters_step1,
)
scheduler2 = CosineAnnealingLR(
self.optimizer, T_max=C.num_iters_step2, eta_min=0.0
)
scheduler3 = LambdaLR(
self.optimizer, lr_lambda=lambda epoch: (C.lr_step3 / C.lr)
)
self.scheduler = SequentialLR(
self.optimizer,
schedulers=[scheduler1, scheduler2, scheduler3],
milestones=[C.num_iters_step1, C.num_iters_step1 + C.num_iters_step2],
)
self.scaler = GradScaler(enabled=self.enable_scaler)
# load checkpoint
if dist.world_size > 1:
torch.distributed.barrier()
self.iter_init = load_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=self.model,
optimizer=self.optimizer,
scheduler=self.scheduler,
scaler=self.scaler,
device=dist.device,
)
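# Learning-rate schedule sketch: phase 1 is a linear warm-up over num_iters_step1
# iterations (factor 1e-3 -> 1.0), phase 2 a cosine decay to zero over
# num_iters_step2 iterations, and phase 3 a constant fine-tuning rate of lr_step3;
# SequentialLR switches phases at num_iters_step1 and num_iters_step1 + num_iters_step2.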
if __name__ == "__main__":
# initialize distributed manager
DistributedManager.initialize()
dist = DistributedManager()
if dist.rank == 0:
os.makedirs(C.ckpt_path, exist_ok=True)
with open(
os.path.join(C.ckpt_path, C.ckpt_name.replace(".pt", ".json")), "w"
) as json_file:
json_file.write(C.json(indent=4))
# initialize loggers
initialize_wandb(
project="Modulus-Launch",
entity="Modulus",
name="GraphCast-Training",
group="GraphCast-DDP-Group",
) # Wandb logger
logger = PythonLogger("main") # General python logger
rank_zero_logger = RankZeroLoggingWrapper(logger, dist) # Rank 0 logger
logger.file_logging()
# initialize trainer
trainer = GraphCastTrainer(wb, dist, rank_zero_logger)
start = time.time()
rank_zero_logger.info("Training started...")
loss_agg, iter, tagged_iter, num_rollout_steps = 0, trainer.iter_init, 1, 1
terminate_training, finetune, update_dataloader = False, False, False
with torch.autograd.profiler.emit_nvtx() if C.profile else nullcontext():
# training loop
while True:
assert (
iter < C.num_iters_step1 + C.num_iters_step2 + C.num_iters_step3
), "Training is already finished!"
for i, data in enumerate(trainer.datapipe):
# profiling
if C.profile and iter == C.profile_range[0]:
rank_zero_logger.info("Starting profile", "green")
profiler.start()
if C.profile and iter == C.profile_range[1]:
rank_zero_logger.info("Ending profile", "green")
profiler.stop()
torch.cuda.nvtx.range_push("Training iteration")
if iter >= C.num_iters_step1 + C.num_iters_step2 and not finetune:
finetune = True
if C.force_single_checkpoint_finetune:
if hasattr(trainer.model, "module"):
trainer.model.module.set_checkpoint_model(True)
else:
trainer.model.set_checkpoint_model(True)
if C.checkpoint_encoder_finetune:
if hasattr(trainer.model, "module"):
trainer.model.module.set_checkpoint_encoder(True)
else:
trainer.model.set_checkpoint_encoder(True)
if C.checkpoint_processor_finetune:
if hasattr(trainer.model, "module"):
trainer.model.module.set_checkpoint_processor(C.segments)
else:
                            trainer.model.set_checkpoint_processor(C.segments)
if C.checkpoint_decoder_finetune:
if hasattr(trainer.model, "module"):
trainer.model.module.set_checkpoint_decoder(True)
else:
                            trainer.model.set_checkpoint_decoder(True)
if (
finetune
and (iter - (C.num_iters_step1 + C.num_iters_step2))
% C.step_change_freq
== 0
and iter != tagged_iter
):
update_dataloader = True
tagged_iter = iter
# update the dataloader for finetuning
if update_dataloader:
num_rollout_steps = (
iter - (C.num_iters_step1 + C.num_iters_step2)
) // C.step_change_freq + 2
trainer.datapipe = ERA5HDF5Datapipe(
data_dir=os.path.join(C.dataset_path, "train"),
stats_dir=os.path.join(C.dataset_path, "stats"),
channels=[i for i in range(C.num_channels)],
num_steps=num_rollout_steps,
batch_size=1,
num_workers=C.num_workers,
device=dist.device,
process_rank=dist.rank,
world_size=dist.world_size,
)
update_dataloader = False
rank_zero_logger.info(
f"Switching to {num_rollout_steps}-step rollout!"
)
break
# prepare the data
# TODO modify for history > 0
data_x = data[0]["invar"]
data_y = data[0]["outvar"]
# move to device & dtype
data_x = data_x.to(dtype=trainer.dtype)
grid_nfeat = data_x
y = data_y.to(dtype=trainer.dtype).to(device=dist.device)
# training step
loss = trainer.train(grid_nfeat, y)
if dist.rank == 0:
loss_agg += loss.detach().cpu()
# validation
if dist.rank == 0 and iter % C.val_freq == 0:
# free up GPU memory
del data_x, y
torch.cuda.empty_cache()
error = trainer.validation.step(
channels=list(np.arange(C.num_channels_val)), iter=iter
)
logger.log(f"iteration {iter}, Validation MSE: {error:.04f}")
# distributed barrier
if dist.world_size > 1:
torch.distributed.barrier()
# print logs and save checkpoint
if dist.rank == 0 and iter % C.save_freq == 0:
save_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=trainer.model,
optimizer=trainer.optimizer,
scheduler=trainer.scheduler,
scaler=trainer.scaler,
epoch=iter,
)
logger.info(f"Saved model on rank {dist.rank}")
logger.log(
f"iteration: {iter}, loss: {loss_agg/C.save_freq:10.3e}, \
time per iter: {(time.time()-start)/C.save_freq:10.3e}"
)
wb.log(
{
"loss": loss_agg / C.save_freq,
"learning_rate": trainer.scheduler.get_last_lr()[0],
},
step=iter,
)
loss_agg = 0
start = time.time()
iter += 1
torch.cuda.nvtx.range_pop()
# wrap up & terminate if training is finished
if iter >= C.num_iters_step1 + C.num_iters_step2 + C.num_iters_step3:
if dist.rank == 0:
del data_x, y
torch.cuda.empty_cache()
error = trainer.validation.step(
channels=list(np.arange(C.num_channels_val)), iter=iter
)
logger.log(f"iteration {iter}, Validation MSE: {error:.04f}")
save_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
trainer.model,
trainer.optimizer,
trainer.scheduler,
trainer.scaler,
iter,
)
logger.info(f"Saved model on rank {dist.rank}")
logger.log(
f"iteration: {iter}, loss: {loss_agg/C.save_freq:10.3e}, \
time per iter: {(time.time()-start)/C.save_freq:10.3e}"
)
terminate_training = True
break
if terminate_training:
rank_zero_logger.info("Finished training!")
break
| modulus-launch-main | examples/weather/graphcast/train_graphcast.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.profiler import profile, record_function, ProfilerActivity
from torch.cuda.amp import autocast
# import modules
import sys
from constants import Constants
C = Constants()
class BaseTrainer:
    """Base trainer implementing the forward, backward, and rollout steps shared by
    the GraphCast training scripts; subclasses attach the model, criterion,
    optimizer, scheduler, and gradient scaler."""
def __init__(self):
pass
def rollout(self, grid_nfeat, y):
with autocast(enabled=C.amp, dtype=self.amp_dtype):
total_loss = 0
pred_prev = grid_nfeat
for i in range(y.size(dim=1)):
# Shape of y is [N, M, C, H, W]. M is the number of steps
pred = self.model(pred_prev)
loss = self.criterion(pred, y[:, i])
total_loss += loss
pred_prev = pred
return total_loss
def forward(self, grid_nfeat, y):
# forward pass
torch.cuda.nvtx.range_push("Loss computation")
if C.pyt_profiler:
with profile(
activities=[ProfilerActivity.CUDA], record_shapes=True
) as prof:
with record_function("training_step"):
loss = self.rollout(grid_nfeat, y)
print(
prof.key_averages(group_by_input_shape=True).table(
sort_by="cuda_time_total", row_limit=10
)
)
exit(0)
else:
loss = self.rollout(grid_nfeat, y)
torch.cuda.nvtx.range_pop()
return loss
def backward(self, loss):
# backward pass
torch.cuda.nvtx.range_push("Weight gradients")
if C.amp:
self.scaler.scale(loss).backward()
torch.cuda.nvtx.range_pop()
self.scaler.unscale_(self.optimizer)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), C.grad_clip_norm)
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), C.grad_clip_norm)
torch.cuda.nvtx.range_pop()
self.optimizer.step()
def train(self, grid_nfeat, y):
self.optimizer.zero_grad()
loss = self.forward(grid_nfeat, y)
self.backward(loss)
self.scheduler.step()
return loss
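# Expected call pattern (a sketch; GraphCastTrainer in train_graphcast.py is the
# concrete subclass that attaches model, criterion, optimizer, scheduler, and
# scaler before training starts):
#
#   trainer = GraphCastTrainer(wb, dist, rank_zero_logger)
#   loss = trainer.train(grid_nfeat, y)  # forward + backward + scheduler step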
| modulus-launch-main | examples/weather/graphcast/train_base.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import torch
import matplotlib.pyplot as plt
from constants import Constants
from modulus.datapipes.climate import ERA5HDF5Datapipe
C = Constants()
class Validation:
    """Autoregressive validation rollouts with MSE scoring and prediction plots."""
def __init__(self, model, dtype, dist, wb):
self.model = model
self.dtype = dtype
self.dist = dist
self.wb = wb
self.val_datapipe = ERA5HDF5Datapipe(
data_dir=os.path.join(C.dataset_path, "test"),
stats_dir=os.path.join(C.dataset_path, "stats"),
channels=[i for i in range(C.num_channels)],
num_steps=C.num_val_steps,
batch_size=1,
num_samples_per_year=C.num_val_spy,
shuffle=False,
device=self.dist.device,
process_rank=self.dist.rank,
world_size=self.dist.world_size,
num_workers=C.num_workers,
)
print(f"Loaded validation datapipe of size {len(self.val_datapipe)}")
@torch.no_grad()
    def step(self, channels=(0, 1, 2), iter=0):
torch.cuda.nvtx.range_push("Validation")
os.makedirs(C.val_dir, exist_ok=True)
loss_epoch = 0
for i, data in enumerate(self.val_datapipe):
invar = data[0]["invar"].to(dtype=self.dtype)
outvar = (
data[0]["outvar"][0].to(dtype=self.dtype).to(device=self.dist.device)
)
pred = (
torch.empty(outvar.shape)
.to(dtype=self.dtype)
.to(device=self.dist.device)
)
for t in range(outvar.shape[0]):
outpred = self.model(invar)
pred[t] = outpred
invar = outpred
loss_epoch += torch.mean(torch.pow(pred - outvar, 2))
torch.cuda.nvtx.range_pop()
pred = pred.to(torch.float32).cpu().numpy()
outvar = outvar.to(torch.float32).cpu().numpy()
del invar, outpred
torch.cuda.empty_cache()
if i == 0:
for chan in channels:
plt.close("all")
fig, ax = plt.subplots(3, pred.shape[0], figsize=(15, 5))
fig.subplots_adjust(hspace=0.5, wspace=0.3)
for t in range(outvar.shape[0]):
im_pred = ax[0, t].imshow(pred[t, chan], vmin=-1.5, vmax=1.5)
ax[0, t].set_title(f"Prediction (t={t+1})", fontsize=10)
fig.colorbar(
im_pred, ax=ax[0, t], orientation="horizontal", pad=0.4
)
im_outvar = ax[1, t].imshow(
outvar[t, chan], vmin=-1.5, vmax=1.5
)
ax[1, t].set_title(f"Ground Truth (t={t+1})", fontsize=10)
fig.colorbar(
im_outvar, ax=ax[1, t], orientation="horizontal", pad=0.4
)
im_diff = ax[2, t].imshow(
abs(pred[t, chan] - outvar[t, chan]), vmin=0.0, vmax=0.5
)
ax[2, t].set_title(f"Abs. Diff. (t={t+1})", fontsize=10)
fig.colorbar(
im_diff, ax=ax[2, t], orientation="horizontal", pad=0.4
)
fig.savefig(
os.path.join(
C.val_dir, f"era5_validation_channel{chan}_iter{iter}.png"
)
)
self.wb.log({f"val_chan{chan}_iter{iter}": fig}, step=iter)
return loss_epoch / len(self.val_datapipe)
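# Call-site sketch (see train_graphcast.py): validation runs on rank 0 only and
# returns the mean MSE over the validation datapipe, e.g.
#
#   validation = Validation(model, dtype, dist, wb)
#   error = validation.step(channels=[0, 1, 2], iter=iteration)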
| modulus-launch-main | examples/weather/graphcast/validation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-launch-main | examples/weather/graphcast/loss/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from modulus.utils.graphcast.graph_utils import deg2rad
def grid_cell_area(lat: Tensor, unit="deg") -> Tensor:
"""Normalized area of the latitude-longitude grid cell"""
if unit == "deg":
lat = deg2rad(lat)
area = torch.abs(torch.cos(lat))
return area / torch.mean(area)
def per_variable_level_weight() -> Tensor:
"""Per variable-level weight"""
pass
def per_variable_level_inverse_variance() -> Tensor:
"""Per variable-level inverse variance weighting"""
pass
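if __name__ == "__main__":
    # Usage sketch with hypothetical latitudes (illustration only); the returned
    # weights are normalized so that their mean is 1.
    example_lat = torch.linspace(-90.0, 90.0, steps=721)
    example_area = grid_cell_area(example_lat, unit="deg")
    print(float(example_area.mean()))  # ~1.0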
| modulus-launch-main | examples/weather/graphcast/loss/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import copy
import numpy as np
from tqdm import tqdm
# gpu info
import pynvml
# torch
import torch
from torchvision.utils import save_image
import torch.nn as nn
import torch.cuda.amp as amp
import logging
import wandb
from models import get_model
from modulus.datapipes.climate.sfno.dataloader import get_dataloader
from modulus.utils.sfno.distributed.mappings import gather_from_parallel_region
from modulus.utils.sfno.loss import LossHandler
from modulus.utils.sfno.metric import MetricsHandler
# distributed computing stuff
from modulus.utils.sfno.distributed import comm
import torch.distributed as dist
# for the manipulation of state dict
from collections import OrderedDict
# for counting model parameters
from helpers import count_parameters
class Ensembler:
    """Ensemble forecast generator: runs autoregressive rollouts from a pretrained
    checkpoint with Gaussian perturbations of the initial condition."""
def _get_time_stats(self):
# get some stats: make data shared with tensor from the class
out_bias, out_scale = self.valid_dataloader.get_output_normalization()
mult_cpu = torch.from_numpy(out_scale)[0, :, 0, 0]
# compute
if self.params.enable_synthetic_data:
clim = torch.zeros(
[
self.params.N_out_channels,
self.params.img_crop_shape_x,
self.params.img_crop_shape_y,
],
dtype=torch.float32,
device=self.device,
)
else:
# full bias and scale
in_bias, in_scale = self.valid_dataloader.get_input_normalization()
in_bias = in_bias[0, ...]
in_scale = in_scale[0, ...]
# we need this window
start_x = self.params.img_crop_offset_x
end_x = start_x + self.params.img_crop_shape_x
start_y = self.params.img_crop_offset_y
end_y = start_y + self.params.img_crop_shape_y
# now we crop the time means
time_means = np.load(self.params.time_means_path)[
0,
self.params.out_channels,
start_x:end_x,
start_y:end_y,
]
clim = torch.as_tensor(
(time_means - in_bias) / in_scale, dtype=torch.float32
)
return mult_cpu, clim
def _update_parameters(self, params):
"""
This could be moved potentially. The idea is to process params and handle the logics for params
"""
params.in_channels = self.valid_dataset.in_channels
params.N_in_channels = len(self.valid_dataset.in_channels)
params.out_channels = self.valid_dataset.out_channels
params.N_out_channels = len(self.valid_dataset.out_channels)
params.img_shape_x = self.valid_dataset.img_shape_x
params.img_shape_y = self.valid_dataset.img_shape_y
params.img_crop_shape_x = self.valid_dataset.img_crop_shape_x
params.img_crop_shape_y = self.valid_dataset.img_crop_shape_y
params.img_crop_offset_x = self.valid_dataset.img_crop_offset_x
params.img_crop_offset_y = self.valid_dataset.img_crop_offset_y
params.img_local_shape_x = self.valid_dataset.img_local_shape_x
params.img_local_shape_y = self.valid_dataset.img_local_shape_y
params.img_local_offset_x = self.valid_dataset.img_local_offset_x
params.img_local_offset_y = self.valid_dataset.img_local_offset_y
# derived quantities
params["N_in_predicted_channels"] = params.N_in_channels
# sanitization:
if not hasattr(params, "add_zenith"):
params["add_zenith"] = False
# input channels
# zenith channel is appended to all the samples, so we need to do it here
if params.add_zenith:
params.N_in_channels += 1
if params.n_history >= 1:
params.N_in_channels = (params.n_history + 1) * params.N_in_channels
params.N_in_predicted_channels *= params.n_history + 1
# these are static and the same for all samples in the same time history
if params.add_grid:
n_grid_chan = 2
if (params.gridtype == "sinusoidal") and hasattr(
params, "grid_num_frequencies"
):
n_grid_chan *= params.grid_num_frequencies
params.N_in_channels += n_grid_chan
if params.add_orography:
params.N_in_channels += 1
if params.add_landmask:
params.N_in_channels += 2
params.n_future = 0
# target channels
params.N_target_channels = (params.n_future + 1) * params.N_out_channels
# MISC parameters
if not hasattr(params, "history_normalization_mode"):
params["history_normalization_mode"] = "none"
if not hasattr(params, "multigrid_mode"):
params["multigrid_mode"] = "none"
if not hasattr(params, "num_visualization_workers"):
params["num_visualization_workers"] = 1
if not hasattr(params, "log_video"):
params["log_video"] = 0
# automatically detect wind channels and keep track of them
if hasattr(params, "channel_names") and not hasattr(params, "wind_channels"):
channel_names = params.channel_names
channel_dict = {
channel_names[ch]: ch
for ch in set(params.in_channels + params.out_channels)
}
wind_channels = []
for chn, ch in channel_dict.items():
if chn[0] == "u":
vchn = "v" + chn[1:]
if vchn in channel_dict.keys():
# wind_channels.append(ch, channel_dict[vchn])
wind_channels = wind_channels + [ch, channel_dict[vchn]]
params["wind_channels"] = wind_channels
if not hasattr(params, "load_checkpoint"):
params["load_checkpoint"] = "legacy"
return params
def __del__(self):
if self.params.log_to_wandb:
wandb.finish()
def __init__(self, params, world_rank):
self.params = None
self.world_rank = world_rank
self.data_parallel_rank = comm.get_rank("data")
if torch.cuda.is_available():
self.device = torch.device(f"cuda:{torch.cuda.current_device()}")
else:
self.device = torch.device("cpu")
# nvml stuff
if params.log_to_screen:
pynvml.nvmlInit()
self.nvml_handle = pynvml.nvmlDeviceGetHandleByIndex(self.device.index)
# set amp_parameters
self.amp_enabled = params.amp_mode != "none"
self.amp_dtype = (
torch.float16
if (params.amp_mode == "fp16")
else torch.bfloat16
if (params.amp_mode == "bf16")
else None
)
if params.log_to_wandb:
# login first:
wandb.login()
# init
wandb.init(
dir=params.experiment_dir,
config=params,
name=params.wandb_name, # if not params.resuming else None,
group=params.wandb_group, # if not params.resuming else None,
project=params.wandb_project,
entity=params.wandb_entity,
resume=params.resuming,
)
# data loader
if params.log_to_screen:
logging.info("initializing data loader")
# we set the number of validation steps manually to 0 so we can abuse the dataloader and load examples without shuffling
params.valid_autoreg_steps = 0
# just a dummy dataloader
self.valid_dataloader, self.valid_dataset = get_dataloader(
params,
params.inf_data_path,
train=False,
final_eval=True,
device=self.device,
)
if params.log_to_screen:
logging.info("data loader initialized")
# update params
params = self._update_parameters(params)
# save params
self.params = params
# init preprocessor and model
self.model = get_model(params).to(self.device)
self.preprocessor = self.model.preprocessor
if params.log_to_wandb:
wandb.watch(self.model)
# print model
if self.world_rank == 0:
print(self.model)
# metrics handler
mult_cpu, clim = self._get_time_stats()
self.metrics = MetricsHandler(self.params, mult_cpu, clim, self.device)
self.metrics.initialize_buffers()
# loss handler
self.loss_obj = LossHandler(self.params, d=2)
self.loss_obj = self.loss_obj.to(self.device)
if self.params.enable_nhwc:
self.loss_obj = self.loss_obj.to(memory_format=torch.channels_last)
self.model_eval = self.model
# reload checkpoints
self.iters = 0
self.startEpoch = 0
assert (
params.pretrained_checkpoint_path is not None
), "Error, please specify a valid pretrained checkpoint path"
self.restore_checkpoint(
params.pretrained_checkpoint_path, checkpoint_mode=params["load_checkpoint"]
)
self.epoch = self.startEpoch
# if params.log_to_screen:
# logging.info(self.model)
# counting runs a reduction so we need to count on all ranks before printing on rank 0
pcount = count_parameters(self.model, self.device)
if params.log_to_screen:
logging.info("Number of trainable model parameters: {}".format(pcount))
def ensemble(self):
# log parameters
if self.params.log_to_screen:
# log memory usage so far
all_mem_gb = pynvml.nvmlDeviceGetMemoryInfo(self.nvml_handle).used / (
1024.0 * 1024.0 * 1024.0
)
max_mem_gb = torch.cuda.max_memory_allocated(device=self.device) / (
1024.0 * 1024.0 * 1024.0
)
logging.info(
f"Scaffolding memory high watermark: {all_mem_gb} GB ({max_mem_gb} GB for pytorch)"
)
# announce training start
logging.info("Starting Training Loop...")
# perform a barrier here to make sure everybody is ready
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
try:
torch.cuda.reset_peak_memory_stats(self.device)
except ValueError:
pass
training_start = time.time()
best_valid_loss = 1.0e6
epoch = 0
# start timer
epoch_start = time.time()
ens_time = self.ensemble_one_epoch(epoch)
# end timer
epoch_end = time.time()
# create timing logs:
# training done
training_end = time.time()
if self.params.log_to_screen:
logging.info(
"Total training time is {:.2f} sec".format(
training_end - training_start
)
)
return
def _set_train(self):
self.model.train()
self.loss_obj.train()
def _set_eval(self):
self.model.eval()
self.loss_obj.eval()
@torch.jit.ignore
def _gather_hw(self, x: torch.Tensor) -> torch.Tensor:
# gather the data over the spatial communicator
xh = gather_from_parallel_region(x, -2, "h")
xw = gather_from_parallel_region(xh, -1, "w")
return xw
@torch.jit.ignore
def _gather_data(self, x: torch.Tensor) -> torch.Tensor:
# gather the data over the spatial communicator
xd = gather_from_parallel_region(x, -4, "data")
return xd
def ensemble_one_epoch(self, epoch, log_channels=["u10m", "v10m", "t2m"]):
# set to eval
self._set_eval()
# get channels
ch = [self.params.channel_names.index(chn) for chn in log_channels]
# clear cache
torch.cuda.empty_cache()
# initialize metrics buffers
self.metrics.zero_buffers()
# start the timer
valid_start = time.time()
with torch.inference_mode():
with torch.no_grad():
# ensemble member list
enslist = []
gtslist = []
# we only use one starting point
iterator = iter(self.valid_dataloader)
for ens_step in tqdm(
range(self.params.ensemble_members),
desc="Ensemble progress",
disable=not self.params.log_to_screen,
):
prdlist = []
gtlist = []
for idt in range(self.params.ensemble_autoreg_steps):
data = next(iterator)
gdata = map(
lambda x: x.to(self.device, dtype=torch.float32), data
)
# preprocess
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
inp = self.preprocessor.flatten_history(inp)
# split list of targets
# tarlist = torch.split(tar, 1, dim=1)
if idt == 0:
inpt = inp
# add the noise if it is the first step
# TODO: add more ensembling strategies
# TODO: we treat the batch dimension as ensemble dimension so we need to turn off the noise for member 0
noise = self.params.noise_amplitude * torch.randn_like(
inpt[:, : self.params.N_in_predicted_channels]
)
inpt[:, : self.params.N_in_predicted_channels] += noise
# keep track of gt
gt = self._gather_hw(inp).detach().unsqueeze(1).cpu().numpy()
gtlist.append(gt[:, :, ch])
targ = tar
# gather the output and write it out
out = self._gather_hw(inpt)
# currently disabling data dimension and only working on rank 0
# out = self._gather_data(out)
out = out.detach().unsqueeze(1).cpu().numpy()
prdlist.append(out[:, :, ch])
# flatten history of the target
targ = self.preprocessor.flatten_history(targ)
# FW pass
with amp.autocast(
enabled=self.amp_enabled, dtype=self.amp_dtype
):
pred = self.model_eval(inpt)
# loss = self.loss_obj(pred, targ, inpt)
# put in the metrics handler
# self.metrics.update(pred, targ, loss, idt)
                        # append history, which should also correctly append the target's zenith angle
inpt = self.preprocessor.append_history(inpt, pred, idt)
ens_member = np.stack(prdlist, axis=1)
enslist.append(ens_member)
gts_member = np.stack(gtlist, axis=1)
gtslist.append(gts_member)
# we should add a gather over the batch dim probably
ens_array = np.stack(enslist, axis=0)
print(ens_array.shape)
gts_array = np.stack(gtslist, axis=0)
print(gts_array.shape)
# # create final logs
# logs, acc_curve = self.metrics.finalize(final_inference=True)
# save the acc curve
if self.world_rank == 0:
np.save(
os.path.join(self.params.experiment_dir, "ensemble_output.npy"),
ens_array,
)
np.save(
os.path.join(self.params.experiment_dir, "gts_output.npy"), gts_array
)
# global sync is in order
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# timer
inference_time = time.time() - valid_start
return inference_time
def restore_checkpoint(self, checkpoint_path, checkpoint_mode="flexible"):
"""We intentionally require a checkpoint_dir to be passed
in order to allow Ray Tune to use this function"""
# legacy mode
if checkpoint_mode == "legacy":
checkpoint_fname = checkpoint_path.format(mp_rank=comm.get_rank("model"))
if self.params.log_to_screen:
logging.info("Loading checkpoint {checkpoint_fname} in legacy mode")
checkpoint = torch.load(
checkpoint_fname, map_location="cuda:{}".format(self.device.index)
)
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
# a hacky workaround to remove SHT params from state dict
if True:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
sht_strings = [
"forward_transform",
"inverse_transform",
"sht",
"isht",
"sht_down",
"isht_up",
".ii",
".jj",
".pct",
"trans_down",
"itrans_up",
"trans",
"itrans",
]
contains = [string in k for string in sht_strings]
if True not in contains:
# to be able to deal with older implementations we need to reshape any weights from norm layers
# this can be removed in the future
if "norm" in k:
v = v.reshape(-1)
new_state_dict[k] = v
state_dict = new_state_dict
self.model.load_state_dict(state_dict, strict=False)
# we load the dict a second time for the cases in which the previous run was not conducted with multistep
if self.params.n_future > 0:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = "module." + "model." + k[7:]
new_state_dict[name] = v
self.model.load_state_dict(new_state_dict, strict=False)
# new flexible mode allows to load models in arbitrary model-parallel configurations
elif checkpoint_mode == "flexible":
            # in flexible mode we exclusively load the checkpoint written by mp_rank=0
checkpoint_fname = checkpoint_path.format(mp_rank=0)
if self.params.log_to_screen:
logging.info("Loading checkpoint {checkpoint_fname} in flexible mode")
checkpoint = torch.load(
checkpoint_fname, map_location="cuda:{}".format(self.device.index)
)
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
new_state_dict = OrderedDict()
for k, v in self.model.named_parameters():
if k in state_dict.keys():
if hasattr(v, "sharded_dims_mp"):
weight_shape = state_dict[k].shape
read_ranges = []
for d, group in enumerate(v.sharded_dims_mp):
# compute the read range for this model
if group is None:
# read_range = None
read_range = slice(0, v.shape[d], 1)
else:
weight_shape_dist = (
weight_shape[0] + comm.get_size(group) - 1
) // comm.get_size(group)
read_range = slice(
weight_shape_dist * comm.get_rank(group),
weight_shape_dist * comm.get_rank(group)
+ v.shape[d],
1,
)
read_ranges.append(read_range)
new_state_dict[k] = state_dict[k][read_ranges]
else:
new_state_dict[k] = state_dict[k]
# to be able to deal with older implementations we need to reshape any weights from norm layers
# this can be removed in the future
if "norm" in k:
new_state_dict[k] = new_state_dict[k].reshape(-1)
else:
# put a warning here
print(f"missing {k}")
state_dict = new_state_dict
self.model.load_state_dict(state_dict, strict=False)
else:
raise ValueError(f"Unknown checkoint mode {checkpoint_mode}.")
print(torch.sum(self.model.model.pos_embed))
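# Driver sketch (the actual entry point lives in the accompanying launcher script
# of the fcn_sfno example, not in this file):
#
#   ensembler = Ensembler(params, world_rank)
#   ensembler.ensemble()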
| modulus-launch-main | examples/weather/fcn_sfno/ensembler.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from functools import partial
from modulus.models.sfno.preprocessor import Preprocessor2D
from modulus.models.sfno.sfnonet import SphericalFourierNeuralOperatorNet
class SingleStepWrapper(nn.Module):
"""Wrapper for training a single step into the future"""
def __init__(self, params, model_handle):
super(SingleStepWrapper, self).__init__()
self.preprocessor = Preprocessor2D(params)
self.model = model_handle(params)
def forward(self, inp):
# first append unpredicted features
inpa = self.preprocessor.append_unpredicted_features(inp)
# now normalize
self.preprocessor.history_compute_stats(inpa)
inpan = self.preprocessor.history_normalize(inpa, target=False)
# now add static features if requested
inpans = self.preprocessor.add_static_features(inpan)
# forward pass
yn = self.model(inpans)
# undo normalization
y = self.preprocessor.history_denormalize(yn, target=True)
return y
class MultiStepWrapper(nn.Module):
"""Wrapper for training multiple steps into the future"""
def __init__(self, params, model_handle):
super(MultiStepWrapper, self).__init__()
self.preprocessor = Preprocessor2D(params)
self.model = model_handle(params)
# collect parameters for history
self.n_future = params.n_future
def _forward_train(self, inp):
result = []
inpt = inp
for step in range(self.n_future + 1):
# add unpredicted features
inpa = self.preprocessor.append_unpredicted_features(inpt)
# do history normalization
self.preprocessor.history_compute_stats(inpa)
inpan = self.preprocessor.history_normalize(inpa, target=False)
# add static features
inpans = self.preprocessor.add_static_features(inpan)
# prediction
predn = self.model(inpans)
# append the denormalized result to output list
# important to do that here, otherwise normalization stats
# will have been updated later:
pred = self.preprocessor.history_denormalize(predn, target=True)
result.append(pred)
if step == self.n_future:
break
# append history
inpt = self.preprocessor.append_history(inpt, pred, step)
# concat the tensors along channel dim to be compatible with flattened target
result = torch.cat(result, dim=1)
return result
def _forward_eval(self, inp):
# first append unpredicted features
inpa = self.preprocessor.append_unpredicted_features(inp)
# do history normalization
self.preprocessor.history_compute_stats(inpa)
inpan = self.preprocessor.history_normalize(inpa, target=False)
# add static features
inpans = self.preprocessor.add_static_features(inpan)
        # forward pass
        yn = self.model(inpans)
        # denormalize here, otherwise the normalization stats would already be outdated
        y = self.preprocessor.history_denormalize(yn, target=True)
return y
def forward(self, inp):
# decide which routine to call
if self.training:
y = self._forward_train(inp)
else:
y = self._forward_eval(inp)
return y
def get_model(params):
    """Construct the SFNO model and wrap it for single- or multi-step training."""
model_handle = None
if params.nettype == "sfno":
assert params.spectral_transform == "sht"
# use the Helmholtz decomposition
model_handle = partial(
SphericalFourierNeuralOperatorNet, use_complex_kernels=True
)
else:
raise NotImplementedError(f"Error, net type {params.nettype} not implemented")
# wrap into Multi-Step if requested
if params.n_future > 0:
model = MultiStepWrapper(params, model_handle)
else:
model = SingleStepWrapper(params, model_handle)
return model
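# Selection sketch: get_model returns a MultiStepWrapper when params.n_future > 0
# and a SingleStepWrapper otherwise; `params` is the run configuration object and
# must provide at least `nettype`, `spectral_transform`, and `n_future`.
#
#   model = get_model(params)  # params.nettype == "sfno"
#   y = model(inp)             # single-step prediction or multi-step rollout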
| modulus-launch-main | examples/weather/fcn_sfno/models.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gc
import time
import numpy as np
from tqdm import tqdm
# gpu info
import pynvml
# torch
import torch
import torch.cuda.amp as amp
import logging
import wandb
from modulus.models.sfno.preprocessor import get_preprocessor
from models import get_model
from modulus.datapipes.climate.sfno.dataloader import get_dataloader
from modulus.utils.sfno.distributed.mappings import init_gradient_reduction_hooks
from apex import optimizers
from modulus.utils.sfno.loss import LossHandler
from modulus.utils.sfno.metric import MetricsHandler
# distributed computing stuff
from modulus.utils.sfno.distributed import comm
import torch.distributed as dist
# for the manipulation of state dict
from collections import OrderedDict
# visualization utils
import visualize
# for counting model parameters
from helpers import count_parameters
from modulus.launch.logging import (
PythonLogger,
LaunchLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
class Inferencer:
    """Autoregressive inference driver for a pretrained SFNO checkpoint."""
# jit stuff
def _compile_model(self, inp_shape):
if self.params.jit_mode == "script":
if dist.is_initialized() and not self.params.disable_ddp:
self.model.module = torch.jit.script(self.model.module)
else:
self.model = torch.jit.script(self.model)
self.model_train = self.model
self.model_eval = self.model
elif self.params.jit_mode == "inductor":
self.model = torch.compile(self.model)
self.model_train = self.model
self.model_eval = self.model
else:
self.model_train = self.model
self.model_eval = self.model
return
# graph stuff
def _capture_model(self, capture_stream, inp_shape, tar_shape, num_warmup_steps=20):
matmul_comm_size = comm.get_size("matmul")
# modify inp shape due to model parallelism
if self.params.split_data_channels:
inp_shape_eff = (
inp_shape[0],
(inp_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
inp_shape[2],
inp_shape[3],
)
tar_shape_eff = (
tar_shape[0],
(tar_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
tar_shape[2],
tar_shape[3],
)
else:
inp_shape_eff = (inp_shape[0], inp_shape[1], inp_shape[2], inp_shape[3])
tar_shape_eff = (tar_shape[0], tar_shape[1], tar_shape[2], tar_shape[3])
# print(inp_shape_eff, tar_shape_eff)
self.static_inp = torch.zeros(
inp_shape_eff, dtype=torch.float32, device=self.device
)
self.static_tar = torch.zeros(
tar_shape_eff, dtype=torch.float32, device=self.device
)
if self.params.enable_nhwc:
self.static_inp = self.static_inp.to(memory_format=torch.channels_last)
self.static_tar = self.static_tar.to(memory_format=torch.channels_last)
# set to train
self._set_train()
# do capture
if capture_stream is None:
capture_stream = torch.cuda.Stream()
capture_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(capture_stream):
for _ in range(num_warmup_steps):
self.model_train.zero_grad(set_to_none=True)
# FW
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
self.static_pred = self.model_train(self.static_inp).to(self.device)
self.static_loss = self.loss_obj(
self.static_pred, self.static_tar, self.static_inp
)
# BW
self.gscaler.scale(self.static_loss).backward()
# sync here
capture_stream.synchronize()
gc.collect()
torch.cuda.empty_cache()
# create graph
self.graph = torch.cuda.CUDAGraph()
# zero grads before capture:
self.model_train.zero_grad(set_to_none=True)
# start capture
self.graph.capture_begin()
# FW
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
self.static_pred = self.model_train(self.static_inp)
self.static_loss = self.loss_obj(
self.static_pred, self.static_tar, self.static_inp
)
# BW
self.gscaler.scale(self.static_loss).backward()
# end capture
self.graph.capture_end()
torch.cuda.current_stream().wait_stream(capture_stream)
return
def _get_time_stats(self):
# get some stats: make data shared with tensor from the class
out_bias, out_scale = self.valid_dataloader.get_output_normalization()
mult_cpu = torch.from_numpy(out_scale)[0, :, 0, 0]
# compute
if self.params.enable_synthetic_data:
clim = torch.zeros(
[
self.params.N_out_channels,
self.params.img_crop_shape_x,
self.params.img_crop_shape_y,
],
dtype=torch.float32,
device=self.device,
)
else:
# full bias and scale
in_bias, in_scale = self.valid_dataloader.get_input_normalization()
in_bias = in_bias[0, ...]
in_scale = in_scale[0, ...]
# we need this window
start_x = self.params.img_crop_offset_x
end_x = start_x + self.params.img_crop_shape_x
start_y = self.params.img_crop_offset_y
end_y = start_y + self.params.img_crop_shape_y
# now we crop the time means
time_means = np.load(self.params.time_means_path)[
0, self.params.out_channels, start_x:end_x, start_y:end_y
]
clim = torch.as_tensor(
(time_means - in_bias) / in_scale, dtype=torch.float32
)
return mult_cpu, clim
def _update_parameters(self, params):
"""
This could be moved potentially. The idea is to process params and handle the logics for params
"""
params.in_channels = self.valid_dataset.in_channels
params.N_in_channels = len(self.valid_dataset.in_channels)
params.out_channels = self.valid_dataset.out_channels
params.N_out_channels = len(self.valid_dataset.out_channels)
params.img_shape_x = self.valid_dataset.img_shape_x
params.img_shape_y = self.valid_dataset.img_shape_y
params.img_crop_shape_x = self.valid_dataset.img_crop_shape_x
params.img_crop_shape_y = self.valid_dataset.img_crop_shape_y
params.img_crop_offset_x = self.valid_dataset.img_crop_offset_x
params.img_crop_offset_y = self.valid_dataset.img_crop_offset_y
params.img_local_shape_x = self.valid_dataset.img_local_shape_x
params.img_local_shape_y = self.valid_dataset.img_local_shape_y
params.img_local_offset_x = self.valid_dataset.img_local_offset_x
params.img_local_offset_y = self.valid_dataset.img_local_offset_y
# derived quantities
params["N_in_predicted_channels"] = params.N_in_channels
# sanitization:
if not hasattr(params, "add_zenith"):
params["add_zenith"] = False
# input channels
# zenith channel is appended to all the samples, so we need to do it here
if params.add_zenith:
params.N_in_channels += 1
if params.n_history >= 1:
params.N_in_channels = (params.n_history + 1) * params.N_in_channels
params.N_in_predicted_channels *= params.n_history + 1
# these are static and the same for all samples in the same time history
if params.add_grid:
n_grid_chan = 2
if (params.gridtype == "sinusoidal") and hasattr(
params, "grid_num_frequencies"
):
n_grid_chan *= params.grid_num_frequencies
params.N_in_channels += n_grid_chan
if params.add_orography:
params.N_in_channels += 1
if params.add_landmask:
params.N_in_channels += 2
# target channels
params.N_target_channels = (params.n_future + 1) * params.N_out_channels
# MISC parameters
if not hasattr(params, "history_normalization_mode"):
params["history_normalization_mode"] = "none"
if not hasattr(params, "multigrid_mode"):
params["multigrid_mode"] = "none"
if not hasattr(params, "num_visualization_workers"):
params["num_visualization_workers"] = 1
if not hasattr(params, "log_video"):
params["log_video"] = 0
# automatically detect wind channels and keep track of them
if hasattr(params, "channel_names") and not hasattr(params, "wind_channels"):
channel_names = params.channel_names
channel_dict = {
channel_names[ch]: ch
for ch in set(params.in_channels + params.out_channels)
}
wind_channels = []
for chn, ch in channel_dict.items():
if chn[0] == "u":
vchn = "v" + chn[1:]
if vchn in channel_dict.keys():
# wind_channels.append(ch, channel_dict[vchn])
wind_channels = wind_channels + [ch, channel_dict[vchn]]
params["wind_channels"] = wind_channels
if not hasattr(params, "load_checkpoint"):
params["load_checkpoint"] = "legacy"
return params
def __del__(self):
if self.params.log_to_wandb:
wandb.finish()
def __init__(self, params, world_rank):
self.params = None
self.world_rank = world_rank
self.rank = world_rank
self.data_parallel_rank = comm.get_rank("data")
if torch.cuda.is_available():
self.device = torch.device(f"cuda:{torch.cuda.current_device()}")
else:
self.device = torch.device("cpu")
# setup modulus logger
self.logger = PythonLogger("main") # General python logger
# reenable later
# if self.world_rank == 0:
# self.logger.file_logging(file_name=os.path.join(params.experiment_dir, "out.log"))
self.rank_zero_logger = RankZeroLoggingWrapper(self.logger, self)
# nvml stuff
if params.log_to_screen:
pynvml.nvmlInit()
self.nvml_handle = pynvml.nvmlDeviceGetHandleByIndex(self.device.index)
# set amp_parameters
self.amp_enabled = params.amp_mode != "none"
self.amp_dtype = (
torch.float16
if (params.amp_mode == "fp16")
else torch.bfloat16
if (params.amp_mode == "bf16")
else None
)
if params.log_to_wandb:
# login first:
wandb.login()
# init
wandb.init(
dir=params.experiment_dir,
config=params,
name=params.wandb_name, # if not params.resuming else None,
group=params.wandb_group, # if not params.resuming else None,
project=params.wandb_project,
entity=params.wandb_entity,
resume=params.resuming,
)
# data loader
self.rank_zero_logger.info("initializing data loader")
# just a dummy dataloader
self.train_dataloader, self.train_dataset, self.train_sampler = get_dataloader(
params,
params.inf_data_path,
train=True,
device=self.device,
)
self.valid_dataloader, self.valid_dataset = get_dataloader(
params,
params.inf_data_path,
train=False,
final_eval=True,
device=self.device,
)
self.rank_zero_logger.info("data loader initialized")
# update params
params = self._update_parameters(params)
# save params
self.params = params
# init preprocessor and model
self.model = get_model(params).to(self.device)
self.preprocessor = self.model.preprocessor
# define process group for DDP, we might need to override that
if dist.is_initialized() and not params.disable_ddp:
ddp_process_group = comm.get_group("data")
if params.log_to_wandb:
wandb.watch(self.model)
# print model
if self.world_rank == 0:
print(self.model)
# metrics handler
mult_cpu, clim = self._get_time_stats()
self.metrics = MetricsHandler(self.params, mult_cpu, clim, self.device)
self.metrics.initialize_buffers()
# loss handler
self.loss_obj = LossHandler(self.params, d=2)
self.loss_obj = self.loss_obj.to(self.device)
if self.params.enable_nhwc:
self.loss_obj = self.loss_obj.to(memory_format=torch.channels_last)
if not params.resuming:
if params.nettype == "unet":
self.model.apply(self.model.get_weights_function(params.weight_init))
self.capturable_optimizer = False
betas = (params.optimizer_beta1, params.optimizer_beta2)
if params.optimizer_type == "FusedAdam":
self.rank_zero_logger.info("using FusedAdam")
self.optimizer = optimizers.FusedAdam(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
)
elif params.optimizer_type == "FusedLAMB":
try:
                # deliberately raise ImportError here so that the plain FusedLAMB
                # branch below is used instead of FusedMixedPrecisionLamb
                import doesnotexist
from apex.optimizers import FusedMixedPrecisionLamb
self.rank_zero_logger.info("using FusedMixedPrecisionLamb")
self.optimizer = FusedMixedPrecisionLamb(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
max_grad_norm=params.optimizer_max_grad_norm,
)
self.capturable_optimizer = True
except ImportError:
self.rank_zero_logger.info("using FusedLAMB")
self.optimizer = optimizers.FusedLAMB(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
max_grad_norm=params.optimizer_max_grad_norm,
)
elif params.optimizer_type == "Adam":
self.rank_zero_logger.info("using Adam")
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=params.lr)
elif params.optimizer_type == "SGD":
self.rank_zero_logger.info("using SGD")
self.optimizer = torch.optim.SGD(
self.model.parameters(),
lr=params.lr,
weight_decay=params.weight_decay,
momentum=0,
)
else:
raise ValueError(f"Unknown optimizer type {params.optimizer_type}")
if params.scheduler == "ReduceLROnPlateau":
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, factor=0.2, patience=5, mode="min"
)
elif params.scheduler == "CosineAnnealingLR":
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer, T_max=params.scheduler_T_max
)
else:
self.scheduler = None
if params.lr_warmup_steps > 0:
from utils.warmup_scheduler import WarmupScheduler
self.scheduler = WarmupScheduler(
self.scheduler,
num_warmup_steps=params.lr_warmup_steps,
start_lr=params.lr_start,
)
self.gscaler = amp.GradScaler(enabled=(self.amp_dtype == torch.float16))
# we need this further down
capture_stream = None
if dist.is_initialized() and not params.disable_ddp:
capture_stream = torch.cuda.Stream()
parameter_size_mb = (
count_parameters(self.model, self.device) * 4 / float(1024 * 1024)
)
reduction_size_mb = int(
(parameter_size_mb / params.parameters_reduction_buffer_count) * 1.05
)
with torch.cuda.stream(capture_stream):
self.model = init_gradient_reduction_hooks(
self.model,
device_ids=[self.device.index],
output_device=[self.device.index],
bucket_cap_mb=reduction_size_mb,
broadcast_buffers=True,
find_unused_parameters=False,
gradient_as_bucket_view=True,
static_graph=params.checkpointing > 0,
)
capture_stream.synchronize()
# we need to set up some additional gradient reductions
# if params.model_parallel_size > 1:
# init_additional_parameters_reductions(self.model)
# capture stream sync
capture_stream.synchronize()
# lets get one sample from the dataloader:
# get sample and map to gpu
iterator = iter(self.train_dataloader)
data = next(iterator)
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
# extract unpredicted features
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
# flatten
inp = self.preprocessor.flatten_history(inp)
tar = self.preprocessor.flatten_history(tar)
# get shapes
inp_shape = inp.shape
tar_shape = tar.shape
self._compile_model(inp_shape)
if not self.loss_obj.is_distributed():
self.loss_obj = torch.jit.script(self.loss_obj)
# graph capture
self.graph = None
if params.cuda_graph_mode != "none":
self._capture_model(
capture_stream, inp_shape, tar_shape, num_warmup_steps=20
)
# reload checkpoints
self.iters = 0
self.startEpoch = 0
assert (
params.pretrained_checkpoint_path is not None
), "Error, please specify a valid pretrained checkpoint path"
self.restore_checkpoint(
params.pretrained_checkpoint_path,
checkpoint_mode=params["load_checkpoint"],
)
self.epoch = self.startEpoch
# counting runs a reduction so we need to count on all ranks before printing on rank 0
pcount = count_parameters(self.model, self.device)
if params.log_to_screen:
self.rank_zero_logger.info(f"Number of trainable model parameters: {pcount}")
def inference(self):
# log parameters
if self.params.log_to_screen:
# log memory usage so far
all_mem_gb = pynvml.nvmlDeviceGetMemoryInfo(self.nvml_handle).used / (
1024.0 * 1024.0 * 1024.0
)
max_mem_gb = torch.cuda.max_memory_allocated(device=self.device) / (
1024.0 * 1024.0 * 1024.0
)
self.rank_zero_logger.info(
f"Scaffolding memory high watermark: {all_mem_gb} GB ({max_mem_gb} GB for pytorch)"
)
# announce inference start
self.rank_zero_logger.info("Starting Inference Loop...")
# perform a barrier here to make sure everybody is ready
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
try:
torch.cuda.reset_peak_memory_stats(self.device)
except ValueError:
pass
inference_start = time.time()
epoch = 0
# start timer
epoch_start = time.time()
inf_time, inf_logs = self.inference_one_epoch(epoch)
# end timer
epoch_end = time.time()
# inference done
inference_end = time.time()
self.rank_zero_logger.info(
f"Total inference time is {(inference_end - inference_start):.2f} sec"
)
return
def _set_train(self):
self.model.train()
self.loss_obj.train()
def _set_eval(self):
self.model.eval()
self.loss_obj.eval()
def inference_one_epoch(self, epoch):
# set to eval
self._set_eval()
# clear cache
torch.cuda.empty_cache()
# initialize metrics buffers
self.metrics.zero_buffers()
# start the timer
valid_start = time.time()
with torch.inference_mode():
with torch.no_grad():
eval_steps = 0
for data in tqdm(
self.valid_dataloader,
desc="Inference progress",
disable=not self.params.log_to_screen,
):
eval_steps += 1
# map to gpu
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
# preprocess
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
inp = self.preprocessor.flatten_history(inp)
# split list of targets
tarlist = torch.split(tar, 1, dim=1)
# do autoregression
inpt = inp
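# autoregressive rollout: each prediction is appended to the running history and fed back
# as the next input, while the metrics handler accumulates scores per lead time (idt)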
for idt, targ in enumerate(tarlist):
# flatten history of the target
targ = self.preprocessor.flatten_history(targ)
# FW pass
with amp.autocast(
enabled=self.amp_enabled, dtype=self.amp_dtype
):
pred = self.model_eval(inpt)
loss = self.loss_obj(pred, targ, inpt)
# put in the metrics handler
self.metrics.update(pred, targ, loss, idt)
# append history
inpt = self.preprocessor.append_history(inpt, pred, idt)
# create final logs
logs, acc_curve = self.metrics.finalize(final_inference=True)
# save the acc curve
if self.world_rank == 0:
np.save(
os.path.join(self.params.experiment_dir, "acc_curve.npy"),
acc_curve.cpu().numpy(),
)
if self.params.ifs_acc_path is not None:
visualize.plot_ifs_acc_comparison(acc_curve, self.params, self.epoch)
# global sync is in order
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# timer
inference_time = time.time() - valid_start
return inference_time, logs
def test_model_output(self, model):
"""helper to test checkpointing"""
inp_shape = (
self.params.batch_size,
self.params.N_in_channels,
self.params.img_shape_local_x,
self.params.img_shape_local_y,
)
matmul_comm_size = comm.get_size("matmul")
# modify inp shape due to model parallelism
if self.params.split_data_channels:
inp_shape_eff = (
inp_shape[0],
(inp_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
inp_shape[2],
inp_shape[3],
)
else:
inp_shape_eff = (inp_shape[0], inp_shape[1], inp_shape[2], inp_shape[3])
random_tensor = os.path.join(
self.params.experiment_dir,
"random_tensor{}.npy".format(comm.get_rank("model")),
)
if not os.path.exists(random_tensor):
y = torch.rand(inp_shape_eff, dtype=torch.float).cpu().numpy()
np.save(random_tensor, y)
y = torch.from_numpy(np.load(random_tensor)).type(torch.float).to(self.device)
out = model(y).detach().cpu().numpy()
random_output = os.path.join(
self.params.experiment_dir,
"random_output{}.npy".format(comm.get_rank("model")),
)
if os.path.exists(random_output):
out_old = np.load(random_output)
diff = (out - out_old).flatten()
self.rank_zero_logger.info(
"Diff metrics: norm = {}, max = {}, min = {}".format(
np.linalg.norm(diff), np.max(diff), np.min(diff)
)
)
np.save(random_output, out)
def restore_checkpoint(self, checkpoint_path, checkpoint_mode="flexible"):
"""We intentionally require a checkpoint_dir to be passed
in order to allow Ray Tune to use this function"""
# legacy mode
if checkpoint_mode == "legacy":
checkpoint_fname = checkpoint_path.format(mp_rank=comm.get_rank("model"))
self.rank_zero_logger.info(
f"Loading checkpoint {checkpoint_fname} in legacy mode"
)
checkpoint = torch.load(
checkpoint_fname, map_location="cuda:{}".format(self.device.index)
)
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
# a hacky workaround to remove SHT params from state dict
if True:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
sht_strings = [
"forward_transform",
"inverse_transform",
"sht",
"isht",
"sht_down",
"isht_up",
".ii",
".jj",
".pct",
"trans_down",
"itrans_up",
"trans",
"itrans",
]
contains = [string in k for string in sht_strings]
if True not in contains:
# to be able to deal with older implementations we need to reshape any weights from norm layers
# this can be removed in the future
if "norm" in k:
v = v.reshape(-1)
new_state_dict[k] = v
state_dict = new_state_dict
self.model.load_state_dict(state_dict, strict=False)
# we load the dict a second time for the cases in which the previous run was not conducted with multistep
if self.params.n_future > 0:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = "module." + "model." + k[7:]
new_state_dict[name] = v
self.model.load_state_dict(new_state_dict, strict=False)
# new flexible mode allows to load models in arbitrary model-parallel configurations
elif checkpoint_mode == "flexible":
# when loading the weights in flexible mode we exclusively use mp_rank=0 and load them onto the device
checkpoint_fname = checkpoint_path.format(mp_rank=0)
self.rank_zero_logger.info(
f"Loading checkpoint {checkpoint_fname} in flexible mode"
)
checkpoint = torch.load(
checkpoint_fname, map_location="cuda:{}".format(self.device.index)
)
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
new_state_dict = OrderedDict()
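# For every local parameter we determine which slice of the full (mp_rank=0) checkpoint
# tensor belongs to this model-parallel rank: along each sharded dimension the shard size
# is ceil(full_dim / group_size), and this rank reads the window starting at
# rank * shard_size with the length of its local dimension.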
for k, v in self.model.named_parameters():
if k in state_dict.keys():
if hasattr(v, "sharded_dims_mp"):
weight_shape = state_dict[k].shape
read_ranges = []
for d, group in enumerate(v.sharded_dims_mp):
# compute the read range for this model
if group is None:
# read_range = None
read_range = slice(0, v.shape[d], 1)
else:
weight_shape_dist = (
weight_shape[d] + comm.get_size(group) - 1
) // comm.get_size(group)
read_range = slice(
weight_shape_dist * comm.get_rank(group),
weight_shape_dist * comm.get_rank(group)
+ v.shape[d],
1,
)
read_ranges.append(read_range)
new_state_dict[k] = state_dict[k][tuple(read_ranges)]
else:
new_state_dict[k] = state_dict[k]
# to be able to deal with older implementations we need to reshape any weights from norm layers
# this can be removed in the future
if "norm" in k:
new_state_dict[k] = new_state_dict[k].reshape(-1)
else:
# put a warning here
print(f"missing {k}")
state_dict = new_state_dict
self.model.load_state_dict(state_dict, strict=False)
else:
raise ValueError(f"Unknown checkoint mode {checkpoint_mode}.")
| modulus-launch-main | examples/weather/fcn_sfno/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import numpy as np
import concurrent.futures as cf
from PIL import Image
from moviepy.editor import ImageSequenceClip
import wandb
import torch
def plot_comparison(
pred,
truth,
pred_title="Prediction",
truth_title="Ground truth",
cmap="twilight_shifted",
projection="mollweide",
diverging=False,
figsize=(8, 9),
vmax=None,
):
"""
Visualization tool to plot a comparison between ground truth and prediction
pred: 2d array
truth: 2d array
cmap: colormap
projection: 'mollweide', 'hammer', 'aitoff' or None
"""
import matplotlib.pyplot as plt
assert len(pred.shape) == 2
assert len(truth.shape) == 2
assert pred.shape == truth.shape
H, W = pred.shape
lon = np.linspace(-np.pi, np.pi, W)
lat = np.linspace(np.pi / 2.0, -np.pi / 2.0, H)
Lon, Lat = np.meshgrid(lon, lat)
# only normalize with the truth
vmax = vmax or np.abs(truth).max()
if diverging:
vmin = -vmax
else:
vmin = 0.0
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(2, 1, 1, projection=projection) # can also be Mollweide
ax.pcolormesh(Lon, Lat, pred, cmap=cmap, vmax=vmax, vmin=vmin)
ax.set_title(pred_title)
ax.grid(True)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax = fig.add_subplot(2, 1, 2, projection=projection) # can also be Mollweide
ax.pcolormesh(Lon, Lat, truth, cmap=cmap, vmax=vmax, vmin=vmin)
ax.set_title(truth_title)
ax.grid(True)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.tight_layout()
# save into memory buffer
buf = io.BytesIO()
plt.savefig(buf)
plt.close(fig)
buf.seek(0)
# create image
image = Image.open(buf)
return image
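# Illustrative usage of plot_comparison (shapes and file name below are hypothetical,
# not taken from the pipeline):
#   pred = np.random.rand(181, 360)
#   truth = np.random.rand(181, 360)
#   img = plot_comparison(pred, truth, diverging=False)
#   img.save("comparison.png")  # the returned object is a PIL.Image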
def plot_ifs_acc_comparison(acc_curve, params, epoch):
import os
ifs_comparison_dict = {
"u10m": "u10_2018_acc.npy",
"v10m": "v10_2018_acc.npy",
"z500": "z500_2018_acc.npy",
"t2m": "t2m_2018_acc.npy",
"t850": "t850_2018_acc.npy",
}
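# For each variable, load the precomputed IFS ACC curve, average it over initial conditions,
# and plot it against the model ACC curve at matching 6-hourly lead times.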
for comparison_var, comparison_file in ifs_comparison_dict.items():
ifs_acc_file = os.path.join(
params.ifs_acc_path, comparison_var, comparison_file
)
ifs_acc = np.mean(np.load(ifs_acc_file), axis=0)[0 : acc_curve.shape[1] + 1, 0]
channel_names = params.channel_names
fcn_acc = acc_curve[channel_names.index(comparison_var), :].cpu().numpy()
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
var_name = comparison_var
fig, ax = plt.subplots()
t = np.arange(1, len(ifs_acc), 1) * 6
ax.plot(t, ifs_acc[1:], ".-", label="IFS")
ax.plot(t, fcn_acc, ".-", label="S-FNO")
xticks = np.arange(0, len(ifs_acc), 1) * 6
x_locator = ticker.FixedLocator(xticks)
ax.xaxis.set_major_locator(x_locator)
y_locator = ticker.MaxNLocator(nbins=20)
ax.yaxis.set_major_locator(y_locator)
ax.grid(which="major", alpha=0.5)
ax.legend()
ax.set_xlabel("Time [h]")
ax.set_ylabel("ACC " + var_name)
ax.set_title(params.wandb_name)
plt.setp(ax.get_xticklabels(), rotation=45, horizontalalignment="right")
fig.savefig(os.path.join(params.experiment_dir, "acc_" + var_name + ".png"))
# push to wandb
if params.log_to_wandb:
wandb.log({"ACC " + var_name: wandb.Image(fig)}, step=epoch)
def visualize_field(tag, func_string, prediction, target, scale, bias, diverging):
torch.cuda.nvtx.range_push("visualize_field")
# get func handle:
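# note: func_string is expected to be a trusted Python expression (typically a lambda string,
# as configured in the plot_list handed to VisualizationWrapper); eval turns it into a callable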
func_handle = eval(func_string)
# unscale:
pred = scale * prediction + bias
targ = scale * target + bias
# apply functor:
pred = func_handle(pred)
targ = func_handle(targ)
# generate image
image = plot_comparison(
pred,
targ,
pred_title="Prediction",
truth_title="Ground truth",
projection="mollweide",
diverging=diverging,
)
torch.cuda.nvtx.range_pop()
return tag, image
class VisualizationWrapper(object):
def __init__(
self, log_to_wandb, path, prefix, plot_list, scale=1.0, bias=0.0, num_workers=1
):
self.log_to_wandb = log_to_wandb
self.generate_video = True
self.path = path
self.prefix = prefix
self.plot_list = plot_list
# normalization
self.scale = scale
self.bias = bias
# this is for parallel processing
self.executor = cf.ProcessPoolExecutor(max_workers=num_workers)
self.requests = []
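# Plot requests are submitted to the process pool so that matplotlib rendering happens
# asynchronously in worker processes; finalize() collects the finished images and either
# pushes them to wandb or writes a gif.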
def reset(self):
self.requests = []
def add(self, tag, prediction, target):
# go through the plot list
for item in self.plot_list:
field_name = item["name"]
func_string = item["functor"]
plot_diverge = item["diverging"]
self.requests.append(
self.executor.submit(
visualize_field,
(tag, field_name),
func_string,
np.copy(prediction),
np.copy(target),
self.scale,
self.bias,
plot_diverge,
)
)
return
def finalize(self):
torch.cuda.nvtx.range_push("VisualizationWrapper:finalize")
results = {}
for request in cf.as_completed(self.requests):
token, image = request.result()
tag, field_name = token
prefix = field_name + "_" + tag
results[prefix] = image
if self.generate_video:
if self.log_to_wandb:
video = []
# draw stuff that goes on every frame here
for prefix, image in sorted(results.items()):
video.append(np.transpose(np.asarray(image), (2, 0, 1)))
video = np.stack(video)
results = [wandb.Video(video, fps=1, format="gif")]
else:
video = []
# draw stuff that goes on every frame here
for prefix, image in sorted(results.items()):
video.append(np.asarray(image))
video = ImageSequenceClip(video, fps=1)
video.write_gif("video_output.gif")
else:
results = [
wandb.Image(image, caption=prefix) for prefix, image in results.items()
]
if self.log_to_wandb and results:
wandb.log({"Inference samples": results})
# reset requests
self.reset()
torch.cuda.nvtx.range_pop()
return
| modulus-launch-main | examples/weather/fcn_sfno/visualize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import torch
import logging
from modulus.launch.logging import (
PythonLogger,
LaunchLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
import modulus.models
from modulus.utils.sfno import logging_utils
from modulus.utils.sfno.YParams import YParams
DECORRELATION_TIME = 36 # 9 days
# distributed computing stuff
from modulus.utils.sfno.distributed import comm
# import trainer
from trainer import Trainer
from inferencer import Inferencer
from ensembler import Ensembler
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--fin_parallel_size",
default=1,
type=int,
help="Input feature parallelization",
)
parser.add_argument(
"--fout_parallel_size",
default=1,
type=int,
help="Output feature parallelization",
)
parser.add_argument(
"--h_parallel_size",
default=1,
type=int,
help="Spatial parallelism dimension in h",
)
parser.add_argument(
"--w_parallel_size",
default=1,
type=int,
help="Spatial parallelism dimension in w",
)
parser.add_argument(
"--parameters_reduction_buffer_count",
default=1,
type=int,
help="How many buffers will be used (approximately) for weight gradient reductions.",
)
parser.add_argument("--run_num", default="00", type=str)
parser.add_argument("--yaml_config", default="./config/sfnonet.yaml", type=str)
parser.add_argument(
"--batch_size",
default=-1,
type=int,
help="Switch for overriding batch size in the configuration file.",
)
parser.add_argument("--config", default="default", type=str)
parser.add_argument("--enable_synthetic_data", action="store_true")
parser.add_argument(
"--amp_mode",
default="none",
type=str,
choices=["none", "fp16", "bf16"],
help="Specify the mixed precision mode which should be used.",
)
parser.add_argument(
"--jit_mode",
default="none",
type=str,
choices=["none", "script", "inductor"],
help="Specify if and how to use torch jit.",
)
parser.add_argument(
"--cuda_graph_mode",
default="none",
type=str,
choices=["none", "fwdbwd", "step"],
help="Specify which parts to capture under cuda graph",
)
parser.add_argument("--enable_benchy", action="store_true")
parser.add_argument("--disable_ddp", action="store_true")
parser.add_argument("--enable_nhwc", action="store_true")
parser.add_argument(
"--checkpointing_level",
default=0,
type=int,
help="How aggressively checkpointing is used",
)
# for data prefetch buffers
parser.add_argument(
"--host_prefetch_buffers",
action="store_true",
default=False,
help="Store file prefetch buffers on the host instead of the gpu, uses less GPU memory but can be slower",
)
parser.add_argument("--epsilon_factor", default=0, type=float)
parser.add_argument("--split_data_channels", action="store_true")
parser.add_argument(
"--print_timings_frequency",
default=-1,
type=int,
help="Frequency at which to print timing information",
)
parser.add_argument(
"--mode",
default="train",
type=str,
choices=["train", "inference", "ensemble"],
help="Run training, inference or ensemble",
)
# checkpoint format
parser.add_argument(
"--checkpoint_format",
default="none",
type=str,
help="Format in which to save and load checkpoints. Can be 'flexible' or 'legacy'",
)
parser.add_argument(
"--save_checkpoint",
default="none",
type=str,
help="Format in which to save checkpoints. Can be 'flexible' or 'legacy'",
)
parser.add_argument(
"--load_checkpoint",
default="none",
type=str,
help="Format in which to load checkpoints. Can be 'flexible' or 'legacy'",
)
# multistep stuff
parser.add_argument(
"--multistep_count",
default=1,
type=int,
help="Number of autoregressive training steps. A value of 1 denotes conventional training",
)
# parse
args = parser.parse_args()
# sanity checks
if (args.checkpoint_format != "none") and (
(args.save_checkpoint != "none") or (args.load_checkpoint != "none")
):
raise RuntimeError(
"Error, checkpoint_format cannot be used together with save_checkpoint and load_checkpoint"
)
# parse parameters
params = YParams(os.path.abspath(args.yaml_config), args.config)
params["epsilon_factor"] = args.epsilon_factor
params["host_prefetch_buffers"] = args.host_prefetch_buffers
# distributed
params["fin_parallel_size"] = args.fin_parallel_size
params["fout_parallel_size"] = args.fout_parallel_size
params["h_parallel_size"] = args.h_parallel_size
params["w_parallel_size"] = args.w_parallel_size
params["model_parallel_sizes"] = [
args.h_parallel_size,
args.w_parallel_size,
args.fin_parallel_size,
args.fout_parallel_size,
]
params["model_parallel_names"] = ["h", "w", "fin", "fout"]
params["parameters_reduction_buffer_count"] = args.parameters_reduction_buffer_count
# checkpoint format
if args.checkpoint_format != "none":
params["load_checkpoint"] = params["save_checkpoint"] = args.checkpoint_format
else:
params["load_checkpoint"] = (
args.load_checkpoint if args.load_checkpoint != "none" else "legacy"
)
params["save_checkpoint"] = (
args.save_checkpoint if args.save_checkpoint != "none" else "legacy"
)
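# at this point params["load_checkpoint"] and params["save_checkpoint"] are each either
# "legacy" (one checkpoint file per model-parallel rank) or "flexible" (a single mp_rank=0 file)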
# make sure to reconfigure logger after the pytorch distributed init
comm.init(params, verbose=False)
world_rank = comm.get_world_rank()
# update parameters
params["world_size"] = comm.get_world_size()
if args.batch_size > 0:
params.batch_size = args.batch_size
params["global_batch_size"] = params.batch_size
assert (
params["global_batch_size"] % comm.get_size("data") == 0
), f"Error, cannot evenly distribute {params['global_batch_size']} across {comm.get_size('data')} GPU."
params["batch_size"] = int(params["global_batch_size"] // comm.get_size("data"))
# optimizer params
if "optimizer_max_grad_norm" not in params:
params["optimizer_max_grad_norm"] = 1.0
# set device
torch.cuda.set_device(comm.get_local_rank())
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Set up directory
expDir = os.path.join(params.exp_dir, args.config, str(args.run_num))
if world_rank == 0:
logging.info(f"writing output to {expDir}")
if not os.path.isdir(expDir):
os.makedirs(expDir, exist_ok=True)
os.makedirs(os.path.join(expDir, "training_checkpoints"), exist_ok=True)
os.makedirs(os.path.join(expDir, "wandb"), exist_ok=True)
params["experiment_dir"] = os.path.abspath(expDir)
params.experiment_dir = os.path.abspath(expDir)
params["checkpoint_path"] = os.path.join(
expDir, "training_checkpoints/ckpt_mp{mp_rank}.tar"
)
params["best_checkpoint_path"] = os.path.join(
expDir, "training_checkpoints/best_ckpt_mp{mp_rank}.tar"
)
# Do not comment this line out please:
# check if all files are there
args.resuming = True
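# resuming only stays True if the expected checkpoint files for this run already exist;
# for flexible checkpoints only mp_rank 0 is checked, for legacy every model-parallel rank is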
for mp_rank in range(comm.get_size("model")):
checkpoint_fname = params.checkpoint_path.format(mp_rank=mp_rank)
if params["load_checkpoint"] == "legacy" or mp_rank < 1:
args.resuming = args.resuming and os.path.isfile(checkpoint_fname)
params["resuming"] = args.resuming
params["amp_mode"] = args.amp_mode
params["jit_mode"] = args.jit_mode
params["cuda_graph_mode"] = args.cuda_graph_mode
params["enable_benchy"] = args.enable_benchy
params["disable_ddp"] = args.disable_ddp
params["enable_nhwc"] = args.enable_nhwc
params["checkpointing"] = args.checkpointing_level
params["enable_synthetic_data"] = args.enable_synthetic_data
params["split_data_channels"] = args.split_data_channels
params["print_timings_frequency"] = args.print_timings_frequency
params["multistep_count"] = args.multistep_count
params["n_future"] = (
args.multistep_count - 1
) # note that n_future counts only the additional samples
# wandb configuration
if params["wandb_name"] is None:
params["wandb_name"] = args.config + "_" + str(args.run_num)
if params["wandb_group"] is None:
params["wandb_group"] = "era5_wind" + args.config
if world_rank == 0:
logging_utils.log_to_file(
logger_name=None, log_filename=os.path.join(expDir, "out.log")
)
logging_utils.log_versions()
params.log()
if "metadata_json_path" in params:
import json
with open(params.metadata_json_path, "r") as f:
metadata = json.load(f)
channel_names = metadata["coords"]["channel"]
params["channel_names"] = channel_names
if params["in_channels"] is None:
params["in_channels"] = list(range(len(channel_names)))
if params["out_channels"] is None:
params["out_channels"] = list(range(len(channel_names)))
if hasattr(params, "drop_masked_channels") and params.drop_masked_channels:
# names of channels to drop
channels_to_drop = params["masked_channels"]
channels = [
ch
for ch in range(len(channel_names))
if channel_names[ch] not in channels_to_drop
]
channel_names = [channel_names[ch] for ch in channels]
params["channel_names"] = channel_names
params["in_channels"] = channels
params["out_channels"] = channels
logging.info(f"Using channel names: {channel_names}")
params["log_to_wandb"] = (world_rank == 0) and params["log_to_wandb"]
params["log_to_screen"] = (world_rank == 0) and params["log_to_screen"]
# instantiate trainer / inference / ensemble object
if args.mode == "train":
trainer = Trainer(params, world_rank)
trainer.train()
elif args.mode == "inference":
inferencer = Inferencer(params, world_rank)
inferencer.inference()
elif args.mode == "ensemble":
ensembler = Ensembler(params, world_rank)
ensembler.ensemble()
| modulus-launch-main | examples/weather/fcn_sfno/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import gc
import json
import time
import wandb
import pynvml
import numpy as np
import torch
import torch.cuda.amp as amp
import torch.distributed as dist
from tqdm import tqdm
from apex import optimizers
from collections import OrderedDict
import visualize
from models import get_model
from modulus.models.sfno.preprocessor import get_preprocessor
from modulus.datapipes.climate.sfno.dataloader import get_dataloader
from modulus.utils.sfno.distributed import comm
from modulus.utils.sfno.loss import LossHandler
from modulus.utils.sfno.metric import MetricsHandler
from modulus.utils.sfno.distributed.helpers import sync_params, gather_uneven  # gather_uneven is used by the flexible checkpoint save below (assuming it lives in this helpers module)
from modulus.utils.sfno.distributed.mappings import init_gradient_reduction_hooks
from helpers import count_parameters
from modulus.launch.logging import (
PythonLogger,
LaunchLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
class Trainer:
# jit stuff
def _compile_model(self, inp_shape):
if self.params.jit_mode == "script":
if dist.is_initialized() and not self.params.disable_ddp:
self.model.module = torch.jit.script(self.model.module)
else:
self.model = torch.jit.script(self.model)
self.model_train = self.model
self.model_eval = self.model
elif self.params.jit_mode == "inductor":
self.model = torch.compile(self.model)
self.model_train = self.model
self.model_eval = self.model
else:
self.model_train = self.model
self.model_eval = self.model
return
# graph stuff
def _capture_model(self, capture_stream, inp_shape, tar_shape, num_warmup_steps=20):
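# Warm up the forward/backward pass on a side stream, then record a single fwd/bwd step into a
# CUDA graph. Gradients are zeroed before capture_begin (so zeroing is not part of the graph),
# and the captured step reads self.static_inp/self.static_tar and writes
# self.static_pred/self.static_loss for later replay in train_one_epoch.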
matmul_comm_size = comm.get_size("matmul")
# modify inp shape due to model parallelism
if self.params.split_data_channels:
inp_shape_eff = (
inp_shape[0],
(inp_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
inp_shape[2],
inp_shape[3],
)
tar_shape_eff = (
tar_shape[0],
(tar_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
tar_shape[2],
tar_shape[3],
)
else:
inp_shape_eff = (inp_shape[0], inp_shape[1], inp_shape[2], inp_shape[3])
tar_shape_eff = (tar_shape[0], tar_shape[1], tar_shape[2], tar_shape[3])
self.static_inp = torch.zeros(
inp_shape_eff, dtype=torch.float32, device=self.device
)
self.static_tar = torch.zeros(
tar_shape_eff, dtype=torch.float32, device=self.device
)
if self.params.enable_nhwc:
self.static_inp = self.static_inp.to(memory_format=torch.channels_last)
self.static_tar = self.static_tar.to(memory_format=torch.channels_last)
# set to train
self._set_train()
# do capture
if capture_stream is None:
capture_stream = torch.cuda.Stream()
capture_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(capture_stream):
for _ in range(num_warmup_steps):
self.model_train.zero_grad(set_to_none=True)
# FW
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
self.static_pred = self.model_train(self.static_inp).to(self.device)
self.static_loss = self.loss_obj(
self.static_pred, self.static_tar, self.static_inp
)
# BW
self.gscaler.scale(self.static_loss).backward()
# sync here
capture_stream.synchronize()
gc.collect()
torch.cuda.empty_cache()
# create graph
self.graph = torch.cuda.CUDAGraph()
# zero grads before capture:
self.model_train.zero_grad(set_to_none=True)
# start capture
self.graph.capture_begin()
# FW
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
self.static_pred = self.model_train(self.static_inp)
self.static_loss = self.loss_obj(
self.static_pred, self.static_tar, self.static_inp
)
# BW
self.gscaler.scale(self.static_loss).backward()
# end capture
self.graph.capture_end()
torch.cuda.current_stream().wait_stream(capture_stream)
return
def _get_time_stats(self):
# get some stats: make data shared with tensor from the class
_, out_scale = self.train_dataloader.get_output_normalization()
mult_cpu = torch.from_numpy(out_scale)[0, :, 0, 0]
# compute
if self.params.enable_synthetic_data:
clim = torch.zeros(
[
self.params.N_out_channels,
self.params.img_crop_shape_x,
self.params.img_crop_shape_y,
],
dtype=torch.float32,
device=self.device,
)
else:
# full bias and scale
in_bias, in_scale = self.train_dataloader.get_input_normalization()
in_bias = in_bias[
0, ...
] # np.load(self.params.global_means_path)[0, self.params.out_channels]
in_scale = in_scale[
0, ...
] # np.load(self.params.global_stds_path)[0, self.params.out_channels]
# we need this window
start_x = self.params.img_crop_offset_x
end_x = start_x + self.params.img_crop_shape_x
start_y = self.params.img_crop_offset_y
end_y = start_y + self.params.img_crop_shape_y
# now we crop the time means
time_means = np.load(self.params.time_means_path)[
0, self.params.out_channels, start_x:end_x, start_y:end_y
]
clim = torch.as_tensor(
(time_means - in_bias) / in_scale, dtype=torch.float32
)
return mult_cpu, clim
def _update_parameters(self, params):
"""
This could be moved potentially. The idea is to process params and handle the logics for params
"""
params.in_channels = self.valid_dataset.in_channels
params.N_in_channels = len(self.valid_dataset.in_channels)
params.out_channels = self.valid_dataset.out_channels
params.N_out_channels = len(self.valid_dataset.out_channels)
params.img_shape_x = self.valid_dataset.img_shape_x
params.img_shape_y = self.valid_dataset.img_shape_y
params.img_crop_shape_x = self.valid_dataset.img_crop_shape_x
params.img_crop_shape_y = self.valid_dataset.img_crop_shape_y
params.img_crop_offset_x = self.valid_dataset.img_crop_offset_x
params.img_crop_offset_y = self.valid_dataset.img_crop_offset_y
params.img_local_shape_x = self.valid_dataset.img_local_shape_x
params.img_local_shape_y = self.valid_dataset.img_local_shape_y
params.img_local_offset_x = self.valid_dataset.img_local_offset_x
params.img_local_offset_y = self.valid_dataset.img_local_offset_y
# derived quantities
params["N_in_predicted_channels"] = params.N_in_channels
# sanitization:
if not hasattr(params, "add_zenith"):
params["add_zenith"] = False
# input channels
# zenith channel is appended to all the samples, so we need to do it here
if params.add_zenith:
params.N_in_channels += 1
if params.n_history >= 1:
params.N_in_channels = (params.n_history + 1) * params.N_in_channels
params.N_in_predicted_channels *= params.n_history + 1
# these are static and the same for all samples in the same time history
if params.add_grid:
n_grid_chan = 2
if (params.gridtype == "sinusoidal") and hasattr(
params, "grid_num_frequencies"
):
n_grid_chan *= params.grid_num_frequencies
params.N_in_channels += n_grid_chan
if params.add_orography:
params.N_in_channels += 1
if params.add_landmask:
params.N_in_channels += 2
# target channels
params.N_target_channels = (params.n_future + 1) * params.N_out_channels
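# example (hypothetical numbers): with 73 predicted channels, add_zenith=True and n_history=1,
# N_in_channels becomes (73 + 1) * 2 = 148 before the optional grid/orography/landmask channels
# are added, while N_target_channels = (n_future + 1) * N_out_channels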
# MISC parameters
if not hasattr(params, "history_normalization_mode"):
params["history_normalization_mode"] = "none"
if not hasattr(params, "multigrid_mode"):
params["multigrid_mode"] = "none"
if not hasattr(params, "num_visualization_workers"):
params["num_visualization_workers"] = 1
if not hasattr(params, "log_video"):
params["log_video"] = 0
# automatically detect wind channels and keep track of them
if hasattr(params, "channel_names") and not hasattr(params, "wind_channels"):
channel_names = params.channel_names
channel_dict = {
channel_names[ch]: ch
for ch in set(params.in_channels + params.out_channels)
}
wind_channels = []
for chn, ch in channel_dict.items():
if chn[0] == "u":
vchn = "v" + chn[1:]
if vchn in channel_dict.keys():
# wind_channels.append(ch, channel_dict[vchn])
wind_channels = wind_channels + [ch, channel_dict[vchn]]
params["wind_channels"] = wind_channels
if not hasattr(params, "load_checkpoint"):
params["load_checkpoint"] = "legacy"
if not hasattr(params, "save_checkpoint"):
params["save_checkpoint"] = "legacy"
return params
def __del__(self):
if self.params.log_to_wandb:
wandb.finish()
def __init__(self, params, world_rank):
self.params = None
self.world_rank = world_rank
self.rank = world_rank
self.data_parallel_rank = comm.get_rank("data")
if torch.cuda.is_available():
self.device = torch.device(f"cuda:{torch.cuda.current_device()}")
else:
self.device = torch.device("cpu")
LaunchLogger.initialize()
self.logger = PythonLogger("main") # General python logger
# add back in when logger working
# if self.world_rank == 0:
# self.logger.file_logging(file_name=os.path.join(params.experiment_dir, "out.log"))
self.rank_zero_logger = RankZeroLoggingWrapper(self.logger, self)
# nvml stuff
if params.log_to_screen:
pynvml.nvmlInit()
self.nvml_handle = pynvml.nvmlDeviceGetHandleByIndex(self.device.index)
# set amp_parameters
self.amp_enabled = params.amp_mode != "none"
self.amp_dtype = (
torch.float16
if (params.amp_mode == "fp16")
else torch.bfloat16
if (params.amp_mode == "bf16")
else None
)
if params.log_to_wandb:
# login first:
wandb.login()
# init
wandb.init(
dir=params.experiment_dir,
config=params,
name=params.wandb_name, # if not params.resuming else None,
group=params.wandb_group, # if not params.resuming else None,
project=params.wandb_project,
entity=params.wandb_entity,
resume=params.resuming,
)
# data loader
self.rank_zero_logger.info("initializing data loader")
self.train_dataloader, self.train_dataset, self.train_sampler = get_dataloader(
params, params.train_data_path, train=True, device=self.device
)
self.valid_dataloader, self.valid_dataset = get_dataloader(
params, params.valid_data_path, train=False, device=self.device
)
self.rank_zero_logger.info("data loader initialized")
# update params
params = self._update_parameters(params)
# save params
self.params = params
# init preprocessor and model
# save the modified params to a json file to make it easier to load for inference later on
# This should happen immediately before ``get_model`` is called.
if self.world_rank == 0:
config_path = os.path.join(params.experiment_dir, "config.json")
with open(config_path, "w") as f:
json.dump(params.to_dict(), f)
self.model = get_model(params).to(self.device)
self.preprocessor = self.model.preprocessor
# if model-parallelism is enabled, we need to make sure that shared weights match across ranks,
# as random seeds might get out of sync during initialization
if comm.get_size("model") > 1:
sync_params(self.model, mode="broadcast")
# define process group for DDP, we might need to override that
if dist.is_initialized() and not params.disable_ddp:
ddp_process_group = comm.get_group("data")
if params.log_to_wandb:
wandb.watch(self.model)
# print model
if self.world_rank == 0:
print(self.model)
# metrics handler
mult_cpu, clim = self._get_time_stats()
self.metrics = MetricsHandler(self.params, mult_cpu, clim, self.device)
self.metrics.initialize_buffers()
# loss handler
self.loss_obj = LossHandler(self.params, d=2)
self.loss_obj = self.loss_obj.to(self.device)
if self.params.enable_nhwc:
self.loss_obj = self.loss_obj.to(memory_format=torch.channels_last)
if not params.resuming:
if params.nettype == "unet":
self.model.apply(self.model.get_weights_function(params.weight_init))
self.capturable_optimizer = False
betas = (params.optimizer_beta1, params.optimizer_beta2)
if params.optimizer_type == "FusedAdam":
self.rank_zero_logger.info("using FusedAdam")
self.optimizer = optimizers.FusedAdam(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
)
elif params.optimizer_type == "FusedLAMB":
try:
from apex.optimizers import FusedMixedPrecisionLamb
self.rank_zero_logger.info("using FusedMixedPrecisionLamb")
self.optimizer = FusedMixedPrecisionLamb(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
max_grad_norm=params.optimizer_max_grad_norm,
)
self.capturable_optimizer = True
except ImportError:
self.rank_zero_logger.info("using FusedLAMB")
self.optimizer = optimizers.FusedLAMB(
self.model.parameters(),
betas=betas,
lr=params.lr,
weight_decay=params.weight_decay,
max_grad_norm=params.optimizer_max_grad_norm,
)
elif params.optimizer_type == "Adam":
self.rank_zero_logger.info("using Adam")
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=params.lr)
elif params.optimizer_type == "SGD":
self.rank_zero_logger.info("using SGD")
self.optimizer = torch.optim.SGD(
self.model.parameters(),
lr=params.lr,
weight_decay=params.weight_decay,
momentum=0,
)
else:
raise ValueError(f"Unknown optimizer type {params.optimizer_type}")
if params.scheduler == "ReduceLROnPlateau":
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer, factor=0.2, patience=5, mode="min"
)
elif params.scheduler == "CosineAnnealingLR":
if not hasattr(params, "scheduler_min_lr"):
params["scheduler_min_lr"] = 0.0
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
self.optimizer,
T_max=params.scheduler_T_max,
eta_min=params.scheduler_min_lr,
)
elif params.scheduler == "OneCycleLR":
self.scheduler = torch.optim.lr_scheduler.OneCycleLR(
self.optimizer,
max_lr=params.lr,
total_steps=params.scheduler_T_max,
steps_per_epoch=1,
)
else:
self.scheduler = None
if params.lr_warmup_steps > 0:
from utils.warmup_scheduler import WarmupScheduler
self.scheduler = WarmupScheduler(
self.scheduler,
num_warmup_steps=params.lr_warmup_steps,
start_lr=params.lr_start,
)
self.gscaler = amp.GradScaler(enabled=(self.amp_dtype == torch.float16))
# we need this further down
capture_stream = None
if dist.is_initialized() and not params.disable_ddp:
capture_stream = torch.cuda.Stream()
parameter_size_mb = (
count_parameters(self.model, self.device) * 4 / float(1024 * 1024)
)
reduction_size_mb = int(
(parameter_size_mb / params.parameters_reduction_buffer_count) * 1.05
)
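# bucket size heuristic: spread the fp32 parameter footprint evenly over the requested number
# of reduction buffers, with ~5% headroom so the last bucket does not overflow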
with torch.cuda.stream(capture_stream):
self.model = init_gradient_reduction_hooks(
self.model,
device_ids=[self.device.index],
output_device=[self.device.index],
bucket_cap_mb=reduction_size_mb,
broadcast_buffers=True,
find_unused_parameters=False,
gradient_as_bucket_view=True,
static_graph=params.checkpointing > 0,
)
capture_stream.synchronize()
# capture stream sync
capture_stream.synchronize()
# lets get one sample from the dataloader:
# get sample and map to gpu
iterator = iter(self.train_dataloader)
data = next(iterator)
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
# extract unpredicted features
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
# flatten
inp = self.preprocessor.flatten_history(inp)
tar = self.preprocessor.flatten_history(tar)
# get shapes
inp_shape = inp.shape
tar_shape = tar.shape
self._compile_model(inp_shape)
if not self.loss_obj.is_distributed():
self.loss_obj = torch.jit.script(self.loss_obj)
# graph capture
self.graph = None
if params.cuda_graph_mode != "none":
self._capture_model(
capture_stream, inp_shape, tar_shape, num_warmup_steps=20
)
# visualization wrapper:
plot_list = [
{
"name": "windspeed_uv10",
"functor": "lambda x: np.sqrt(np.square(x[0, ...]) + np.square(x[1, ...]))",
"diverging": False,
}
]
out_bias, out_scale = self.train_dataloader.get_output_normalization()
self.visualizer = visualize.VisualizationWrapper(
params.log_to_wandb,
path=None,
prefix=None,
plot_list=plot_list,
scale=out_scale[0, ...],
bias=out_bias[0, ...],
num_workers=params.num_visualization_workers,
)
# allocate pinned tensors for faster copy:
self.viz_stream = torch.cuda.Stream()
self.viz_prediction_cpu = torch.empty(
(
(params.N_target_channels // (params.n_future + 1)),
params.img_shape_x,
params.img_shape_y,
),
device="cpu",
).pin_memory()
self.viz_target_cpu = torch.empty(
(
(params.N_target_channels // (params.n_future + 1)),
params.img_shape_x,
params.img_shape_y,
),
device="cpu",
).pin_memory()
# reload checkpoints
self.iters = 0
self.startEpoch = 0
if params.finetune and not params.resuming:
assert (
params.pretrained_checkpoint_path is not None
), "Error, please specify a valid pretrained checkpoint path"
self.restore_checkpoint(
params.pretrained_checkpoint_path,
checkpoint_mode=params["load_checkpoint"],
)
if params.resuming:
self.restore_checkpoint(
params.checkpoint_path, checkpoint_mode=params["load_checkpoint"]
)
self.epoch = self.startEpoch
# counting runs a reduction so we need to count on all ranks before printing on rank 0
pcount = count_parameters(self.model, self.device)
if params.log_to_screen:
self.rank_zero_logger.info(
f"Number of trainable model parameters: {pcount}"
)
def train(self):
# log parameters
if self.params.log_to_screen:
# log memory usage so far
all_mem_gb = pynvml.nvmlDeviceGetMemoryInfo(self.nvml_handle).used / (
1024.0 * 1024.0 * 1024.0
)
max_mem_gb = torch.cuda.max_memory_allocated(device=self.device) / (
1024.0 * 1024.0 * 1024.0
)
self.rank_zero_logger.info(
f"Scaffolding memory high watermark: {all_mem_gb} GB ({max_mem_gb} GB for pytorch)"
)
# announce training start
self.rank_zero_logger.info("Starting Training Loop...")
# perform a barrier here to make sure everybody is ready
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
try:
torch.cuda.reset_peak_memory_stats(self.device)
except ValueError:
pass
training_start = time.time()
best_valid_loss = 1.0e6
for epoch in range(self.startEpoch, self.params.max_epochs):
if dist.is_initialized() and self.train_sampler is not None:
self.train_sampler.set_epoch(epoch)
# start timer
epoch_start = time.time()
train_time, train_data_gb, train_logs = self.train_one_epoch()
valid_time, viz_time, valid_logs = self.validate_one_epoch(epoch)
# if epoch == self.params.max_epochs - 1:
# self.train_dataloader.reset_pipeline()
# self.valid_dataloader.reset_pipeline()
# inf_time, inf_logs = self.inference_one_epoch(epoch)
if self.params.scheduler == "ReduceLROnPlateau":
self.scheduler.step(valid_logs["base"]["validation loss"])
elif self.scheduler is not None:
self.scheduler.step()
if self.params.log_to_wandb:
for pg in self.optimizer.param_groups:
lr = pg["lr"]
wandb.log({"learning rate": lr}, step=self.epoch)
if (self.data_parallel_rank == 0) and self.params.save_checkpoint:
# checkpoint at the end of every epoch
self.save_checkpoint(
self.params.checkpoint_path,
checkpoint_mode=self.params["save_checkpoint"],
)
best_checkpoint_path = self.params.best_checkpoint_path.format(
mp_rank=comm.get_rank("model")
)
best_checkpoint_saved = os.path.isfile(best_checkpoint_path)
if (not best_checkpoint_saved) or valid_logs["base"][
"validation loss"
] <= best_valid_loss:
# logging.info('Val loss improved from {} to {}'.format(best_valid_loss, valid_logs['valid_loss']))
self.save_checkpoint(
self.params.best_checkpoint_path,
checkpoint_mode=self.params["save_checkpoint"],
)
best_valid_loss = valid_logs["base"]["validation loss"]
# wait for everybody
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# end timer
epoch_end = time.time()
# create timing logs:
timing_logs = {
"epoch time [s]": epoch_end - epoch_start,
"training time [s]": train_time,
"validation time [s]": valid_time,
"visualization time [s]": viz_time,
"training step time [ms]": (train_time / train_logs["train_steps"])
* 10**3,
"minimal IO rate [GB/s]": train_data_gb / train_time,
}
# log metrics:
self.log_epoch(train_logs, valid_logs, timing_logs)
# training done
training_end = time.time()
if self.params.log_to_screen:
self.rank_zero_logger.success(
"Total training time is {:.2f} sec".format(
training_end - training_start
)
)
return
def _set_train(self):
self.model.train()
self.loss_obj.train()
self.preprocessor.train()
def _set_eval(self):
self.model.eval()
self.loss_obj.eval()
self.preprocessor.eval()
def train_one_epoch(self):
self.epoch += 1
total_data_bytes = 0
self._set_train()
train_steps = 0
train_start = time.perf_counter_ns()
for data in tqdm(
self.train_dataloader,
desc="Training progress ",
disable=not self.params.log_to_screen,
):
train_steps += 1
self.iters += 1
# map to device
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
# do preprocessing
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
inp = self.preprocessor.flatten_history(inp)
tar = self.preprocessor.flatten_history(tar)
# assuming float32
total_data_bytes += (torch.numel(inp) + torch.numel(tar)) * 4
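# if a CUDA graph was captured, copy the new batch into the static tensors and replay the
# recorded forward/backward pass; otherwise run a regular eager step below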
if self.graph is not None:
self.static_inp.copy_(inp)
self.static_tar.copy_(tar)
self.graph.replay()
loss = self.static_loss
else:
self.model_train.zero_grad(set_to_none=True)
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
pred = self.model_train(inp)
loss = self.loss_obj(pred, tar, inp)
self.gscaler.scale(loss).backward()
# perform weight update
self.gscaler.step(self.optimizer)
self.gscaler.update()
if (
(self.params.print_timings_frequency > 0)
and (self.iters % self.params.print_timings_frequency == 0)
and self.params.log_to_screen
):
running_train_time = time.perf_counter_ns() - train_start
print(
f"Average step time after step {self.iters}: {running_train_time / float(train_steps) * 10**(-6):.1f} ms"
)
print(
f"Average effective io rate after step {self.iters}: {total_data_bytes * float(comm.get_world_size()) / (float(running_train_time) * 10**(-9) * 1024. * 1024. * 1024.):.2f} GB/s"
)
print(f"Current loss {loss.item()}")
# add the eval loss to logs
logs = {"loss": loss}
if dist.is_initialized():
for key in sorted(logs.keys()):
dist.all_reduce(
logs[key].detach(),
op=dist.ReduceOp.AVG,
group=comm.get_group("data"),
)
logs[key] = logs[key].item()
# add train steps to log
logs["train_steps"] = train_steps
# global sync is in order
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# finalize timers
train_end = time.perf_counter_ns()
train_time = (train_end - train_start) * 10 ** (-9)
total_data_gb = (total_data_bytes / (1024.0 * 1024.0 * 1024.0)) * float(
comm.get_world_size()
)
return train_time, total_data_gb, logs
def validate_one_epoch(self, epoch):
# set to eval
self._set_eval()
# clear cache
torch.cuda.empty_cache()
# initialize metrics buffers
self.metrics.zero_buffers()
visualize = self.params.log_video and (epoch % self.params.log_video == 0)
# start the timer
valid_start = time.time()
with torch.inference_mode():
with torch.no_grad():
eval_steps = 0
for data in tqdm(
self.valid_dataloader,
desc="Validation progress",
disable=not self.params.log_to_screen,
):
eval_steps += 1
# map to gpu
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
# preprocess
inp, tar = self.preprocessor.cache_unpredicted_features(*gdata)
inp = self.preprocessor.flatten_history(inp)
# split list of targets
tarlist = torch.split(tar, 1, dim=1)
inpt = inp
# do autoregression
for idt, targ in enumerate(tarlist):
# flatten history of the target
targ = self.preprocessor.flatten_history(targ)
# FW pass
with amp.autocast(
enabled=self.amp_enabled, dtype=self.amp_dtype
):
pred = self.model_eval(inpt)
loss = self.loss_obj(pred, targ, inpt)
if eval_steps <= 1 and visualize:
pred_single = pred[0:1, ...].clone()
targ_single = targ[0:1, ...].clone()
pred_gather = torch.squeeze(
self.metrics._gather_input(pred_single), dim=0
)
targ_gather = torch.squeeze(
self.metrics._gather_input(targ_single), dim=0
)
self.viz_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.viz_stream):
self.viz_prediction_cpu.copy_(
pred_gather, non_blocking=True
)
self.viz_target_cpu.copy_(
targ_gather, non_blocking=True
)
self.viz_stream.synchronize()
pred_cpu = self.viz_prediction_cpu.to(
torch.float32
).numpy()
targ_cpu = self.viz_target_cpu.to(torch.float32).numpy()
tag = f"step{eval_steps}_time{str(idt).zfill(3)}"
self.visualizer.add(tag, pred_cpu, targ_cpu)
# put in the metrics handler
self.metrics.update(pred, targ, loss, idt)
# append history
inpt = self.preprocessor.append_history(inpt, pred, idt)
# create final logs
logs = self.metrics.finalize()
# finalize plotting
viz_time = time.perf_counter_ns()
if visualize:
self.visualizer.finalize()
viz_time = (time.perf_counter_ns() - viz_time) * 10 ** (-9)
# global sync is in order
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# timer
valid_time = time.time() - valid_start
return valid_time, viz_time, logs
def inference_one_epoch(self, epoch):
self._set_eval()
self.inference_dataloader, self.inference_dataset = get_dataloader(
self.params,
self.params.inf_data_path,
train=False,
final_eval=True,
device=self.device,
)
self.metrics.initialize_buffers()
# start the timer
valid_start = time.time()
with torch.no_grad():
eval_steps = 0
for data in tqdm(
self.inference_dataloader,
desc="Inference progress",
disable=not self.params.log_to_screen,
):
eval_steps += 1
gdata = map(lambda x: x.to(self.device, dtype=torch.float32), data)
if len(data) == 4:
inp, tar, izen, tzen = gdata
tzenlist = torch.split(tzen, 1, dim=1)
else:
inp, tar = gdata
izen = None
tzenlist = None
# split list of targets
tarlist = torch.split(tar, 1, dim=1)
inpt = inp
for idt, targ in enumerate(tarlist):
# might modify inpt too often
inpt, targ = self.preprocessor(inpt, targ, izen)
# FW pass
with amp.autocast(enabled=self.amp_enabled, dtype=self.amp_dtype):
pred = self.model_eval(inpt)
loss = self.loss_obj(pred, targ, inpt)
# append zenith angle to prediction
if tzenlist is not None:
predt = self.preprocessor.append_channels(
pred, tzenlist[idt]
)
else:
predt = pred
# append history if requested (TODO: verify this is actually needed for single-step inference)
inpt = self.preprocessor.append_history(inpt, predt)
# set to none so that we do not re-attach the channels
izen = None
# put in the metrics handler
self.metrics.update(pred, targ, loss, idt)
# create final logs
logs, acc_curve = self.metrics.finalize(final_inference=True)
if self.world_rank == 0:
np.save(
os.path.join(self.params.experiment_dir, "acc_curve.npy"),
acc_curve.cpu().numpy(),
)
if self.params.ifs_acc_path is not None:
visualize.plot_ifs_acc_comparison(acc_curve, self.params, self.epoch)
# global sync is in order
if dist.is_initialized():
dist.barrier(device_ids=[self.device.index])
# timer
inference_time = time.time() - valid_start
return inference_time, logs
def test_model_output(self, model):
"""helper to test checkpointing"""
inp_shape = (
self.params.batch_size,
self.params.N_in_channels,
self.params.img_shape_local_x,
self.params.img_shape_local_y,
)
matmul_comm_size = comm.get_size("matmul")
# modify inp shape due to model parallelism
if self.params.split_data_channels:
inp_shape_eff = (
inp_shape[0],
(inp_shape[1] + matmul_comm_size - 1) // matmul_comm_size,
inp_shape[2],
inp_shape[3],
)
else:
inp_shape_eff = (inp_shape[0], inp_shape[1], inp_shape[2], inp_shape[3])
random_tensor = os.path.join(
self.params.experiment_dir,
"random_tensor{}.npy".format(comm.get_rank("model")),
)
if not os.path.exists(random_tensor):
y = torch.rand(inp_shape_eff, dtype=torch.float).cpu().numpy()
np.save(random_tensor, y)
y = torch.from_numpy(np.load(random_tensor)).type(torch.float).to(self.device)
out = model(y).detach().cpu().numpy()
random_output = os.path.join(
self.params.experiment_dir,
"random_output{}.npy".format(comm.get_rank("model")),
)
if os.path.exists(random_output):
out_old = np.load(random_output)
diff = (out - out_old).flatten()
self.rank_zero_logger.info(
"Diff metrics: norm = {}, max = {}, min = {}".format(
np.linalg.norm(diff), np.max(diff), np.min(diff)
)
)
np.save(random_output, out)
def log_epoch(self, train_logs, valid_logs, timing_logs):
# separator
separator = "".join(["-" for _ in range(50)])
print_prefix = " "
def get_pad(nchar):
return "".join([" " for x in range(nchar)])
if self.params.log_to_screen:
# header:
self.rank_zero_logger.info(separator)
self.rank_zero_logger.info(f"Epoch {self.epoch} summary:")
self.rank_zero_logger.info(f"Performance Parameters:")
self.rank_zero_logger.info(
print_prefix + "training steps: {}".format(train_logs["train_steps"])
)
self.rank_zero_logger.info(
print_prefix
+ "validation steps: {}".format(valid_logs["base"]["validation steps"])
)
all_mem_gb = pynvml.nvmlDeviceGetMemoryInfo(self.nvml_handle).used / (
1024.0 * 1024.0 * 1024.0
)
self.rank_zero_logger.info(
print_prefix + f"memory footprint [GB]: {all_mem_gb:.2f}"
)
for key in timing_logs.keys():
self.rank_zero_logger.info(
print_prefix + key + ": {:.2f}".format(timing_logs[key])
)
# logging.info('Time taken for training in epoch {} is {:.2f} sec ({} steps)'.format(epoch + 1, time.time()-start, train_logs["train_steps"]))
# logging.info('Time taken for validation in epoch {} is {:.2f} sec ({} steps)'.format(epoch + 1, valid_time, valid_logs['base']["validation steps"]))
# logging.info('Effective training IO rate for epoch {} is {:.2f} GB/s'.format(epoch + 1, train_data_gb/tr_time))
# all_mem_gb = pynvml.nvmlDeviceGetMemoryInfo(self.nvml_handle).used / (1024. * 1024. * 1024.)
# max_mem_gb = torch.cuda.max_memory_allocated(device=self.device) / (1024. * 1024. * 1024.)
# logging.info(f'Memory high watermark: {all_mem_gb:.2f} GB ({max_mem_gb:.2f} GB for pytorch)')
# compute padding:
print_list = ["training loss", "validation loss", "validation L1"] + list(
valid_logs["metrics"].keys()
)
max_len = max([len(x) for x in print_list])
pad_len = [max_len - len(x) for x in print_list]
# validation summary
self.rank_zero_logger.info("Metrics:")
self.rank_zero_logger.info(
print_prefix
+ "training loss: {}{}".format(get_pad(pad_len[0]), train_logs["loss"])
)
self.rank_zero_logger.info(
print_prefix
+ "validation loss: {}{}".format(
get_pad(pad_len[1]), valid_logs["base"]["validation loss"]
)
)
self.rank_zero_logger.info(
print_prefix
+ "validation L1: {}{}".format(
get_pad(pad_len[2]), valid_logs["base"]["validation L1"]
)
)
for idk, key in enumerate(print_list[3:], start=3):
value = valid_logs["metrics"][key]
self.rank_zero_logger.info(
f"{print_prefix}{key}: {get_pad(pad_len[idk])}{value}"
)
self.rank_zero_logger.info(separator)
if self.params.log_to_wandb:
wandb.log(train_logs, step=self.epoch)
wandb.log(valid_logs["base"], step=self.epoch)
wandb.log(valid_logs["metrics"], step=self.epoch)
return
def save_checkpoint(self, checkpoint_path, model=None, checkpoint_mode="flexible"):
"""We intentionally require a checkpoint_dir to be passed
in order to allow Ray Tune to use this function"""
if not model:
model = self.model
self.rank_zero_logger.info(
f"Writing checkpoint to {checkpoint_path} ({checkpoint_mode} format)"
)
with torch.no_grad():
# legacy mode
if checkpoint_mode == "legacy":
# start timer
store_start = time.time()
checkpoint_fname = checkpoint_path.format(
mp_rank=comm.get_rank("model")
)
store_dict = {
"iters": self.iters,
"epoch": self.epoch,
"model_state": model.state_dict(),
"optimizer_state_dict": self.optimizer.state_dict(),
}
if self.scheduler is not None:
store_dict["scheduler_state_dict"] = self.scheduler.state_dict()
torch.save(store_dict, checkpoint_fname)
# stop timer
store_stop = time.time()
# report time
self.rank_zero_logger.info(
f"Save checkpoint (legacy): {(store_stop - store_start):.2f} sec ({sys.getsizeof(store_dict)/(1024.**3)}) GB"
)
elif checkpoint_mode == "flexible":
# clear cache
torch.cuda.empty_cache()
# start timer
collect_start = time.time()
# state_dict = model.state_dict()
state_dict = OrderedDict()
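# flexible save: gather every model-parallel shard into a full copy of each parameter
# (via gather_uneven along the sharded dimensions) so that rank 0 can write a single,
# layout-independent checkpoint file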
for k, v in self.model.named_parameters():
weight = v.clone()
if hasattr(v, "sharded_dims_mp"):
# gather the weight across all sharded dimensions
for d, group in enumerate(v.sharded_dims_mp):
if group is not None:
weight = gather_uneven(weight, d, group)
state_dict[k] = weight.to("cpu")
# stop timer
collect_stop = time.time()
# print collect time
self.rank_zero_logger.info(
f"Collect checkpoint (flexible): {(collect_stop - collect_start):.2f} sec."
)
# start timer:
store_start = time.time()
checkpoint_fname = checkpoint_path.format(mp_rank=0)
store_dict = {
"iters": self.iters,
"epoch": self.epoch,
"model_state": state_dict,
"optimizer_state_dict": self.optimizer.state_dict(),
}
if self.scheduler is not None:
store_dict["scheduler_state_dict"] = self.scheduler.state_dict()
# in flexible mode only rank 0 needs to save the data to disk
if self.world_rank == 0:
torch.save(
store_dict,
checkpoint_fname,
_use_new_zipfile_serialization=False,
)
# wait for group
if dist.is_initialized() and (comm.get_size("model") > 1):
dist.barrier(
device_ids=[self.device.index], group=comm.get_group("model")
)
# stop timer
store_stop = time.time()
self.rank_zero_logger.info(
f"Save checkpoint (flexible): {(store_stop - store_start):.2f} sec"
)
else:
raise ValueError(f"Unknown checkoint mode {checkpoint_mode}.")
def restore_checkpoint(self, checkpoint_path, checkpoint_mode="flexible"):
"""We intentionally require a checkpoint_dir to be passed
in order to allow Ray Tune to use this function"""
# legacy mode
if checkpoint_mode == "legacy":
checkpoint_fname = checkpoint_path.format(mp_rank=comm.get_rank("model"))
self.rank_zero_logger.info(f"Loading checkpoint {checkpoint_fname}")
checkpoint = torch.load(checkpoint_fname, map_location="cpu")
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
self.model.load_state_dict(state_dict, strict=True)
            # When fine-tuning (not resuming), skip loading the optimizer state and use the config-specified lr instead.
if self.params.resuming:
self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
if self.scheduler is not None:
self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
self.iters = checkpoint["iters"]
self.startEpoch = checkpoint["epoch"]
        # the new flexible mode allows loading models in arbitrary model-parallel configurations
        elif checkpoint_mode == "flexible":
            # when loading the weights in flexible mode we exclusively use mp_rank=0 and load them onto the cpu
checkpoint_fname = checkpoint_path.format(mp_rank=0)
self.rank_zero_logger.info(
f"Loading checkpoint {checkpoint_fname} in flexible mode"
)
checkpoint = torch.load(checkpoint_fname, map_location="cpu")
# this is reworked to avoid loading modules related to the SHT
state_dict = checkpoint["model_state"]
with torch.inference_mode():
with torch.no_grad():
for k, v in self.model.named_parameters():
if k in state_dict.keys():
weight = state_dict[k]
if hasattr(v, "sharded_dims_mp"):
for d, group in enumerate(v.sharded_dims_mp):
# continue if there is nothing to do
if (group is None) or (comm.get_size(group) == 1):
continue
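                                    # ceil-divide the weight along dim d so every rank owns at
                                    # most shard_size rows, then keep only this rank's shard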
shard_size = (
weight.shape[d] + comm.get_size(group) - 1
) // comm.get_size(group)
weight = torch.split(
weight, split_size_or_sections=shard_size, dim=d
)[comm.get_rank(group)]
v.copy_(weight)
else:
                                # warn about parameters that are missing from the checkpoint
                                print(f"Warning: checkpoint is missing parameter {k}")
            # When fine-tuning (not resuming), skip loading the optimizer state and use the config-specified lr instead.
if self.params.resuming:
self.iters = checkpoint["iters"]
self.startEpoch = checkpoint["epoch"]
                # not loading optimizer as momentum tensor shapes might have changed
# self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if self.scheduler is not None:
self.scheduler.load_state_dict(checkpoint["scheduler_state_dict"])
else:
raise ValueError(f"Unknown checkpoint mode {checkpoint_mode}.")
| modulus-launch-main | examples/weather/fcn_sfno/trainer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.utils.sfno.distributed import comm
import torch.distributed as dist
def count_parameters(model, device):
with torch.no_grad():
total_count = 0
for p in model.parameters():
if not p.requires_grad:
continue
# reduce over model group
pcount = torch.tensor(p.numel(), device=device)
if hasattr(p, "is_shared_mp") and p.is_shared_mp:
if comm.get_size("model") > 1:
dist.all_reduce(pcount, group=comm.get_group("model"))
# divide by shared dims:
for cname in p.is_shared_mp:
pcount = pcount / comm.get_size(cname)
total_count += pcount.item()
return total_count
def check_parameters(model):
for p in model.parameters():
if p.requires_grad:
print(p.shape, p.stride(), p.is_contiguous())
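# Illustrative usage sketch (added for clarity, not part of the original helpers):
# for an unsharded model, count_parameters reduces to a plain sum of p.numel().
if __name__ == "__main__":
    _toy = torch.nn.Linear(4, 2)  # 4*2 weights + 2 biases = 10 parameters
    print(f"unique parameters: {count_parameters(_toy, device='cpu'):.0f}")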
| modulus-launch-main | examples/weather/fcn_sfno/helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os
import hydra
import wandb
import matplotlib.pyplot as plt
from torch.nn.parallel import DistributedDataParallel
from omegaconf import DictConfig
from modulus.models.afno import AFNO
from modulus.datapipes.climate import ERA5HDF5Datapipe
from modulus.distributed import DistributedManager
from modulus.utils import StaticCaptureTraining, StaticCaptureEvaluateNoGrad
from modulus.launch.logging import LaunchLogger, PythonLogger, initialize_mlflow
from modulus.launch.utils import load_checkpoint, save_checkpoint
try:
from apex import optimizers
except ImportError:
    raise ImportError(
        "FCN training requires the apex package for its optimizer. "
        + "See https://github.com/nvidia/apex for install details."
)
def loss_func(x, y, p=2.0):
yv = y.reshape(x.size()[0], -1)
xv = x.reshape(x.size()[0], -1)
diff_norms = torch.linalg.norm(xv - yv, ord=p, dim=1)
y_norms = torch.linalg.norm(yv, ord=p, dim=1)
return torch.mean(diff_norms / y_norms)
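# Note (added for illustration): loss_func is a batch-averaged relative Lp norm,
# mean over the batch of ||x - y||_p / ||y||_p. A quick sanity check:
#   x = torch.ones(2, 3, 4)
#   loss_func(x, 2 * x)  # -> 0.5, since ||x - 2x||_2 / ||2x||_2 = 0.5 per sample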
@torch.no_grad()
def validation_step(eval_step, fcn_model, datapipe, channels=[0, 1], epoch=0):
loss_epoch = 0
num_examples = 0 # Number of validation examples
# Dealing with DDP wrapper
if hasattr(fcn_model, "module"):
fcn_model = fcn_model.module
fcn_model.eval()
for i, data in enumerate(datapipe):
invar = data[0]["invar"].detach()
outvar = data[0]["outvar"].cpu().detach()
predvar = torch.zeros_like(outvar)
for t in range(outvar.shape[1]):
output = eval_step(fcn_model, invar)
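            # autoregressive rollout: reuse the prediction as the next input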
invar.copy_(output)
predvar[:, t] = output.detach().cpu()
num_elements = torch.prod(torch.Tensor(list(predvar.shape[1:])))
loss_epoch += torch.sum(torch.pow(predvar - outvar, 2)) / num_elements
num_examples += predvar.shape[0]
# Plotting
if i == 0:
predvar = predvar.numpy()
outvar = outvar.numpy()
for chan in channels:
plt.close("all")
fig, ax = plt.subplots(
3, predvar.shape[1], figsize=(15, predvar.shape[0] * 5)
)
for t in range(outvar.shape[1]):
ax[0, t].imshow(predvar[0, t, chan])
ax[1, t].imshow(outvar[0, t, chan])
ax[2, t].imshow(predvar[0, t, chan] - outvar[0, t, chan])
fig.savefig(f"era5_validation_channel{chan}_epoch{epoch}.png")
fcn_model.train()
return loss_epoch / num_examples
@hydra.main(version_base="1.2", config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
DistributedManager.initialize()
dist = DistributedManager()
# Initialize loggers
# initialize_wandb(
# project="Modulus-Launch-Dev",
# entity="Modulus",
# name="FourCastNet-Training",
# group="FCN-DDP-Group",
# )
initialize_mlflow(
experiment_name="Modulus-Launch-Dev",
experiment_desc="Modulus launch development",
run_name="FCN-Training",
run_desc="FCN ERA5 Training",
user_name="Modulus User",
mode="offline",
)
LaunchLogger.initialize(use_mlflow=True) # Modulus launch logger
logger = PythonLogger("main") # General python logger
datapipe = ERA5HDF5Datapipe(
data_dir="/data/train/",
stats_dir="/data/stats/",
channels=[i for i in range(20)],
num_samples_per_year=1456, # Need better shard fix
batch_size=2,
patch_size=(8, 8),
num_workers=8,
device=dist.device,
process_rank=dist.rank,
world_size=dist.world_size,
)
logger.success(f"Loaded datapipe of size {len(datapipe)}")
if dist.rank == 0:
logger.file_logging()
validation_datapipe = ERA5HDF5Datapipe(
data_dir="/data/test/",
stats_dir="/data/stats/",
channels=[i for i in range(20)],
num_steps=8,
num_samples_per_year=4,
batch_size=1,
patch_size=(8, 8),
device=dist.device,
num_workers=8,
shuffle=False,
)
logger.success(f"Loaded validaton datapipe of size {len(validation_datapipe)}")
fcn_model = AFNO(
img_size=(720, 1440),
in_channels=20,
out_channels=20,
patch_size=(8, 8),
embed_dim=768,
depth=12,
num_blocks=8,
).to(dist.device)
if dist.rank == 0 and wandb.run is not None:
wandb.watch(
fcn_model, log="all", log_freq=1000, log_graph=(True)
) # currently does not work with scripted modules. This will be fixed in the next release of W&B SDK.
# Distributed learning
if dist.world_size > 1:
ddps = torch.cuda.Stream()
with torch.cuda.stream(ddps):
fcn_model = DistributedDataParallel(
fcn_model,
device_ids=[dist.local_rank],
output_device=dist.device,
broadcast_buffers=dist.broadcast_buffers,
find_unused_parameters=dist.find_unused_parameters,
)
torch.cuda.current_stream().wait_stream(ddps)
# Initialize optimizer and scheduler
optimizer = optimizers.FusedAdam(
fcn_model.parameters(), betas=(0.9, 0.999), lr=0.0005, weight_decay=0.0
)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=150)
# Attempt to load latest checkpoint if one exists
loaded_epoch = load_checkpoint(
"./checkpoints",
models=fcn_model,
optimizer=optimizer,
scheduler=scheduler,
device=dist.device,
)
@StaticCaptureEvaluateNoGrad(model=fcn_model, logger=logger, use_graphs=False)
def eval_step_forward(my_model, invar):
return my_model(invar)
@StaticCaptureTraining(model=fcn_model, optim=optimizer, logger=logger)
def train_step_forward(my_model, invar, outvar):
        # Autoregressive multi-step prediction: feed each prediction back in as the next input
        loss = 0
for t in range(outvar.shape[1]):
outpred = my_model(invar)
invar = outpred
loss += loss_func(outpred, outvar[:, t])
return loss
# Main training loop
max_epoch = 80
for epoch in range(max(1, loaded_epoch + 1), max_epoch + 1):
# Wrap epoch in launch logger for console / WandB logs
with LaunchLogger(
"train", epoch=epoch, num_mini_batch=len(datapipe), epoch_alert_freq=10
) as log:
# === Training step ===
for j, data in enumerate(datapipe):
invar = data[0]["invar"]
outvar = data[0]["outvar"]
loss = train_step_forward(fcn_model, invar, outvar)
log.log_minibatch({"loss": loss.detach()})
log.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})
if dist.rank == 0:
# Wrap validation in launch logger for console / WandB logs
with LaunchLogger("valid", epoch=epoch) as log:
# === Validation step ===
error = validation_step(
eval_step_forward, fcn_model, validation_datapipe, epoch=epoch
)
log.log_epoch({"Validation error": error})
if dist.world_size > 1:
torch.distributed.barrier()
scheduler.step()
if (epoch % 5 == 0 or epoch == 1) and dist.rank == 0:
# Use Modulus Launch checkpoint
save_checkpoint(
"./checkpoints",
models=fcn_model,
optimizer=optimizer,
scheduler=scheduler,
epoch=epoch,
)
if dist.rank == 0:
logger.info("Finished training!")
if __name__ == "__main__":
main()
| modulus-launch-main | examples/weather/fcn_afno/train_era5.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import urllib.request
import h5py
import numpy as np
import torch
import hydra
from omegaconf import DictConfig
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
from modulus.models.rnn.rnn_one2many import One2ManyRNN
import torch.nn.functional as F
from typing import Union
from modulus.launch.utils import load_checkpoint, save_checkpoint
from modulus.launch.logging import PythonLogger, LaunchLogger
from hydra.utils import to_absolute_path
from pyevtk.hl import imageToVTK
def prepare_data(
input_data_path,
output_data_path,
predict_nr_tsteps,
start_timestep,
):
    if not Path(output_data_path).is_file():
data = h5py.File(input_data_path)
list_data = []
for i in range(len(list(data.keys()))):
data_u = data[str(i)]["u"]
data_v = data[str(i)]["v"]
data_uv = np.stack([data_u, data_v], axis=0)
data_uv = np.array(data_uv)
list_data.append(data_uv)
data.close()
data_combined = np.stack(list_data, axis=0)
h = h5py.File(output_data_path, "w")
h.create_dataset(
"invar",
data=np.expand_dims(data_combined[:, :, start_timestep, ...], axis=2),
)
h.create_dataset(
"outvar",
data=data_combined[
:, :, start_timestep + 1 : start_timestep + 1 + predict_nr_tsteps, ...
],
)
h.close()
def validation_step(model, dataloader, epoch):
model.eval()
for data in dataloader:
invar, outvar = data
predvar = model(invar)
# convert data to numpy
outvar = outvar.detach().cpu().numpy()
predvar = predvar.detach().cpu().numpy()
# plotting
for t in range(outvar.shape[2]):
cellData = {
"outvar_chan0": outvar[0, 0, t, ...],
"outvar_chan1": outvar[0, 1, t, ...],
"predvar_chan0": predvar[0, 0, t, ...],
"predvar_chan1": predvar[0, 1, t, ...],
}
imageToVTK(f"./test_{t}", cellData=cellData)
class HDF5MapStyleDataset(Dataset):
def __init__(
self,
file_path,
device: Union[str, torch.device] = "cuda",
):
self.file_path = file_path
with h5py.File(file_path, "r") as f:
self.keys = list(f.keys())
# Set up device, needed for pipeline
if isinstance(device, str):
device = torch.device(device)
        # Need an index id if cuda
        if device.type == "cuda" and device.index is None:
device = torch.device("cuda:0")
self.device = device
def __len__(self):
with h5py.File(self.file_path, "r") as f:
return len(f[self.keys[0]])
def __getitem__(self, idx):
data = {}
with h5py.File(self.file_path, "r") as f:
for key in self.keys:
data[key] = np.array(f[key][idx])
invar = torch.from_numpy(data["invar"])
outvar = torch.from_numpy(data["outvar"])
if self.device.type == "cuda":
# Move tensors to GPU
invar = invar.cuda()
outvar = outvar.cuda()
return invar, outvar
@hydra.main(version_base="1.2", config_path="conf", config_name="config_3d")
def main(cfg: DictConfig) -> None:
logger = PythonLogger("main") # General python logger
LaunchLogger.initialize()
# Data download
raw_train_data_path = to_absolute_path("./datasets/grayscott_training.hdf5")
raw_test_data_path = to_absolute_path("./datasets/grayscott_test.hdf5")
# Download data
    if not Path(raw_train_data_path).is_file():
logger.info("Data download starting...")
url = "https://zenodo.org/record/5148524/files/grayscott_training.tar.gz"
os.makedirs(to_absolute_path("./datasets/"), exist_ok=True)
output_path = to_absolute_path("./datasets/grayscott_training.tar.gz")
urllib.request.urlretrieve(url, output_path)
logger.info("Data downloaded.")
logger.info("Extracting data...")
with tarfile.open(output_path, "r") as tar_ref:
tar_ref.extractall(to_absolute_path("./datasets/"))
logger.info("Data extracted")
    if not Path(raw_test_data_path).is_file():
logger.info("Data download starting...")
url = "https://zenodo.org/record/5148524/files/grayscott_test.tar.gz"
os.makedirs(to_absolute_path("./datasets/"), exist_ok=True)
output_path = to_absolute_path("./datasets/grayscott_test.tar.gz")
urllib.request.urlretrieve(url, output_path)
logger.info("Data downloaded.")
logger.info("Extracting data...")
with tarfile.open(output_path, "r") as tar_ref:
tar_ref.extractall(to_absolute_path("./datasets/"))
logger.info("Data extracted")
# Data pre-processing
nr_tsteps_to_predict = 64
nr_tsteps_to_test = 64
start_timestep = 5
train_save_path = "./train_data_gray_scott_one2many.hdf5"
test_save_path = "./test_data_gray_scott_one2many.hdf5"
# prepare data
prepare_data(
raw_train_data_path, train_save_path, nr_tsteps_to_predict, start_timestep
)
prepare_data(
raw_test_data_path,
test_save_path,
nr_tsteps_to_test,
start_timestep,
)
train_dataset = HDF5MapStyleDataset(train_save_path, device="cuda")
train_dataloader = DataLoader(
train_dataset, batch_size=cfg.batch_size, shuffle=True
)
test_dataset = HDF5MapStyleDataset(test_save_path, device="cuda")
test_dataloader = DataLoader(
test_dataset, batch_size=cfg.batch_size_test, shuffle=False
)
# set device as GPU
device = "cuda"
# instantiate model
arch = One2ManyRNN(
input_channels=2,
dimension=3,
nr_tsteps=nr_tsteps_to_predict,
nr_downsamples=2,
nr_residual_blocks=2,
nr_latent_channels=16,
)
if device == "cuda":
arch.cuda()
optimizer = torch.optim.Adam(
arch.parameters(),
betas=(0.9, 0.999),
lr=cfg.start_lr,
weight_decay=0.0,
)
scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma=cfg.lr_scheduler_gamma
)
loaded_epoch = load_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
device="cuda",
)
# Training loop
for epoch in range(max(1, loaded_epoch + 1), cfg.max_epochs + 1):
# wrap epoch in launch logger for console logs
with LaunchLogger(
"train",
epoch=epoch,
num_mini_batch=len(train_dataloader),
epoch_alert_freq=1,
) as log:
# go through the full dataset
for i, data in enumerate(train_dataloader):
invar, outvar = data
optimizer.zero_grad()
outpred = arch(invar)
loss = F.mse_loss(outvar, outpred)
loss.backward()
optimizer.step()
scheduler.step()
log.log_minibatch({"loss": loss.detach()})
log.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})
with LaunchLogger("valid", epoch=epoch) as log:
validation_step(arch, test_dataloader, epoch)
if epoch % cfg.checkpoint_save_freq == 0:
save_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
epoch=epoch,
)
logger.info("Finished Training")
if __name__ == "__main__":
main()
| modulus-launch-main | examples/cfd/gray_scott_rnn/gray_scott_rnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import zipfile
import h5py
import numpy as np
import torch
import hydra
from omegaconf import DictConfig
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
from modulus.models.rnn.rnn_one2many import One2ManyRNN
from modulus.models.rnn.rnn_seq2seq import Seq2SeqRNN
import torch.nn.functional as F
import matplotlib.pyplot as plt
from typing import Union
from modulus.launch.utils import load_checkpoint, save_checkpoint
from modulus.launch.logging import PythonLogger, LaunchLogger
from hydra.utils import to_absolute_path
def prepare_data(
input_data_path,
output_data_path,
input_nr_tsteps,
predict_nr_tsteps,
start_idx,
num_samples,
):
    if not Path(output_data_path).is_file():
arrays = {}
data = h5py.File(input_data_path)
for k, v in data.items():
arrays[k] = np.array(v)
invar = arrays["u"][
input_nr_tsteps : input_nr_tsteps + predict_nr_tsteps,
...,
start_idx : start_idx + num_samples,
]
outvar = arrays["u"][
input_nr_tsteps
+ predict_nr_tsteps : input_nr_tsteps
+ 2 * predict_nr_tsteps,
...,
start_idx : start_idx + num_samples,
]
invar = np.moveaxis(invar, -1, 0)
outvar = np.moveaxis(outvar, -1, 0)
invar = np.expand_dims(invar, axis=1)
outvar = np.expand_dims(outvar, axis=1)
h = h5py.File(output_data_path, "w")
h.create_dataset("invar", data=invar)
h.create_dataset("outvar", data=outvar)
h.close()
def validation_step(model, dataloader, epoch):
model.eval()
loss_epoch = 0
for data in dataloader:
invar, outvar = data
predvar = model(invar)
loss_epoch += F.mse_loss(outvar, predvar)
# convert data to numpy
outvar = outvar.detach().cpu().numpy()
predvar = predvar.detach().cpu().numpy()
# plotting
fig, ax = plt.subplots(2, outvar.shape[2], figsize=(5 * outvar.shape[2], 10))
for t in range(outvar.shape[2]):
ax[0, t].imshow(outvar[0, 0, t, ...])
ax[1, t].imshow(predvar[0, 0, t, ...])
ax[0, t].set_title(f"True: {t}")
ax[1, t].set_title(f"Pred: {t}")
fig.savefig(f"./test_{epoch}.png")
plt.close()
return loss_epoch / len(dataloader)
class HDF5MapStyleDataset(Dataset):
def __init__(
self,
file_path,
device: Union[str, torch.device] = "cuda",
):
self.file_path = file_path
with h5py.File(file_path, "r") as f:
self.keys = list(f.keys())
# Set up device, needed for pipeline
if isinstance(device, str):
device = torch.device(device)
        # Need an index id if cuda
        if device.type == "cuda" and device.index is None:
device = torch.device("cuda:0")
self.device = device
def __len__(self):
with h5py.File(self.file_path, "r") as f:
return len(f[self.keys[0]])
def __getitem__(self, idx):
data = {}
with h5py.File(self.file_path, "r") as f:
for key in self.keys:
data[key] = np.array(f[key][idx])
invar = torch.from_numpy(data["invar"])
outvar = torch.from_numpy(data["outvar"])
if self.device.type == "cuda":
# Move tensors to GPU
invar = invar.cuda()
outvar = outvar.cuda()
return invar, outvar
@hydra.main(version_base="1.2", config_path="conf", config_name="config_2d")
def main(cfg: DictConfig) -> None:
logger = PythonLogger("main") # General python logger
LaunchLogger.initialize()
raw_data_path = to_absolute_path("./datasets/ns_V1e-3_N5000_T50.mat")
# Download data
    if not Path(raw_data_path).is_file():
try:
import gdown
        except ImportError:
logger.error(
"gdown package not found, install it using `pip install gdown`"
)
sys.exit()
logger.info("Data download starting...")
url = "https://drive.google.com/uc?id=1r3idxpsHa21ijhlu3QQ1hVuXcqnBTO7d"
os.makedirs(to_absolute_path("./datasets/"), exist_ok=True)
output_path = to_absolute_path("./datasets/navier_stokes.zip")
gdown.download(url, output_path, quiet=False)
logger.info("Data downloaded.")
logger.info("Extracting data...")
with zipfile.ZipFile(output_path, "r") as zip_ref:
zip_ref.extractall(to_absolute_path("./datasets/"))
logger.info("Data extracted")
# Data pre-processing
num_samples = 1000
test_samples = 10
nr_tsteps_to_predict = 16
nr_tsteps_to_test = 16
if cfg.model_type == "one2many":
input_nr_tsteps = 1
elif cfg.model_type == "seq2seq":
input_nr_tsteps = nr_tsteps_to_predict
else:
logger.error("Invalid model type!")
raw_data_path = to_absolute_path("./datasets/ns_V1e-3_N5000_T50.mat")
train_save_path = "./train_data_" + str(cfg.model_type) + ".hdf5"
test_save_path = "./test_data_" + str(cfg.model_type) + ".hdf5"
# prepare data
prepare_data(
raw_data_path,
train_save_path,
input_nr_tsteps,
nr_tsteps_to_predict,
0,
num_samples,
)
prepare_data(
raw_data_path,
test_save_path,
input_nr_tsteps,
nr_tsteps_to_test,
num_samples,
test_samples,
)
train_dataset = HDF5MapStyleDataset(train_save_path, device="cuda")
train_dataloader = DataLoader(
train_dataset, batch_size=cfg.batch_size, shuffle=True
)
test_dataset = HDF5MapStyleDataset(test_save_path, device="cuda")
test_dataloader = DataLoader(
test_dataset, batch_size=cfg.batch_size_test, shuffle=False
)
# set device as GPU
device = "cuda"
# instantiate model
if cfg.model_type == "one2many":
arch = One2ManyRNN(
input_channels=1,
dimension=2,
nr_tsteps=nr_tsteps_to_predict,
nr_downsamples=3,
nr_residual_blocks=2,
nr_latent_channels=32,
)
elif cfg.model_type == "seq2seq":
arch = Seq2SeqRNN(
input_channels=1,
dimension=2,
nr_tsteps=nr_tsteps_to_predict,
nr_downsamples=3,
nr_residual_blocks=2,
nr_latent_channels=32,
)
else:
logger.error("Invalid model type!")
if device == "cuda":
arch.cuda()
optimizer = torch.optim.Adam(
arch.parameters(),
betas=(0.9, 0.999),
lr=cfg.start_lr,
weight_decay=0.0,
)
scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, gamma=cfg.lr_scheduler_gamma
)
loaded_epoch = load_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
device="cuda",
)
# Training loop
for epoch in range(max(1, loaded_epoch + 1), cfg.max_epochs + 1):
# wrap epoch in launch logger for console logs
with LaunchLogger(
"train",
epoch=epoch,
num_mini_batch=len(train_dataloader),
epoch_alert_freq=10,
) as log:
# go through the full dataset
for data in train_dataloader:
invar, outvar = data
optimizer.zero_grad()
outpred = arch(invar)
loss = F.mse_loss(outvar, outpred)
loss.backward()
optimizer.step()
scheduler.step()
# log.log_minibatch({"loss": loss.detach()})
log.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})
with LaunchLogger("valid", epoch=epoch) as log:
error = validation_step(arch, test_dataloader, epoch)
log.log_epoch({"Validation error": error})
if epoch % cfg.checkpoint_save_freq == 0:
save_checkpoint(
"./checkpoints",
models=arch,
optimizer=optimizer,
scheduler=scheduler,
epoch=epoch,
)
logger.info("Finished Training")
if __name__ == "__main__":
main()
| modulus-launch-main | examples/cfd/navier_stokes_rnn/navier_stokes_rnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from pydantic import BaseModel
from typing import Tuple, Optional
class Constants(BaseModel):
"""Ahmed Body model constants"""
ckpt_path: str = "./checkpoints"
ckpt_name: str = "./ahmed_body.pt"
data_dir: str = "../dataset"
results_dir: str = "./results"
input_dim_nodes: int = 11
input_dim_edges: int = 4
output_dim: int = 4
    aggregation: str = "sum"
    hidden_dim_node_encoder: int = 256
    hidden_dim_edge_encoder: int = 256
    hidden_dim_node_decoder: int = 256
batch_size: int = 1
epochs: int = 500
num_training_samples: int = 428
num_validation_samples: int = 20
num_test_samples: int = 10
lr: float = 1e-4
lr_decay_rate: float = 0.99985
amp: bool = False
jit: bool = False
wandb_mode = "disabled"
| modulus-launch-main | examples/cfd/ahmed_body_mgn/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import Tensor
def compute_drag_coefficient(normals, area, coeff, p, s):
"""
Compute drag coefficient for a given mesh.
Parameters:
-----------
normals: Tensor
The surface normals mapped onto nodes
area: Tensor
The surface areas of each cell mapped onto nodes
coeff: Tensor
Dynamic pressure times the frontal area
p: Tensor
Pressure distribution on the mesh
s: Tensor
Wall shear stress distribution on the mesh
Returns:
--------
c_drag: float:
Computed drag coefficient
"""
# Compute coefficients
c_p = coeff * torch.dot(normals[:, 0], area * p)
c_f = -coeff * torch.dot(s[:, 0], area)
# Compute total drag coefficients
c_drag = c_p + c_f
return c_drag
def relative_lp_error(pred, y, p=2):
"""
    Calculate relative Lp error norm
    Parameters:
    -----------
    pred: torch.Tensor
        Prediction
    y: torch.Tensor
        Ground truth
    p: int, optional
        Order of the norm, by default 2
    Returns:
    --------
    error: float
        Calculated relative Lp error norm (percentage) on cpu
"""
error = (
torch.mean(torch.linalg.norm(pred - y, ord=p) / torch.linalg.norm(y, ord=p))
.cpu()
.numpy()
)
return error * 100
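# Illustrative sanity check (added, not part of the original module): a prediction
# that is uniformly 10% above the ground truth yields roughly a 10% relative error.
if __name__ == "__main__":
    _y = torch.ones(100)
    print(relative_lp_error(1.1 * _y, _y))  # ~10.0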
| modulus-launch-main | examples/cfd/ahmed_body_mgn/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.nn.parallel import DistributedDataParallel
import wandb as wb
from modulus.models.meshgraphnet import MeshGraphNet
from modulus.datapipes.gnn.ahmed_body_dataset import AhmedBodyDataset
from modulus.distributed.manager import DistributedManager
from modulus.launch.logging import (
PythonLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
from modulus.launch.utils import load_checkpoint, save_checkpoint
from utils import relative_lp_error
from constants import Constants
try:
from dgl.dataloading import GraphDataLoader
except ImportError:
raise ImportError(
"Ahmed Body example requires the DGL library. Install the "
+ "desired CUDA version at: \n https://www.dgl.ai/pages/start.html"
)
try:
import apex
except ImportError:
pass
# Instantiate constants
C = Constants()
class MGNTrainer:
def __init__(self, wb, dist, rank_zero_logger):
self.dist = dist
self.wb = wb
self.rank_zero_logger = rank_zero_logger
# instantiate dataset
rank_zero_logger.info("Loading the training dataset...")
self.dataset = AhmedBodyDataset(
name="ahmed_body_train",
data_dir=C.data_dir,
split="train",
num_samples=C.num_training_samples,
)
# instantiate validation dataset
rank_zero_logger.info("Loading the validation dataset...")
self.validation_dataset = AhmedBodyDataset(
name="ahmed_body_validation",
data_dir=C.data_dir,
split="validation",
num_samples=C.num_validation_samples,
)
# instantiate dataloader
self.dataloader = GraphDataLoader(
self.dataset,
batch_size=C.batch_size,
shuffle=True,
drop_last=True,
pin_memory=True,
use_ddp=dist.world_size > 1,
)
# instantiate validation dataloader
self.validation_dataloader = GraphDataLoader(
self.validation_dataset,
batch_size=C.batch_size,
shuffle=False,
drop_last=True,
pin_memory=True,
use_ddp=False,
)
# instantiate the model
self.model = MeshGraphNet(
C.input_dim_nodes,
C.input_dim_edges,
C.output_dim,
aggregation=C.aggregation,
hidden_dim_node_encoder=C.hidden_dim_node_encoder,
hidden_dim_edge_encoder=C.hidden_dim_edge_encoder,
hidden_dim_node_decoder=C.hidden_dim_node_decoder,
)
if C.jit:
self.model = torch.jit.script(self.model).to(dist.device)
else:
self.model = self.model.to(dist.device)
# distributed data parallel for multi-node training
if dist.world_size > 1:
self.model = DistributedDataParallel(
self.model,
device_ids=[dist.local_rank],
output_device=dist.device,
broadcast_buffers=dist.broadcast_buffers,
find_unused_parameters=dist.find_unused_parameters,
)
# enable train mode
self.model.train()
# instantiate loss, optimizer, and scheduler
self.criterion = torch.nn.MSELoss()
try:
self.optimizer = apex.optimizers.FusedAdam(self.model.parameters(), lr=C.lr)
rank_zero_logger.info("Using FusedAdam optimizer")
        except (NameError, AttributeError):
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=C.lr)
            rank_zero_logger.info("Apex not available, using torch.optim.Adam optimizer")
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optimizer, lr_lambda=lambda epoch: C.lr_decay_rate**epoch
)
self.scaler = GradScaler()
# load checkpoint
if dist.world_size > 1:
torch.distributed.barrier()
self.epoch_init = load_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=self.model,
optimizer=self.optimizer,
scheduler=self.scheduler,
scaler=self.scaler,
device=dist.device,
)
def train(self, graph):
self.optimizer.zero_grad()
loss = self.forward(graph)
self.backward(loss)
self.scheduler.step()
return loss
def forward(self, graph):
# forward pass
with autocast(enabled=C.amp):
pred = self.model(graph.ndata["x"], graph.edata["x"], graph)
loss = self.criterion(pred, graph.ndata["y"])
return loss
def backward(self, loss):
# backward pass
if C.amp:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss.backward()
self.optimizer.step()
lr = self.get_lr()
self.wb.log({"lr": lr})
def get_lr(self):
# get the learning rate
for param_group in self.optimizer.param_groups:
return param_group["lr"]
@torch.no_grad()
def validation(self):
error = 0
for graph in self.validation_dataloader:
graph = graph.to(self.dist.device)
pred = self.model(graph.ndata["x"], graph.edata["x"], graph)
gt = graph.ndata["y"]
error += relative_lp_error(pred, gt)
error = error / len(self.validation_dataloader)
self.wb.log({"val_error (%)": error})
self.rank_zero_logger.info(f"Validation error (%): {error}")
if __name__ == "__main__":
# initialize distributed manager
DistributedManager.initialize()
dist = DistributedManager()
# save constants to JSON file
if dist.rank == 0:
os.makedirs(C.ckpt_path, exist_ok=True)
with open(
os.path.join(C.ckpt_path, C.ckpt_name.replace(".pt", ".json")), "w"
) as json_file:
json_file.write(C.json(indent=4))
# initialize loggers
initialize_wandb(
project="Aero",
entity="Modulus",
name="Aero-Training",
group="Aero-DDP-Group",
mode=C.wandb_mode,
) # Wandb logger
logger = PythonLogger("main") # General python logger
rank_zero_logger = RankZeroLoggingWrapper(logger, dist) # Rank 0 logger
logger.file_logging()
trainer = MGNTrainer(wb, dist, rank_zero_logger)
start = time.time()
rank_zero_logger.info("Training started...")
for epoch in range(trainer.epoch_init, C.epochs):
loss_agg = 0
for graph in trainer.dataloader:
graph = graph.to(dist.device)
loss = trainer.train(graph)
loss_agg += loss.detach().cpu().numpy()
loss_agg /= len(trainer.dataloader)
rank_zero_logger.info(
f"epoch: {epoch}, loss: {loss_agg:10.3e}, lr: {trainer.get_lr()}, time per epoch: {(time.time()-start):10.3e}"
)
wb.log({"loss": loss_agg})
# validation
if dist.rank == 0:
trainer.validation()
# save checkpoint
if dist.world_size > 1:
torch.distributed.barrier()
if dist.rank == 0:
save_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=trainer.model,
optimizer=trainer.optimizer,
scheduler=trainer.scheduler,
scaler=trainer.scaler,
epoch=epoch,
)
logger.info(f"Saved model on rank {dist.rank}")
start = time.time()
rank_zero_logger.info("Training completed!")
| modulus-launch-main | examples/cfd/ahmed_body_mgn/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import numpy as np
import wandb as wb
from modulus.models.meshgraphnet import MeshGraphNet
from modulus.datapipes.gnn.ahmed_body_dataset import AhmedBodyDataset
from modulus.launch.utils import load_checkpoint
from modulus.launch.logging import PythonLogger
from utils import compute_drag_coefficient, relative_lp_error
from constants import Constants
try:
from dgl.dataloading import GraphDataLoader
from dgl import DGLGraph
except ImportError:
raise ImportError(
"Ahmed Body example requires the DGL library. Install the "
+ "desired CUDA version at: \n https://www.dgl.ai/pages/start.html"
)
try:
import pyvista as pv
except ImportError:
raise ImportError(
"Ahmed Body Dataset requires the pyvista library. Install with "
+ "pip install pyvista"
)
C = Constants()
def dgl_to_pyvista(graph: DGLGraph):
"""
Converts a DGL graph to a PyVista graph.
Parameters:
-----------
graph: DGLGraph
The input DGL graph.
Returns:
--------
pv_graph:
The output PyVista graph.
"""
# Convert the DGL graph to a NetworkX graph
nx_graph = graph.to_networkx(
node_attrs=["pos", "p_pred", "p", "s_pred", "wallShearStress"]
).to_undirected()
# Initialize empty lists for storing data
points = []
lines = []
p_pred = []
s_pred = []
p = []
wallShearStress = []
# Iterate over the nodes in the NetworkX graph
for node, attributes in nx_graph.nodes(data=True):
# Append the node and attribute data to the respective lists
points.append(attributes["pos"].numpy())
p_pred.append(attributes["p_pred"].numpy())
s_pred.append(attributes["s_pred"].numpy())
p.append(attributes["p"].numpy())
wallShearStress.append(attributes["wallShearStress"].numpy())
# Add edges to the lines list
for edge in nx_graph.edges():
lines.extend([2, edge[0], edge[1]])
# Initialize a PyVista graph
pv_graph = pv.PolyData()
# Assign the points, lines, and attributes to the PyVista graph
pv_graph.points = np.array(points)
pv_graph.lines = np.array(lines)
pv_graph.point_data["p_pred"] = np.array(p_pred)
pv_graph.point_data["s_pred"] = np.array(s_pred)
pv_graph.point_data["p"] = np.array(p)
pv_graph.point_data["wallShearStress"] = np.array(wallShearStress)
return pv_graph
class AhmedBodyRollout:
def __init__(self, wb, logger):
# set device
self.device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using {self.device} device")
# instantiate dataset
self.dataset = AhmedBodyDataset(
name="ahmed_body_test",
data_dir=C.data_dir,
split="test",
num_samples=C.num_test_samples,
compute_drag=True,
)
# instantiate dataloader
self.dataloader = GraphDataLoader(
self.dataset,
batch_size=C.batch_size,
shuffle=False,
drop_last=False,
)
# instantiate the model
self.model = MeshGraphNet(
C.input_dim_nodes,
C.input_dim_edges,
C.output_dim,
aggregation=C.aggregation,
hidden_dim_node_encoder=C.hidden_dim_node_encoder,
hidden_dim_edge_encoder=C.hidden_dim_edge_encoder,
hidden_dim_node_decoder=C.hidden_dim_node_decoder,
)
self.model = self.model.to(self.device)
        # enable eval mode
self.model.eval()
# load checkpoint
_ = load_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=self.model,
device=self.device,
)
def predict(self, save_results=False):
"""
Run the prediction process.
Parameters:
-----------
save_results: bool
Whether to save the results in form of a .vtp file, by default False
Returns:
--------
None
"""
self.pred, self.exact, self.faces, self.graphs = [], [], [], []
for i, (graph, sid, normals, areas, coeff) in enumerate(self.dataloader):
graph = graph.to(self.device)
normals = normals.to(self.device, torch.float32).squeeze()
areas = areas.to(self.device, torch.float32).squeeze()
coeff = coeff.to(self.device, torch.float32).squeeze()
sid = sid.item()
logger.info(f"Processing sample ID {sid}")
pred = self.model(graph.ndata["x"], graph.edata["x"], graph).detach()
gt = graph.ndata["y"]
graph.ndata["p_pred"] = pred[:, 0]
graph.ndata["s_pred"] = pred[:, 1:]
graph.ndata["p"] = gt[:, 0]
graph.ndata["wallShearStress"] = gt[:, 1:]
error = relative_lp_error(pred, gt)
logger.info(f"Test error (%): {error}")
if save_results:
# Convert DGL graph to PyVista graph and save it
os.makedirs(C.results_dir, exist_ok=True)
pv_graph = dgl_to_pyvista(graph.cpu())
pv_graph.save(os.path.join(C.results_dir, f"graph_{sid}.vtp"))
if __name__ == "__main__":
logger = PythonLogger("main") # General python logger
logger.file_logging()
logger.info("Rollout started...")
rollout = AhmedBodyRollout(wb, logger)
rollout.predict(save_results=True)
| modulus-launch-main | examples/cfd/ahmed_body_mgn/inference.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import matplotlib.pyplot as plt
from torch import FloatTensor
from torch.nn import MSELoss
from mlflow import log_figure
class GridValidator:
"""Grid Validator
The validator compares model output and target, inverts normalisation and plots a sample
Parameters
----------
loss_fun : MSELoss
loss function for assessing validation error
norm : Dict, optional
mean and standard deviation for each channel to normalise input and target
out_dir : str, optional
directory to which plots shall be stored
font_size : float, optional
font size used in figures
"""
def __init__(
self,
loss_fun,
norm: dict = {"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
out_dir: str = "./outputs/validators",
font_size: float = 28.0,
):
self.norm = norm
self.criterion = loss_fun
self.font_size = font_size
self.headers = ("invar", "truth", "prediction", "relative error")
self.out_dir = os.path.abspath(os.path.join(os.getcwd(), out_dir))
if not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
def compare(
self,
invar: FloatTensor,
target: FloatTensor,
prediction: FloatTensor,
step: int,
) -> float:
"""compares model output, target and plots everything
Parameters
----------
invar : FloatTensor
input to model
target : FloatTensor
ground truth
prediction : FloatTensor
model output
step : int
iteration counter
Returns
-------
float
validation error
"""
loss = self.criterion(prediction, target)
norm = self.norm
# pick first sample from batch
invar = invar * norm["permeability"][1] + norm["permeability"][0]
target = target * norm["darcy"][1] + norm["darcy"][0]
prediction = prediction * norm["darcy"][1] + norm["darcy"][0]
invar = invar.cpu().numpy()[0, -1, :, :]
target = target.cpu().numpy()[0, 0, :, :]
prediction = prediction.detach().cpu().numpy()[0, 0, :, :]
plt.close("all")
plt.rcParams.update({"font.size": self.font_size})
fig, ax = plt.subplots(1, 4, figsize=(15 * 4, 15), sharey=True)
im = []
im.append(ax[0].imshow(invar))
im.append(ax[1].imshow(target))
im.append(ax[2].imshow(prediction))
im.append(ax[3].imshow((prediction - target) / norm["darcy"][1]))
for ii in range(len(im)):
fig.colorbar(im[ii], ax=ax[ii], location="bottom", fraction=0.046, pad=0.04)
ax[ii].set_title(self.headers[ii])
log_figure(fig, f"val_step_{step}.png")
fig.savefig(os.path.join(self.out_dir, f"validation_step_{step}.png"))
return loss
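# Usage sketch (added for illustration; assumes an active MLflow run and tensors
# shaped [batch, channel, height, width] as in the accompanying training script):
#   validator = GridValidator(loss_fun=MSELoss())
#   val_loss = validator.compare(invar, target, prediction, step=100)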
| modulus-launch-main | examples/cfd/darcy_fno/validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import hydra
from omegaconf import DictConfig
from math import ceil
from torch.nn import MSELoss
from torch.optim import Adam, lr_scheduler
from modulus.models.mlp import FullyConnected
from modulus.models.fno import FNO
from modulus.datapipes.benchmarks.darcy import Darcy2D
from modulus.distributed import DistributedManager
from modulus.utils import StaticCaptureTraining, StaticCaptureEvaluateNoGrad
from modulus.launch.utils import load_checkpoint, save_checkpoint
from modulus.launch.logging import PythonLogger, LaunchLogger, initialize_mlflow
from validator import GridValidator
@hydra.main(version_base="1.3", config_path=".", config_name="config.yaml")
def darcy_trainer(cfg: DictConfig) -> None:
"""Training for the 2D Darcy flow benchmark problem.
This training script demonstrates how to set up a data-driven model for a 2D Darcy flow
using Fourier Neural Operators (FNO) and acts as a benchmark for this type of operator.
Training data is generated in-situ via the Darcy2D data loader from Modulus. Darcy2D
continuously generates data previously unseen by the model, i.e. the model is trained
over a single epoch of a training set consisting of
(cfg.training.max_pseudo_epochs*cfg.training.pseudo_epoch_sample_size) unique samples.
Pseudo_epochs were introduced to leverage the LaunchLogger and its MLFlow integration.
"""
DistributedManager.initialize() # Only call this once in the entire script!
dist = DistributedManager() # call if required elsewhere
# initialize monitoring
log = PythonLogger(name="darcy_fno")
# initialize monitoring
initialize_mlflow(
experiment_name=f"Darcy_FNO",
experiment_desc=f"training an FNO model for the Darcy problem",
run_name=f"Darcy FNO training",
run_desc=f"training FNO for Darcy",
user_name="Gretchen Ross",
mode="offline",
)
LaunchLogger.initialize(use_mlflow=True) # Modulus launch logger
# define model, loss, optimiser, scheduler, data loader
model = FNO(
in_channels=cfg.arch.fno.in_channels,
out_channels=cfg.arch.decoder.out_features,
decoder_layers=cfg.arch.decoder.layers,
decoder_layer_size=cfg.arch.decoder.layer_size,
dimension=cfg.arch.fno.dimension,
latent_channels=cfg.arch.fno.latent_channels,
num_fno_layers=cfg.arch.fno.fno_layers,
num_fno_modes=cfg.arch.fno.fno_modes,
padding=cfg.arch.fno.padding,
).to(dist.device)
loss_fun = MSELoss(reduction="mean")
optimizer = Adam(model.parameters(), lr=cfg.scheduler.initial_lr)
scheduler = lr_scheduler.LambdaLR(
optimizer, lr_lambda=lambda step: cfg.scheduler.decay_rate**step
)
norm_vars = cfg.normaliser
normaliser = {
"permeability": (norm_vars.permeability.mean, norm_vars.permeability.std_dev),
"darcy": (norm_vars.darcy.mean, norm_vars.darcy.std_dev),
}
dataloader = Darcy2D(
resolution=cfg.training.resolution,
batch_size=cfg.training.batch_size,
normaliser=normaliser,
)
validator = GridValidator(loss_fun=MSELoss(reduction="mean"))
ckpt_args = {
"path": f"./checkpoints",
"optimizer": optimizer,
"scheduler": scheduler,
"models": model,
}
loaded_pseudo_epoch = load_checkpoint(device=dist.device, **ckpt_args)
# calculate steps per pseudo epoch
steps_per_pseudo_epoch = ceil(
cfg.training.pseudo_epoch_sample_size / cfg.training.batch_size
)
validation_iters = ceil(cfg.validation.sample_size / cfg.training.batch_size)
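    # example with hypothetical config values: pseudo_epoch_sample_size=2048 and
    # batch_size=32 give ceil(2048 / 32) = 64 optimiser steps per pseudo epoch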
log_args = {
"name_space": "train",
"num_mini_batch": steps_per_pseudo_epoch,
"epoch_alert_freq": 1,
}
if cfg.training.pseudo_epoch_sample_size % cfg.training.batch_size != 0:
log.warning(
f"increased pseudo_epoch_sample_size to multiple of \
batch size: {steps_per_pseudo_epoch*cfg.training.batch_size}"
)
if cfg.validation.sample_size % cfg.training.batch_size != 0:
log.warning(
f"increased validation sample size to multiple of \
batch size: {validation_iters*cfg.training.batch_size}"
)
# define forward passes for training and inference
@StaticCaptureTraining(
model=model, optim=optimizer, logger=log, use_amp=False, use_graphs=False
)
def forward_train(invars, target):
pred = model(invars)
loss = loss_fun(pred, target)
return loss
@StaticCaptureEvaluateNoGrad(
model=model, logger=log, use_amp=False, use_graphs=False
)
def forward_eval(invars):
return model(invars)
if loaded_pseudo_epoch == 0:
log.success("Training started...")
else:
log.warning(f"Resuming training from pseudo epoch {loaded_pseudo_epoch+1}.")
for pseudo_epoch in range(
max(1, loaded_pseudo_epoch + 1), cfg.training.max_pseudo_epochs + 1
):
# Wrap epoch in launch logger for console / MLFlow logs
with LaunchLogger(**log_args, epoch=pseudo_epoch) as logger:
for _, batch in zip(range(steps_per_pseudo_epoch), dataloader):
loss = forward_train(batch["permeability"], batch["darcy"])
logger.log_minibatch({"loss": loss.detach()})
logger.log_epoch({"Learning Rate": optimizer.param_groups[0]["lr"]})
# save checkpoint
if pseudo_epoch % cfg.training.rec_results_freq == 0:
save_checkpoint(**ckpt_args, epoch=pseudo_epoch)
# validation step
if pseudo_epoch % cfg.validation.validation_pseudo_epochs == 0:
with LaunchLogger("valid", epoch=pseudo_epoch) as logger:
total_loss = 0.0
for _, batch in zip(range(validation_iters), dataloader):
val_loss = validator.compare(
batch["permeability"],
batch["darcy"],
forward_eval(batch["permeability"]),
pseudo_epoch,
)
total_loss += val_loss
logger.log_epoch({"Validation error": total_loss / validation_iters})
# update learning rate
if pseudo_epoch % cfg.scheduler.decay_pseudo_epochs == 0:
scheduler.step()
save_checkpoint(**ckpt_args, epoch=cfg.training.max_pseudo_epochs)
log.success("Training completed *yay*")
if __name__ == "__main__":
darcy_trainer()
| modulus-launch-main | examples/cfd/darcy_fno/train_fno_darcy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import hydra
from typing import Tuple
from omegaconf import DictConfig
from torch.nn import MSELoss
from torch.optim import Adam, lr_scheduler
from torch.utils.data import DataLoader
from modulus.models.mlp import FullyConnected
from modulus.models.fno import FNO
from modulus.distributed import DistributedManager
from modulus.utils import StaticCaptureTraining, StaticCaptureEvaluateNoGrad
from modulus.launch.utils import load_checkpoint, save_checkpoint
from modulus.launch.logging import PythonLogger, LaunchLogger, initialize_mlflow
from utils import NestedDarcyDataset, GridValidator
def InitializeLoggers(cfg: DictConfig) -> Tuple[DistributedManager, PythonLogger]:
"""Class containing most important objects
In this class the infrastructure for training is set.
Parameters
----------
cfg : DictConfig
config file parameters
Returns
-------
Tuple[DistributedManager, PythonLogger]
"""
DistributedManager.initialize() # Only call this once in the entire script!
dist = DistributedManager() # call if required elsewhere
logger = PythonLogger(name="darcy_nested_fno")
assert hasattr(cfg, "model"), logger.error(
f"define which model to train: $ python {__file__.split(os.sep)[-1]} +model=<model_name>"
)
logger.info(f"training model {cfg.model}")
# initialize monitoring
initialize_mlflow(
experiment_name=f"Nested FNO, model: {cfg.model}",
experiment_desc=f"training model {cfg.model} for nested FNOs",
run_name=f"Nested FNO training, model: {cfg.model}",
run_desc=f"training model {cfg.model} for nested FNOs",
user_name="Gretchen Ross",
mode="offline",
)
LaunchLogger.initialize(use_mlflow=True) # Modulus launch logger
return dist, logger
class SetUpInfrastructure:
"""Class containing most important objects
In this class the infrastructure for training is set.
Parameters
----------
cfg : DictConfig
config file parameters
dist : DistributedManager
persistent class instance for storing parallel environment information
logger : PythonLogger
logger for command line output
"""
def __init__(
self, cfg: DictConfig, dist: DistributedManager, logger: PythonLogger
) -> None:
# define model, loss, optimiser, scheduler, data loader
level = int(cfg.model[-1])
model_cfg = cfg.arch[cfg.model]
loss_fun = MSELoss(reduction="mean")
norm = {
"permeability": (
cfg.normaliser.permeability.mean,
cfg.normaliser.permeability.std,
),
"darcy": (cfg.normaliser.darcy.mean, cfg.normaliser.darcy.std),
}
self.training_set = NestedDarcyDataset(
mode="train",
data_path=cfg.training.training_set,
model_name=cfg.model,
norm=norm,
log=logger,
)
self.valid_set = NestedDarcyDataset(
mode="train",
data_path=cfg.validation.validation_set,
model_name=cfg.model,
norm=norm,
log=logger,
)
logger.log(
f"Training set contains {len(self.training_set)} samples, "
+ f"validation set contains {len(self.valid_set)} samples."
)
self.train_loader = DataLoader(
self.training_set, batch_size=cfg.training.batch_size, shuffle=True
)
self.valid_loader = DataLoader(
self.valid_set, batch_size=cfg.validation.batch_size, shuffle=False
)
self.validator = GridValidator(loss_fun=loss_fun, norm=norm)
decoder = FullyConnected(
in_features=model_cfg.fno.latent_channels,
out_features=model_cfg.decoder.out_features,
num_layers=model_cfg.decoder.layers,
layer_size=model_cfg.decoder.layer_size,
)
self.model = FNO(
in_channels=model_cfg.fno.in_channels,
out_channels=model_cfg.decoder.out_features,
decoder_layers=model_cfg.decoder.layers,
decoder_layer_size=model_cfg.decoder.layer_size,
dimension=model_cfg.fno.dimension,
latent_channels=model_cfg.fno.latent_channels,
num_fno_layers=model_cfg.fno.fno_layers,
num_fno_modes=model_cfg.fno.fno_modes,
padding=model_cfg.fno.padding,
).to(dist.device)
self.optimizer = Adam(self.model.parameters(), lr=cfg.scheduler.initial_lr)
self.scheduler = lr_scheduler.LambdaLR(
self.optimizer, lr_lambda=lambda step: cfg.scheduler.decay_rate**step
)
self.log_args = {
"name_space": "train",
"num_mini_batch": len(self.train_loader),
"epoch_alert_freq": 1,
}
self.ckpt_args = {
"path": f"./checkpoints/all/{cfg.model}",
"optimizer": self.optimizer,
"scheduler": self.scheduler,
"models": self.model,
}
self.bst_ckpt_args = {
"path": f"./checkpoints/best/{cfg.model}",
"optimizer": self.optimizer,
"scheduler": self.scheduler,
"models": self.model,
}
# define forward for training and inference
@StaticCaptureTraining(
model=self.model,
optim=self.optimizer,
logger=logger,
use_amp=False,
use_graphs=False,
)
def _forward_train(invars, target):
pred = self.model(invars)
loss = loss_fun(pred, target)
return loss
@StaticCaptureEvaluateNoGrad(
model=self.model, logger=logger, use_amp=False, use_graphs=False
)
def _forward_eval(invars):
return self.model(invars)
self.forward_train = _forward_train
self.forward_eval = _forward_eval
def TrainModel(cfg: DictConfig, base: SetUpInfrastructure, loaded_epoch: int) -> None:
"""Training Loop
Parameters
----------
cfg : DictConfig
config file parameters
base : SetUpInfrastructure
important objects
loaded_epoch : int
epoch from which training is restarted, ==0 if starting from scratch
"""
min_valid_loss = 9.0e9
for epoch in range(max(1, loaded_epoch + 1), cfg.training.max_epochs + 1):
# Wrap epoch in launch logger for console / MLFlow logs
with LaunchLogger(**base.log_args, epoch=epoch) as log:
for batch in base.train_loader:
loss = base.forward_train(batch["permeability"], batch["darcy"])
log.log_minibatch({"loss": loss.detach()})
log.log_epoch({"Learning Rate": base.optimizer.param_groups[0]["lr"]})
# validation
if (
epoch % cfg.validation.validation_epochs == 0
or epoch % cfg.training.rec_results_freq == 0
or epoch == cfg.training.max_epochs
):
with LaunchLogger("valid", epoch=epoch) as log:
total_loss = 0.0
for batch in base.valid_loader:
loss = base.validator.compare(
batch["permeability"],
batch["darcy"],
base.forward_eval(batch["permeability"]),
epoch,
)
total_loss += loss * batch["darcy"].shape[0] / len(base.valid_set)
log.log_epoch({"Validation error": total_loss})
# save checkpoint
if (
epoch % cfg.training.rec_results_freq == 0
or epoch == cfg.training.max_epochs
):
save_checkpoint(**base.ckpt_args, epoch=epoch)
if (
total_loss < min_valid_loss
            ): # save separately if best checkpoint thus far
min_valid_loss = total_loss
for ckpt in glob.glob(base.bst_ckpt_args["path"] + "/*.pt"):
os.remove(ckpt)
save_checkpoint(**base.bst_ckpt_args, epoch=epoch)
# update learning rate
if epoch % cfg.scheduler.decay_epochs == 0:
base.scheduler.step()
@hydra.main(version_base="1.3", config_path=".", config_name="config.yaml")
def nested_darcy_trainer(cfg: DictConfig) -> None:
"""Training for the 2D nested Darcy flow problem.
This training script demonstrates how to set up a data-driven model for a nested 2D Darcy flow
using nested Fourier Neural Operators (nFNO, https://arxiv.org/abs/2210.17051). nFNOs are
basically a concatenation of individual FNO models. Individual FNOs can be trained independently
    and in any order. The order only becomes important for fine-tuning (tba) and inference.
"""
# initialize loggers
dist, logger = InitializeLoggers(cfg)
# set up infrastructure
base = SetUpInfrastructure(cfg, dist, logger)
# catch restart in case checkpoint exists
loaded_epoch = load_checkpoint(**base.ckpt_args, device=dist.device)
if loaded_epoch == 0:
logger.success("Training started...")
else:
logger.warning(f"Resuming training from epoch {loaded_epoch+1}.")
# train model
TrainModel(cfg, base, loaded_epoch)
logger.success("Training completed *yay*")
if __name__ == "__main__":
nested_darcy_trainer()
| modulus-launch-main | examples/cfd/darcy_nested_fnos/train_nested_darcy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import isdir
from os import mkdir
import numpy as np
from utils import DarcyInset2D, PlotNestedDarcy
def nested_darcy_generator() -> None:
"""Dataset Generator for the nested Darcy Problem
This script generates the training, validation and out-of-sample data sets
for the nested FNO problem and stores them in ./data, where trainer and
    inferencer will find them.
"""
out_dir = "./data/"
file_names = ["training_data.npy", "validation_data.npy", "out_of_sample.npy"]
sample_size = [8192, 2048, 2048]
max_batch_size = 128
resolution = 1024
glob_res = 256
fine_res = 128
buffer = 32
permea_freq = 3
max_n_insets = 2
fine_permeability_freq = 2
min_dist_frac = 1.8
device = "cuda"
n_plots = 10
fill_val = -99999
perm_norm = (0.0, 1.0)
darc_norm = (0.0, 1.0)
if not isdir(out_dir):
mkdir(out_dir)
assert resolution % glob_res == 0, "resolution needs to be multiple of glob_res"
ref_fac = resolution // glob_res
inset_size = fine_res + 2 * buffer
min_offset = (fine_res * (ref_fac - 1) + 1) // 2 + buffer * ref_fac
# force inset on coarse grid
if not min_offset % ref_fac == 0:
min_offset += ref_fac - min_offset % ref_fac
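    # worked example with the settings above (ref_fac = 1024 // 256 = 4, fine_res = 128,
    # buffer = 32): min_offset = (128 * 3 + 1) // 2 + 32 * 4 = 192 + 128 = 320, which is
    # already a multiple of ref_fac, so no shift is applied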
for dset in range(len(file_names)):
# compute batch size and number of iterations
batch_size = min(max_batch_size, sample_size[dset])
nr_iterations = (sample_size[dset] - 1) // max_batch_size + 1
datapipe = DarcyInset2D(
resolution=resolution,
batch_size=batch_size,
nr_permeability_freq=permea_freq,
max_permeability=2.0,
min_permeability=0.5,
max_iterations=30000,
iterations_per_convergence_check=10,
nr_multigrids=3,
normaliser={"permeability": perm_norm, "darcy": darc_norm},
device=device,
max_n_insets=max_n_insets,
fine_res=fine_res,
fine_permeability_freq=fine_permeability_freq,
min_offset=min_offset,
ref_fac=ref_fac,
min_dist_frac=min_dist_frac,
fill_val=fill_val,
)
dat = {}
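        # the loop below fills `dat` with one entry per sample, roughly structured as
        #   dat["<sample_idx>"] = {
        #       "ref0": {"0": {"permeability": ..., "darcy": ...}},  # coarse global field
        #       "ref1": {"<inset_idx>": {"permeability": ..., "darcy": ..., "pos": ...}},
        #   }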
samp_ind = -1
for _, sample in zip(range(nr_iterations), datapipe):
permea = sample["permeability"].cpu().detach().numpy()
darcy = sample["darcy"].cpu().detach().numpy()
pos = (sample["inset_pos"].cpu().detach().numpy()).astype(int)
assert (
np.where(pos == fill_val, 0, pos) % ref_fac
).sum() == 0, "inset off coarse grid"
# crop out refined region, allow for surrounding area, save in extra array
for ii in range(batch_size):
samp_ind += 1
samp_str = str(samp_ind)
# global fields
dat[samp_str] = {
"ref0": {
"0": {
"permeability": permea[ii, 0, ::ref_fac, ::ref_fac],
"darcy": darcy[ii, 0, ::ref_fac, ::ref_fac],
}
}
}
# insets
dat[samp_str]["ref1"] = {}
for pp in range(pos.shape[1]):
if pos[ii, pp, 0] == fill_val:
continue
xs = pos[ii, pp, 0] - buffer
ys = pos[ii, pp, 1] - buffer
dat[samp_str]["ref1"][str(pp)] = {
"permeability": permea[
ii, 0, xs : xs + inset_size, ys : ys + inset_size
],
"darcy": darcy[
ii, 0, xs : xs + inset_size, ys : ys + inset_size
],
"pos": (pos[ii, pp, :] - min_offset) // ref_fac,
}
meta = {"ref_fac": ref_fac, "buffer": buffer, "fine_res": fine_res}
np.save(out_dir + file_names[dset], {"meta": meta, "fields": dat})
# plot some fields
for idx in range(n_plots):
PlotNestedDarcy(dat, idx)
if __name__ == "__main__":
nested_darcy_generator()
| modulus-launch-main | examples/cfd/darcy_nested_fnos/generate_nested_darcy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hydra
from torch import cat, FloatTensor
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
from omegaconf import DictConfig, open_dict
from torch.utils.data import DataLoader
from modulus.models.mlp import FullyConnected
from modulus.models.fno import FNO
from modulus.utils import StaticCaptureEvaluateNoGrad
from modulus.distributed import DistributedManager
from modulus.launch.logging import PythonLogger
from modulus.launch.utils import load_checkpoint
from utils import NestedDarcyDataset, PlotNestedDarcy
def plot_assembled(perm, darc):
headers = ["permeability", "darcy"]
plt.rcParams.update({"font.size": 28})
fig, ax = plt.subplots(1, 2, figsize=(15 * 2, 15), sharey=True)
im = []
im.append(ax[0].imshow(perm))
im.append(ax[1].imshow(darc))
for ii in range(len(im)):
fig.colorbar(im[ii], ax=ax[ii], location="bottom", fraction=0.046, pad=0.04)
ax[ii].set_title(headers[ii])
    fig.savefig(join("./", "test_test.png"))
def EvaluateModel(
cfg: DictConfig,
model_name: str,
norm: dict = {"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
parent_result: FloatTensor = None,
log: PythonLogger = None,
):
# define model and load weights
dist = DistributedManager()
log.info(f"evaluating model {model_name}")
model_cfg = cfg.arch[model_name]
model = FNO(
in_channels=model_cfg.fno.in_channels,
out_channels=model_cfg.decoder.out_features,
decoder_layers=model_cfg.decoder.layers,
decoder_layer_size=model_cfg.decoder.layer_size,
dimension=model_cfg.fno.dimension,
latent_channels=model_cfg.fno.latent_channels,
num_fno_layers=model_cfg.fno.fno_layers,
num_fno_modes=model_cfg.fno.fno_modes,
padding=model_cfg.fno.padding,
).to(dist.device)
load_checkpoint(
path=f"./checkpoints/best/{model_name}", device=dist.device, models=model
)
# prepare data for inference
dataset = NestedDarcyDataset(
mode="eval",
data_path=cfg.inference.inference_set,
model_name=model_name,
norm=norm,
log=log,
parent_prediction=parent_result,
)
dataloader = DataLoader(dataset, batch_size=cfg.inference.batch_size, shuffle=False)
with open_dict(cfg):
cfg.ref_fac = dataset.ref_fac
cfg.fine_res = dataset.fine_res
cfg.buffer = dataset.buffer
    # store positions of insets if refinement level > 0, i.e. if not the global model
if int(model_name[-1]) > 0:
pos = dataset.position
else:
pos = None
# define forward method
@StaticCaptureEvaluateNoGrad(
model=model, logger=log, use_amp=False, use_graphs=False
)
def forward_eval(invars):
return model(invars)
# evaluate and invert normalisation
invars, result = [], []
for batch in dataloader:
invars.append(batch["permeability"])
result.append(forward_eval(batch["permeability"]))
invars = cat(invars, dim=0).detach()
result = cat(result, dim=0).detach()
return pos, invars, result
def AssembleSolutionToDict(cfg: DictConfig, perm: dict, darcy: dict, pos: dict):
dat, idx = {}, 0
for ii in range(perm["ref0"].shape[0]):
samp = str(ii)
dat[samp] = {
"ref0": {
"0": {
"permeability": perm["ref0"][ii, 0, ...],
"darcy": darcy["ref0"][ii, 0, ...],
}
}
}
# insets
dat[samp]["ref1"] = {}
for ins, ps in pos["ref1"][samp].items():
dat[samp]["ref1"][ins] = {
"permeability": perm["ref1"][idx, 1, ...],
"darcy": darcy["ref1"][idx, 0, ...],
"pos": ps,
}
idx += 1
if cfg.inference.save_result:
np.save(
"./nested_darcy_results.npy",
dat,
)
return dat
def AssembleToSingleField(cfg: DictConfig, dat: dict):
ref_fac = cfg.ref_fac
glob_size = dat["0"]["ref0"]["0"]["darcy"].shape[0]
inset_size = dat["0"]["ref1"]["0"]["darcy"].shape[0]
size = ref_fac * glob_size
min_offset = (cfg.fine_res * (ref_fac - 1) + 1) // 2 + cfg.buffer * ref_fac
perm = np.zeros((len(dat), size, size), dtype=np.float32)
darc = np.zeros_like(perm)
for ii, (_, field) in enumerate(dat.items()):
        # extract global permeability and expand to size x size
perm[ii, ...] = np.kron(
field["ref0"]["0"]["permeability"],
np.ones((ref_fac, ref_fac), dtype=field["ref0"]["0"]["permeability"].dtype),
)
darc[ii, ...] = np.kron(
field["ref0"]["0"]["darcy"],
np.ones((ref_fac, ref_fac), dtype=field["ref0"]["0"]["darcy"].dtype),
)
# overwrite refined regions
for __, inset in field["ref1"].items():
pos = inset["pos"] * ref_fac + min_offset
perm[
ii, pos[0] : pos[0] + inset_size, pos[1] : pos[1] + inset_size
] = inset["permeability"]
darc[
ii, pos[0] : pos[0] + inset_size, pos[1] : pos[1] + inset_size
] = inset["darcy"]
return {"permeability": perm, "darcy": darc}, ref_fac
def GetRelativeL2(pred, tar):
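    # relative L2 error: mean over samples of ||pred - target||_2 / ||target||_2,
    # computed on the assembled global fields; the per-point factor `div` cancels
    # in the ratio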
    div = 1.0 / (tar["darcy"].shape[1] * tar["darcy"].shape[2])
err = pred["darcy"] - tar["darcy"]
l2_tar = np.sqrt(np.einsum("ijk,ijk->i", tar["darcy"], tar["darcy"]) * div)
l2_err = np.sqrt(np.einsum("ijk,ijk->i", err, err) * div)
return np.mean(l2_err / l2_tar)
def ComputeErrorNorm(cfg: DictConfig, pred_dict: dict, log: PythonLogger, ref0_pred):
    # assemble ref0 and ref1 solutions alongside ground truth into a single scalar field
log.info("computing relative L2-norm of error...")
tar_dict = np.load(cfg.inference.inference_set, allow_pickle=True).item()["fields"]
pred, ref_fac = AssembleToSingleField(cfg, pred_dict)
tar = AssembleToSingleField(cfg, tar_dict)[0]
assert np.all(
tar["permeability"] == pred["permeability"]
), "Permeability from file is not equal to analysed permeability"
# compute l2 norm of error
rel_l2_err = GetRelativeL2(pred, tar)
log.log(f" ...which is {rel_l2_err}.")
if cfg.inference.get_ref0_error_norm:
ref0_pred = np.kron(
ref0_pred, np.ones((ref_fac, ref_fac), dtype=ref0_pred.dtype)
)
rel_l2_err = GetRelativeL2({"darcy": ref0_pred}, tar)
log.log(f"The error with ref_0 only would be {rel_l2_err}.")
return
@hydra.main(version_base="1.3", config_path=".", config_name="config")
def nested_darcy_evaluation(cfg: DictConfig) -> None:
"""Inference of the nested 2D Darcy flow benchmark problem.
This inference script consecutively evaluates the models of nested FNO for the
nested Darcy problem, taking into account the result of the model associated
with the parent level. All results are stored in a numpy file and a selection
of samples can be plotted in the end.
"""
# initialize monitoring, models and normalisation
DistributedManager.initialize() # Only call this once in the entire script!
log = PythonLogger(name="darcy_fno")
model_names = sorted(list(cfg.arch.keys()))
norm = {
"permeability": (
cfg.normaliser.permeability.mean,
cfg.normaliser.permeability.std,
),
"darcy": (cfg.normaliser.darcy.mean, cfg.normaliser.darcy.std),
}
    # evaluate models and invert normalisation
perm, darcy, pos, result, ref0_pred = {}, {}, {}, None, None
for name in model_names:
position, invars, result = EvaluateModel(cfg, name, norm, result, log)
perm[name] = (
(invars * norm["permeability"][1] + norm["permeability"][0])
.detach()
.cpu()
.numpy()
)
darcy[name] = (
(result * norm["darcy"][1] + norm["darcy"][0]).detach().cpu().numpy()
)
pos[name] = position
if cfg.inference.get_ref0_error_norm and int(name[-1]) == 0:
ref0_pred = np.copy(darcy[name]).squeeze()
# port solution format to dict structure like in input files
pred_dict = AssembleSolutionToDict(cfg, perm, darcy, pos)
# compute error norm
if cfg.inference.get_error_norm:
ComputeErrorNorm(cfg, pred_dict, log, ref0_pred)
# plot some fields
if cfg.inference.n_plots > 0:
log.info("plotting results")
for idx in range(cfg.inference.n_plots):
PlotNestedDarcy(pred_dict, idx)
if __name__ == "__main__":
nested_darcy_evaluation()
| modulus-launch-main | examples/cfd/darcy_nested_fnos/evaluate_nested_darcy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os.path
import mlflow
import warp as wp
import numpy as np
import matplotlib.pyplot as plt
from typing import Union, Tuple, Dict
from torch import FloatTensor, Tensor
from torch.nn import MSELoss
from modulus.distributed import DistributedManager
from modulus.launch.logging import PythonLogger
from modulus.datapipes.benchmarks.darcy import Darcy2D
from modulus.datapipes.benchmarks.kernels.initialization import init_uniform_random_4d
from modulus.datapipes.benchmarks.kernels.utils import (
fourier_to_array_batched_2d,
threshold_3d,
)
class NestedDarcyDataset:
"""Nested Darcy Dataset
A Dataset class for loading nested Darcy data generated with generate_nested_darcy.py
    during training. The class takes care of loading the correct level and associated
    information from its parent level.
Parameters
----------
    mode : str
        'train' or 'eval', selects whether parent fields are taken from the data or
        from a parent-level prediction
    data_path : str
        Path to numpy dict file containing the data
    model_name : str
        Name of the refinement level which shall be loaded, e.g. 'ref0' or 'ref1'
    norm : Dict, optional
        mean and standard deviation for each channel to normalise input and target
    log : PythonLogger
        logger for command line output
    parent_prediction : FloatTensor, optional
        prediction of the parent level, required in 'eval' mode for levels above 0
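
    Example
    -------
    A rough usage sketch; the data path is illustrative and assumes a dataset
    created with generate_nested_darcy.py, and the distributed manager must be
    initialised beforehand:

    >>> from modulus.distributed import DistributedManager
    >>> DistributedManager.initialize()
    >>> dataset = NestedDarcyDataset(
    ...     mode="train",
    ...     data_path="./data/training_data.npy",
    ...     model_name="ref0",
    ... )
    >>> sample = dataset[0]
    >>> sorted(sample.keys())
    ['darcy', 'permeability']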
"""
def __init__(
self,
mode: str,
data_path: str = None,
model_name: str = None,
norm: dict = {"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
log: PythonLogger = None,
parent_prediction: FloatTensor = None,
) -> None:
self.dist = DistributedManager()
self.data_path = os.path.abspath(data_path)
self.model_name = model_name
# self.level = level
self.norm = norm
self.log = log
self.mode = mode
assert self.mode in [
"train",
"eval",
], "mode in NestedDarcyDataset must be train or eval."
if mode == "eval" and int(self.model_name[-1]) > 0:
assert (
parent_prediction is not None
), f"pass parent result to evaluate level {int(self.model_name[-1])}"
parent_prediction = parent_prediction.detach().cpu().numpy()
self.load_dataset(parent_prediction)
def load_dataset(self, parent_prediction: FloatTensor = None) -> None:
try:
contents = np.load(self.data_path, allow_pickle=True).item()
except IOError as err:
self.log.error(f"Unable to find or load file {self.data_path}")
exit()
        # load input variables, copy to device and normalise
dat = contents["fields"]
self.ref_fac = contents["meta"]["ref_fac"]
self.buffer = contents["meta"]["buffer"]
self.fine_res = contents["meta"]["fine_res"]
mod = self.model_name
perm, darc, par_pred, self.position = [], [], [], {}
for id, samp in dat.items():
if int(mod[-1]) > 0:
self.position[id] = {}
for jd, fields in samp[mod].items():
perm.append(fields["permeability"][None, None, ...])
darc.append(fields["darcy"][None, None, ...])
if int(mod[-1]) > 0: # if not on global level
xy_size = perm[-1].shape[-1]
pos = fields["pos"]
self.position[id][jd] = pos
if self.mode == "eval":
parent = parent_prediction[int(id), 0, ...]
elif self.mode == "train":
parent = (
samp[f"ref{int(mod[-1])-1}"]["0"]["darcy"]
- self.norm["darcy"][0]
) / self.norm["darcy"][1]
par_pred.append(
parent[
pos[0] : pos[0] + xy_size,
pos[1] : pos[1] + xy_size,
][None, None, ...]
)
perm = (
np.concatenate(perm, axis=0) - self.norm["permeability"][0]
) / self.norm["permeability"][1]
darc = (np.concatenate(darc, axis=0) - self.norm["darcy"][0]) / self.norm[
"darcy"
][1]
if int(mod[-1]) > 0:
par_pred = np.concatenate(par_pred, axis=0)
perm = np.concatenate((par_pred, perm), axis=1)
self.invars = torch.from_numpy(perm).float().to(self.dist.device)
self.outvars = torch.from_numpy(darc).float().to(self.dist.device)
self.length = self.invars.size()[0]
def __getitem__(self, idx: int):
return {"permeability": self.invars[idx, ...], "darcy": self.outvars[idx, ...]}
def __len__(self):
return self.length
class GridValidator:
"""Grid Validator
The validator compares model output and target, inverts normalisation and plots a sample
Parameters
----------
loss_fun : MSELoss
loss function for assessing validation error
norm : Dict, optional
mean and standard deviation for each channel to normalise input and target
out_dir : str, optional
directory to which plots shall be stored
font_size : float, optional
font size used in figures
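
    Example
    -------
    A minimal sketch with synthetic tensors of shape [batch, channel, x, y]; an
    active MLFlow setup is assumed since compare() logs a figure:

    >>> import torch
    >>> from torch.nn import MSELoss
    >>> validator = GridValidator(loss_fun=MSELoss())
    >>> invar = torch.ones(1, 1, 64, 64)
    >>> target = torch.ones(1, 1, 64, 64)
    >>> prediction = torch.ones(1, 1, 64, 64)
    >>> loss = validator.compare(invar, target, prediction, step=1)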
"""
def __init__(
self,
loss_fun: MSELoss,
norm: dict = {"permeability": (0.0, 1.0), "darcy": (0.0, 1.0)},
out_dir: str = "./outputs/validators",
font_size: float = 28.0,
) -> None:
self.norm = norm
self.criterion = loss_fun
self.font_size = font_size
self.headers = ("invar", "truth", "prediction", "relative error")
self.out_dir = os.path.abspath(os.path.join(os.getcwd(), out_dir))
if not os.path.exists(self.out_dir):
os.makedirs(self.out_dir)
def compare(
self,
invar: FloatTensor,
target: FloatTensor,
prediction: FloatTensor,
step: int,
) -> float:
"""compares model output, target and plots everything
Parameters
----------
invar : FloatTensor
input to model
target : FloatTensor
ground truth
prediction : FloatTensor
model output
step : int
iteration counter
Returns
-------
float
validation error
"""
loss = self.criterion(prediction, target)
norm = self.norm
# pick first sample from batch
invar = invar * norm["permeability"][1] + norm["permeability"][0]
target = target * norm["darcy"][1] + norm["darcy"][0]
prediction = prediction * norm["darcy"][1] + norm["darcy"][0]
invar = invar.cpu().numpy()[0, -1, :, :]
target = target.cpu().numpy()[0, 0, :, :]
prediction = prediction.detach().cpu().numpy()[0, 0, :, :]
plt.close("all")
plt.rcParams.update({"font.size": self.font_size})
fig, ax = plt.subplots(1, 4, figsize=(15 * 4, 15), sharey=True)
im = []
im.append(ax[0].imshow(invar))
im.append(ax[1].imshow(target))
im.append(ax[2].imshow(prediction))
im.append(ax[3].imshow((prediction - target) / norm["darcy"][1]))
for ii in range(len(im)):
fig.colorbar(im[ii], ax=ax[ii], location="bottom", fraction=0.046, pad=0.04)
ax[ii].set_title(self.headers[ii])
mlflow.log_figure(fig, f"val_step_{step}.png")
fig.savefig(os.path.join(self.out_dir, f"validation_step_{step}.png"))
return loss
def PlotNestedDarcy(dat: dict, idx: int) -> None:
"""Plot fields from the nested Darcy case
Parameters
----------
dat : dict
dictionary containing fields
    idx : int
        index of example to plot
"""
fields = dat[str(idx)]
n_insets = len(fields["ref1"])
fig, ax = plt.subplots(n_insets + 1, 4, figsize=(20, 5 * (n_insets + 1)))
vmin = fields["ref0"]["0"]["darcy"].min()
vmax = fields["ref0"]["0"]["darcy"].max()
ax[0, 0].imshow(fields["ref0"]["0"]["permeability"])
ax[0, 0].set_title("permeability glob")
ax[0, 1].imshow(fields["ref0"]["0"]["darcy"], vmin=vmin, vmax=vmax)
ax[0, 1].set_title("darcy glob")
ax[0, 2].axis("off")
ax[0, 3].axis("off")
for ii in range(n_insets):
loc = fields["ref1"][str(ii)]
inset_size = loc["darcy"].shape[1]
ax[ii + 1, 0].imshow(loc["permeability"])
ax[ii + 1, 0].set_title(f"permeability fine {ii}")
ax[ii + 1, 1].imshow(loc["darcy"], vmin=vmin, vmax=vmax)
ax[ii + 1, 1].set_title(f"darcy fine {ii}")
ax[ii + 1, 2].imshow(
fields["ref0"]["0"]["permeability"][
loc["pos"][0] : loc["pos"][0] + inset_size,
loc["pos"][1] : loc["pos"][1] + inset_size,
]
)
ax[ii + 1, 2].set_title(f"permeability zoomed {ii}")
ax[ii + 1, 3].imshow(
fields["ref0"]["0"]["darcy"][
loc["pos"][0] : loc["pos"][0] + inset_size,
loc["pos"][1] : loc["pos"][1] + inset_size,
],
vmin=vmin,
vmax=vmax,
)
ax[ii + 1, 3].set_title(f"darcy zoomed {ii}")
fig.tight_layout()
plt.savefig(f"sample_{idx:02d}.png")
plt.close()
@wp.kernel
def fourier_to_array_batched_2d_cropped(
array: wp.array3d(dtype=float),
fourier: wp.array4d(dtype=float),
nr_freq: int,
lx: int,
ly: int,
bounds: wp.array3d(dtype=int),
fill_val: int,
): # pragma: no cover
"""Array of Fourier amplitudes to batched 2d spatial array
Parameters
----------
array : wp.array3d
Spatial array
fourier : wp.array4d
Array of Fourier amplitudes
nr_freq : int
Number of frequencies in Fourier array
lx : int
Grid size x
ly : int
Grid size y
    bounds : wp.array3d
        Lower x/y start indices of each inset window per batch sample
    fill_val : int
        Fill value marking unused inset slots; such windows are skipped
"""
b, p, x, y = wp.tid()
if bounds[b, p, 0] == fill_val:
return
x += bounds[b, p, 0]
y += bounds[b, p, 1]
array[b, x, y] = 0.0
dx = 6.28318 / wp.float32(lx)
dy = 6.28318 / wp.float32(ly)
rx = dx * wp.float32(x)
ry = dy * wp.float32(y)
for i in range(nr_freq):
for j in range(nr_freq):
ri = wp.float32(i)
rj = wp.float32(j)
ss = fourier[0, b, i, j] * wp.sin(ri * rx) * wp.sin(rj * ry)
cs = fourier[1, b, i, j] * wp.cos(ri * rx) * wp.sin(rj * ry)
sc = fourier[2, b, i, j] * wp.sin(ri * rx) * wp.cos(rj * ry)
cc = fourier[3, b, i, j] * wp.cos(ri * rx) * wp.cos(rj * ry)
wp.atomic_add(
array, b, x, y, 1.0 / (wp.float32(nr_freq) ** 2.0) * (ss + cs + sc + cc)
)
class DarcyInset2D(Darcy2D):
"""2D Darcy flow benchmark problem datapipe.
This datapipe continuously generates solutions to the 2D Darcy equation with variable
    permeability. All samples are generated on the fly and are meant to serve as a benchmark
    problem for testing data-driven models. Permeability is drawn from a random Fourier
    series and thresholded to give a piecewise constant function. The solution is obtained
using a GPU enabled multi-grid Jacobi iterative method.
Parameters
----------
resolution : int, optional
Resolution to run simulation at, by default 256
batch_size : int, optional
Batch size of simulations, by default 64
nr_permeability_freq : int, optional
Number of frequencies to use for generating random permeability. Higher values
will give higher freq permeability fields., by default 5
max_permeability : float, optional
Max permeability, by default 2.0
min_permeability : float, optional
Min permeability, by default 0.5
max_iterations : int, optional
Maximum iterations to use for each multi-grid, by default 30000
convergence_threshold : float, optional
Solver L-Infinity convergence threshold, by default 1e-6
iterations_per_convergence_check : int, optional
Number of Jacobi iterations to run before checking convergence, by default 1000
nr_multigrids : int, optional
Number of multi-grid levels, by default 4
normaliser : Union[Dict[str, Tuple[float, float]], None], optional
Dictionary with keys `permeability` and `darcy`. The values for these keys are two floats corresponding to mean and std `(mean, std)`.
device : Union[str, torch.device], optional
        Device for the datapipe to place data on, by default "cuda"
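    max_n_insets : int, optional
        Maximum number of refined insets per sample, by default 3
    fine_res : int, optional
        Resolution of the refined insets, by default 32
    fine_permeability_freq : int, optional
        Number of Fourier frequencies for the permeability within insets, by default 10
    min_offset : int, optional
        Minimum pixel offset of an inset from the domain boundary, by default 48
    ref_fac : int, optional
        Refinement factor between inset and global grid, by default None
    min_dist_frac : float, optional
        Minimum distance between insets as a fraction of the inset size, by default 1.7
    fill_val : int, optional
        Fill value marking unused inset slots, by default -99999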
Raises
------
ValueError
        Incompatible multi-grid and resolution settings
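
    Example
    -------
    A rough sketch, requiring a CUDA device and NVIDIA Warp; the values are
    illustrative, see generate_nested_darcy.py for a complete configuration:

    >>> datapipe = DarcyInset2D(
    ...     resolution=256,
    ...     batch_size=4,
    ...     fine_res=32,
    ...     ref_fac=4,
    ...     min_offset=48,
    ... )
    >>> batch = next(iter(datapipe))
    >>> sorted(batch.keys())
    ['darcy', 'inset_pos', 'permeability']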
"""
def __init__(
self,
resolution: int = 256,
batch_size: int = 64,
nr_permeability_freq: int = 5,
max_permeability: float = 2.0,
min_permeability: float = 0.5,
max_iterations: int = 30000,
convergence_threshold: float = 1e-6,
iterations_per_convergence_check: int = 1000,
nr_multigrids: int = 4,
normaliser: Union[Dict[str, Tuple[float, float]], None] = None,
device: Union[str, torch.device] = "cuda",
max_n_insets: int = 3,
fine_res: int = 32,
fine_permeability_freq: int = 10,
min_offset: int = 48,
ref_fac: int = None,
min_dist_frac: float = 1.7,
fill_val: int = -99999,
):
super().__init__(
resolution,
batch_size,
nr_permeability_freq,
max_permeability,
min_permeability,
max_iterations,
convergence_threshold,
iterations_per_convergence_check,
nr_multigrids,
normaliser,
device,
)
self.max_n_insets = max_n_insets
self.fine_res = fine_res
self.fine_freq = fine_permeability_freq
self.ref_fac = ref_fac
assert (
resolution % self.ref_fac == 0
), "simulation res must be multiple of ref_fac"
# force inset on coarse grid
if not min_offset % self.ref_fac == 0:
min_offset += self.ref_fac - min_offset % self.ref_fac
self.beg_min = min_offset
self.beg_max = resolution - min_offset - fine_res - self.ref_fac
self.bounds = None
self.min_dist_frac = min_dist_frac
self.fill_val = fill_val
assert (
self.max_n_insets <= 3
), f"at most 3 insets supported, change max_n_insets accordingly"
assert (self.beg_max - self.beg_min) % ref_fac == 0, "lsdhfgn3x!!!!"
def initialize_batch(self) -> None:
"""Initializes arrays for new batch of simulations"""
# initialize permeability
self.permeability.zero_()
seed = np.random.randint(np.iinfo(np.uint64).max, dtype=np.uint64)
wp.launch(
kernel=init_uniform_random_4d,
dim=self.fourier_dim,
inputs=[self.rand_fourier, -1.0, 1.0, seed],
device=self.device,
)
wp.launch(
kernel=fourier_to_array_batched_2d,
dim=self.dim,
inputs=[
self.permeability,
self.rand_fourier,
self.nr_permeability_freq,
self.resolution,
self.resolution,
],
device=self.device,
)
rr = np.random.randint(
low=0,
high=(self.beg_max - self.beg_min) // self.ref_fac,
size=(self.batch_size, self.max_n_insets, 2),
)
n_insets = np.random.randint(
low=1,
high=rr.shape[1] + 1,
size=(self.batch_size,),
)
# check that regions do not overlap and have distance
min_dist = self.min_dist_frac * self.fine_res // self.ref_fac + 1
print("adjusting inset positions")
for ib in range(self.batch_size):
if n_insets[ib] <= 1:
rr[ib, 1:, :] = self.fill_val
continue
else:
while (
abs(rr[ib, 0, 0] - rr[ib, 1, 0]) < min_dist
and abs(rr[ib, 0, 1] - rr[ib, 1, 1]) < min_dist
):
rr[ib, 0, :] = np.random.randint(
low=0,
high=(self.beg_max - self.beg_min) // self.ref_fac,
size=(2,),
)
rr[ib, 1, :] = np.random.randint(
low=0,
high=(self.beg_max - self.beg_min) // self.ref_fac,
size=(2,),
)
if n_insets[ib] <= 2:
rr[ib, 2:, :] = self.fill_val
continue
else:
while (
abs(rr[ib, 0, 0] - rr[ib, 2, 0]) < min_dist
and abs(rr[ib, 0, 1] - rr[ib, 2, 1]) < min_dist
) or (
abs(rr[ib, 1, 0] - rr[ib, 2, 0]) < min_dist
and abs(rr[ib, 1, 1] - rr[ib, 2, 1]) < min_dist
):
rr[ib, 2, :] = np.random.randint(
low=0,
high=(self.beg_max - self.beg_min) // self.ref_fac,
size=(2,),
)
print("done")
rr = np.where(rr != self.fill_val, (rr * self.ref_fac) + self.beg_min, rr)
self.bounds = wp.array(rr, dtype=int, device=self.device)
wp.launch(
kernel=fourier_to_array_batched_2d_cropped,
dim=(self.batch_size, self.bounds.shape[1], self.fine_res, self.fine_res),
inputs=[
self.permeability,
self.rand_fourier,
self.fine_freq,
self.fine_res,
self.fine_res,
self.bounds,
self.fill_val,
],
device=self.device,
)
wp.launch(
kernel=threshold_3d,
dim=self.dim,
inputs=[
self.permeability,
0.0,
self.min_permeability,
self.max_permeability,
],
device=self.device,
)
# zero darcy arrays
self.darcy0.zero_()
self.darcy1.zero_()
def batch_generator(self) -> Tuple[Tensor, Tensor]:
# run simulation
self.generate_batch()
# convert warp arrays to pytorch
permeability = wp.to_torch(self.permeability)
darcy = wp.to_torch(self.darcy0)
# add channel dims
permeability = torch.unsqueeze(permeability, axis=1)
darcy = torch.unsqueeze(darcy, axis=1)
# crop edges by 1 from multi-grid
permeability = permeability[:, :, : self.resolution, : self.resolution]
darcy = darcy[:, :, : self.resolution, : self.resolution]
# normalize values
if self.normaliser is not None:
permeability = (
permeability - self.normaliser["permeability"][0]
) / self.normaliser["permeability"][1]
darcy = (darcy - self.normaliser["darcy"][0]) / self.normaliser["darcy"][1]
# CUDA graphs static copies
if self.output_k is None:
self.output_k = permeability
self.output_p = darcy
else:
self.output_k.data.copy_(permeability)
self.output_p.data.copy_(darcy)
return {"permeability": self.output_k, "darcy": self.output_p}
def __iter__(self) -> Tuple[Tensor, Tensor, Tensor]:
"""
Yields
------
        Iterator[Dict[str, Tensor]]
            Infinite iterator that returns batches of permeability and darcy pressure
            fields of size [batch, 1, resolution, resolution] plus the inset positions
"""
# infinite generator
while True:
batch = self.batch_generator()
batch["inset_pos"] = wp.to_torch(self.bounds)
yield batch
| modulus-launch-main | examples/cfd/darcy_nested_fnos/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from pydantic import BaseModel
from typing import Tuple, Optional
class Constants(BaseModel):
"""vortex shedding constants"""
# data configs
data_dir: str = "./raw_dataset/cylinder_flow/cylinder_flow"
# training configs
batch_size: int = 1
epochs: int = 25
num_training_samples: int = 400
num_training_time_steps: int = 300
lr: float = 0.0001
lr_decay_rate: float = 0.9999991
num_input_features: int = 6
num_output_features: int = 3
num_edge_features: int = 3
ckpt_path: str = "checkpoints"
ckpt_name: str = "model.pt"
# performance configs
amp: bool = False
jit: bool = False
# test & visualization configs
num_test_samples: int = 10
num_test_time_steps: int = 300
viz_vars: Tuple[str, ...] = ("u", "v", "p")
frame_skip: int = 10
frame_interval: int = 1
# wb configs
wandb_mode: str = "disabled"
watch_model: bool = False
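# Illustrative note: since Constants is a pydantic BaseModel, any field can be
# overridden at construction time, e.g. Constants(epochs=50, lr=1e-3); the training
# and inference scripts in this example simply use the defaults via Constants().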
| modulus-launch-main | examples/cfd/vortex_shedding_mgn/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from dgl.dataloading import GraphDataLoader
from torch.cuda.amp import autocast, GradScaler
from torch.nn.parallel import DistributedDataParallel
import time, os
import wandb as wb
try:
import apex
except ImportError:
pass
from modulus.models.meshgraphnet import MeshGraphNet
from modulus.datapipes.gnn.vortex_shedding_dataset import VortexSheddingDataset
from modulus.distributed.manager import DistributedManager
from modulus.launch.logging import (
PythonLogger,
initialize_wandb,
RankZeroLoggingWrapper,
)
from modulus.launch.utils import load_checkpoint, save_checkpoint
from constants import Constants
# Instantiate constants
C = Constants()
class MGNTrainer:
def __init__(self, wb, dist, rank_zero_logger):
self.dist = dist
# instantiate dataset
dataset = VortexSheddingDataset(
name="vortex_shedding_train",
data_dir=C.data_dir,
split="train",
num_samples=C.num_training_samples,
num_steps=C.num_training_time_steps,
)
# instantiate dataloader
self.dataloader = GraphDataLoader(
dataset,
batch_size=C.batch_size,
shuffle=True,
drop_last=True,
pin_memory=True,
use_ddp=dist.world_size > 1,
)
# instantiate the model
self.model = MeshGraphNet(
C.num_input_features, C.num_edge_features, C.num_output_features
)
if C.jit:
self.model = torch.jit.script(self.model).to(dist.device)
else:
self.model = self.model.to(dist.device)
if C.watch_model and not C.jit and dist.rank == 0:
wb.watch(self.model)
# distributed data parallel for multi-node training
if dist.world_size > 1:
self.model = DistributedDataParallel(
self.model,
device_ids=[dist.local_rank],
output_device=dist.device,
broadcast_buffers=dist.broadcast_buffers,
find_unused_parameters=dist.find_unused_parameters,
)
# enable train mode
self.model.train()
# instantiate loss, optimizer, and scheduler
self.criterion = torch.nn.MSELoss()
try:
self.optimizer = apex.optimizers.FusedAdam(self.model.parameters(), lr=C.lr)
rank_zero_logger.info("Using FusedAdam optimizer")
except:
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=C.lr)
self.scheduler = torch.optim.lr_scheduler.LambdaLR(
self.optimizer, lr_lambda=lambda epoch: C.lr_decay_rate**epoch
)
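        # note: train() calls scheduler.step() once per mini-batch, so the lambda's
        # `epoch` argument effectively counts optimizer steps and the decay is applied
        # per iteration rather than per epoch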
self.scaler = GradScaler()
# load checkpoint
if dist.world_size > 1:
torch.distributed.barrier()
self.epoch_init = load_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=self.model,
optimizer=self.optimizer,
scheduler=self.scheduler,
scaler=self.scaler,
device=dist.device,
)
def train(self, graph):
graph = graph.to(self.dist.device)
self.optimizer.zero_grad()
loss = self.forward(graph)
self.backward(loss)
self.scheduler.step()
return loss
def forward(self, graph):
# forward pass
with autocast(enabled=C.amp):
pred = self.model(graph.ndata["x"], graph.edata["x"], graph)
loss = self.criterion(pred, graph.ndata["y"])
return loss
def backward(self, loss):
# backward pass
if C.amp:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
else:
loss.backward()
self.optimizer.step()
if __name__ == "__main__":
# initialize distributed manager
DistributedManager.initialize()
dist = DistributedManager()
# save constants to JSON file
if dist.rank == 0:
os.makedirs(C.ckpt_path, exist_ok=True)
with open(
os.path.join(C.ckpt_path, C.ckpt_name.replace(".pt", ".json")), "w"
) as json_file:
json_file.write(C.json(indent=4))
# initialize loggers
initialize_wandb(
project="Modulus-Launch",
entity="Modulus",
name="Vortex_Shedding-Training",
group="Vortex_Shedding-DDP-Group",
mode=C.wandb_mode,
) # Wandb logger
logger = PythonLogger("main") # General python logger
rank_zero_logger = RankZeroLoggingWrapper(logger, dist) # Rank 0 logger
logger.file_logging()
trainer = MGNTrainer(wb, dist, rank_zero_logger)
start = time.time()
rank_zero_logger.info("Training started...")
for epoch in range(trainer.epoch_init, C.epochs):
for graph in trainer.dataloader:
loss = trainer.train(graph)
rank_zero_logger.info(
f"epoch: {epoch}, loss: {loss:10.3e}, time per epoch: {(time.time()-start):10.3e}"
)
wb.log({"loss": loss.detach().cpu()})
# save checkpoint
if dist.world_size > 1:
torch.distributed.barrier()
if dist.rank == 0:
save_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=trainer.model,
optimizer=trainer.optimizer,
scheduler=trainer.scheduler,
scaler=trainer.scaler,
epoch=epoch,
)
logger.info(f"Saved model on rank {dist.rank}")
start = time.time()
rank_zero_logger.info("Training completed!")
| modulus-launch-main | examples/cfd/vortex_shedding_mgn/train.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch, dgl
from dgl.dataloading import GraphDataLoader
import torch
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import animation
from matplotlib import tri as mtri
import os
from matplotlib.patches import Rectangle
from modulus.models.meshgraphnet import MeshGraphNet
from modulus.datapipes.gnn.vortex_shedding_dataset import VortexSheddingDataset
from modulus.launch.logging import PythonLogger
from modulus.launch.utils import load_checkpoint
from constants import Constants
# Instantiate constants
C = Constants()
class MGNRollout:
def __init__(self, logger):
# set device
self.device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using {self.device} device")
# instantiate dataset
self.dataset = VortexSheddingDataset(
name="vortex_shedding_test",
data_dir=C.data_dir,
split="test",
num_samples=C.num_test_samples,
num_steps=C.num_test_time_steps,
)
# instantiate dataloader
self.dataloader = GraphDataLoader(
self.dataset,
batch_size=1, # TODO add support for batch_size > 1
shuffle=False,
drop_last=False,
)
# instantiate the model
self.model = MeshGraphNet(
C.num_input_features, C.num_edge_features, C.num_output_features
)
if C.jit:
self.model = torch.jit.script(self.model).to(self.device)
else:
self.model = self.model.to(self.device)
        # enable eval mode
self.model.eval()
# load checkpoint
_ = load_checkpoint(
os.path.join(C.ckpt_path, C.ckpt_name),
models=self.model,
device=self.device,
)
self.var_identifier = {"u": 0, "v": 1, "p": 2}
def predict(self):
self.pred, self.exact, self.faces, self.graphs = [], [], [], []
stats = {
key: value.to(self.device) for key, value in self.dataset.node_stats.items()
}
for i, (graph, cells, mask) in enumerate(self.dataloader):
graph = graph.to(self.device)
# denormalize data
graph.ndata["x"][:, 0:2] = self.dataset.denormalize(
graph.ndata["x"][:, 0:2], stats["velocity_mean"], stats["velocity_std"]
)
graph.ndata["y"][:, 0:2] = self.dataset.denormalize(
graph.ndata["y"][:, 0:2],
stats["velocity_diff_mean"],
stats["velocity_diff_std"],
)
graph.ndata["y"][:, [2]] = self.dataset.denormalize(
graph.ndata["y"][:, [2]],
stats["pressure_mean"],
stats["pressure_std"],
)
# inference step
invar = graph.ndata["x"].clone()
if i % (C.num_test_time_steps - 1) != 0:
invar[:, 0:2] = self.pred[i - 1][:, 0:2].clone()
i += 1
invar[:, 0:2] = self.dataset.normalize_node(
invar[:, 0:2], stats["velocity_mean"], stats["velocity_std"]
)
pred_i = self.model(invar, graph.edata["x"], graph).detach() # predict
# denormalize prediction
pred_i[:, 0:2] = self.dataset.denormalize(
pred_i[:, 0:2], stats["velocity_diff_mean"], stats["velocity_diff_std"]
)
pred_i[:, 2] = self.dataset.denormalize(
pred_i[:, 2], stats["pressure_mean"], stats["pressure_std"]
)
invar[:, 0:2] = self.dataset.denormalize(
invar[:, 0:2], stats["velocity_mean"], stats["velocity_std"]
)
# do not update the "wall_boundary" & "outflow" nodes
mask = torch.cat((mask, mask), dim=-1).to(self.device)
pred_i[:, 0:2] = torch.where(
mask, pred_i[:, 0:2], torch.zeros_like(pred_i[:, 0:2])
)
# integration
self.pred.append(
torch.cat(
((pred_i[:, 0:2] + invar[:, 0:2]), pred_i[:, [2]]), dim=-1
).cpu()
)
self.exact.append(
torch.cat(
(
(graph.ndata["y"][:, 0:2] + graph.ndata["x"][:, 0:2]),
graph.ndata["y"][:, [2]],
),
dim=-1,
).cpu()
)
self.faces.append(torch.squeeze(cells).numpy())
self.graphs.append(graph.cpu())
def init_animation(self, idx):
self.pred_i = [var[:, idx] for var in self.pred]
self.exact_i = [var[:, idx] for var in self.exact]
# fig configs
plt.rcParams["image.cmap"] = "inferno"
self.fig, self.ax = plt.subplots(2, 1, figsize=(16, 9))
# Set background color to black
self.fig.set_facecolor("black")
self.ax[0].set_facecolor("black")
self.ax[1].set_facecolor("black")
# make animations dir
if not os.path.exists("./animations"):
os.makedirs("./animations")
def animate(self, num):
num *= C.frame_skip
graph = self.graphs[num]
y_star = self.pred_i[num].numpy()
y_exact = self.exact_i[num].numpy()
triang = mtri.Triangulation(
graph.ndata["mesh_pos"][:, 0].numpy(),
graph.ndata["mesh_pos"][:, 1].numpy(),
self.faces[num],
)
self.ax[0].cla()
self.ax[0].set_aspect("equal")
self.ax[0].set_axis_off()
navy_box = Rectangle((0, 0), 1.4, 0.4, facecolor="navy")
self.ax[0].add_patch(navy_box) # Add a navy box to the first subplot
self.ax[0].tripcolor(triang, y_star, vmin=np.min(y_star), vmax=np.max(y_star))
self.ax[0].triplot(triang, "ko-", ms=0.5, lw=0.3)
self.ax[0].set_title("Modulus MeshGraphNet Prediction", color="white")
self.ax[1].cla()
self.ax[1].set_aspect("equal")
self.ax[1].set_axis_off()
navy_box = Rectangle((0, 0), 1.4, 0.4, facecolor="navy")
self.ax[1].add_patch(navy_box) # Add a navy box to the second subplot
self.ax[1].tripcolor(
triang, y_exact, vmin=np.min(y_exact), vmax=np.max(y_exact)
)
self.ax[1].triplot(triang, "ko-", ms=0.5, lw=0.3)
self.ax[1].set_title("Ground Truth", color="white")
# Adjust subplots to minimize empty space
self.ax[0].set_aspect("auto", adjustable="box")
self.ax[1].set_aspect("auto", adjustable="box")
self.ax[0].autoscale(enable=True, tight=True)
self.ax[1].autoscale(enable=True, tight=True)
self.fig.subplots_adjust(
left=0.05, bottom=0.05, right=0.95, top=0.95, wspace=0.1, hspace=0.2
)
return self.fig
if __name__ == "__main__":
logger = PythonLogger("main") # General python logger
logger.file_logging()
logger.info("Rollout started...")
rollout = MGNRollout(logger)
idx = [rollout.var_identifier[k] for k in C.viz_vars]
rollout.predict()
for i in idx:
rollout.init_animation(i)
ani = animation.FuncAnimation(
rollout.fig,
rollout.animate,
frames=len(rollout.graphs) // C.frame_skip,
interval=C.frame_interval,
)
ani.save("animations/animation_" + C.viz_vars[i] + ".gif")
logger.info(f"Created animation for {C.viz_vars[i]}")
| modulus-launch-main | examples/cfd/vortex_shedding_mgn/inference.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
import os
import numpy as np
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
opt = TestOptions().parse(save=False)
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
if opt.dataset_mode == 'temporal':
opt.dataset_mode = 'test'
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)
input_nc = 1 if opt.label_nc != 0 else opt.input_nc
save_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
print('Doing %d frames' % len(dataset))
for i, data in enumerate(dataset):
if i >= opt.how_many:
break
if data['change_seq']:
model.fake_B_prev = None
_, _, height, width = data['A'].size()
A = Variable(data['A']).view(1, -1, input_nc, height, width)
B = Variable(data['B']).view(1, -1, opt.output_nc, height, width) if len(data['B'].size()) > 2 else None
inst = Variable(data['inst']).view(1, -1, 1, height, width) if len(data['inst'].size()) > 2 else None
generated = model.inference(A, B, inst)
if opt.label_nc != 0:
real_A = util.tensor2label(generated[1], opt.label_nc)
else:
c = 3 if opt.input_nc == 3 else 1
real_A = util.tensor2im(generated[1][:c], normalize=False)
visual_list = [('real_A', real_A),
('fake_B', util.tensor2im(generated[0].data[0]))]
visuals = OrderedDict(visual_list)
img_path = data['A_path']
print('process image... %s' % img_path)
visualizer.save_images(save_dir, visuals, img_path) | vid2vid-master | test.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import time
import os
import torch
from subprocess import call
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model, create_optimizer, init_params, save_models, update_models
import util.util as util
from util.visualizer import Visualizer
def train():
opt = TrainOptions().parse()
if opt.debug:
opt.display_freq = 1
opt.print_freq = 1
opt.nThreads = 1
### initialize dataset
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training videos = %d' % dataset_size)
### initialize models
models = create_model(opt)
modelG, modelD, flowNet, optimizer_G, optimizer_D, optimizer_D_T = create_optimizer(opt, models)
### set parameters
n_gpus, tG, tD, tDB, s_scales, t_scales, input_nc, output_nc, \
start_epoch, epoch_iter, print_freq, total_steps, iter_path = init_params(opt, modelG, modelD, data_loader)
visualizer = Visualizer(opt)
### real training starts here
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
for idx, data in enumerate(dataset, start=epoch_iter):
if total_steps % print_freq == 0:
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == 0
n_frames_total, n_frames_load, t_len = data_loader.dataset.init_data_params(data, n_gpus, tG)
fake_B_prev_last, frames_all = data_loader.dataset.init_data(t_scales)
for i in range(0, n_frames_total, n_frames_load):
input_A, input_B, inst_A = data_loader.dataset.prepare_data(data, i, input_nc, output_nc)
###################################### Forward Pass ##########################
####### generator
fake_B, fake_B_raw, flow, weight, real_A, real_Bp, fake_B_last = modelG(input_A, input_B, inst_A, fake_B_prev_last)
####### discriminator
### individual frame discriminator
real_B_prev, real_B = real_Bp[:, :-1], real_Bp[:, 1:] # the collection of previous and current real frames
flow_ref, conf_ref = flowNet(real_B, real_B_prev) # reference flows and confidences
fake_B_prev = modelG.module.compute_fake_B_prev(real_B_prev, fake_B_prev_last, fake_B)
fake_B_prev_last = fake_B_last
losses = modelD(0, reshape([real_B, fake_B, fake_B_raw, real_A, real_B_prev, fake_B_prev, flow, weight, flow_ref, conf_ref]))
losses = [ torch.mean(x) if x is not None else 0 for x in losses ]
loss_dict = dict(zip(modelD.module.loss_names, losses))
### temporal discriminator
# get skipped frames for each temporal scale
frames_all, frames_skipped = modelD.module.get_all_skipped_frames(frames_all, \
real_B, fake_B, flow_ref, conf_ref, t_scales, tD, n_frames_load, i, flowNet)
# run discriminator for each temporal scale
loss_dict_T = []
for s in range(t_scales):
if frames_skipped[0][s] is not None:
losses = modelD(s+1, [frame_skipped[s] for frame_skipped in frames_skipped])
losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
loss_dict_T.append(dict(zip(modelD.module.loss_names_T, losses)))
# collect losses
loss_G, loss_D, loss_D_T, t_scales_act = modelD.module.get_losses(loss_dict, loss_dict_T, t_scales)
###################################### Backward Pass #################################
# update generator weights
loss_backward(opt, loss_G, optimizer_G)
# update individual discriminator weights
loss_backward(opt, loss_D, optimizer_D)
# update temporal discriminator weights
for s in range(t_scales_act):
loss_backward(opt, loss_D_T[s], optimizer_D_T[s])
if i == 0: fake_B_first = fake_B[0, 0] # the first generated image in this sequence
if opt.debug:
call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
############## Display results and errors ##########
### print out errors
if total_steps % print_freq == 0:
t = (time.time() - iter_start_time) / print_freq
errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
for s in range(len(loss_dict_T)):
errors.update({k+str(s): v.data.item() if not isinstance(v, int) else v for k, v in loss_dict_T[s].items()})
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
visuals = util.save_all_tensors(opt, real_A, fake_B, fake_B_first, fake_B_raw, real_B, flow_ref, conf_ref, flow, weight, modelD)
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
save_models(opt, epoch, epoch_iter, total_steps, visualizer, iter_path, modelG, modelD)
if epoch_iter > dataset_size - opt.batchSize:
epoch_iter = 0
break
# end of epoch
iter_end_time = time.time()
visualizer.vis_print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch and update model params
save_models(opt, epoch, epoch_iter, total_steps, visualizer, iter_path, modelG, modelD, end_of_epoch=True)
update_models(opt, epoch, modelG, modelD, data_loader)
def loss_backward(opt, loss, optimizer):
optimizer.zero_grad()
if opt.fp16:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
def reshape(tensors):
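    # merge the batch and time dimensions of 5-D (b, t, c, h, w) tensors into
    # (b*t, c, h, w) so the frame discriminator processes individual frames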
if tensors is None: return None
if isinstance(tensors, list):
return [reshape(tensor) for tensor in tensors]
_, _, ch, h, w = tensors.size()
return tensors.contiguous().view(-1, ch, h, w)
if __name__ == "__main__":
train() | vid2vid-master | train.py |
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--how_many', type=int, default=300, help='how many test images to run')
self.parser.add_argument('--use_real_img', action='store_true', help='use real image for first frame')
self.parser.add_argument('--start_frame', type=int, default=0, help='frame index to start inference on')
self.isTrain = False
| vid2vid-master | options/test_options.py |
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--niter', type=int, default=10, help='# of iter at starting learning rate')
self.parser.add_argument('--niter_decay', type=int, default=10, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
self.parser.add_argument('--TTUR', action='store_true', help='Use TTUR training scheme')
self.parser.add_argument('--gan_mode', type=str, default='ls', help='(ls|original|hinge)')
self.parser.add_argument('--pool_size', type=int, default=1, help='the size of image buffer that stores previously generated images')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# for discriminators
self.parser.add_argument('--num_D', type=int, default=2, help='number of patch scales in each discriminator')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='number of layers in discriminator')
self.parser.add_argument('--no_vgg', action='store_true', help='do not use VGG feature matching loss')
self.parser.add_argument('--no_ganFeat', action='store_true', help='do not match discriminator features')
self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching')
self.parser.add_argument('--sparse_D', action='store_true', help='use sparse temporal discriminators to save memory')
# for temporal
self.parser.add_argument('--lambda_T', type=float, default=10.0, help='weight for temporal loss')
self.parser.add_argument('--lambda_F', type=float, default=10.0, help='weight for flow loss')
self.parser.add_argument('--n_frames_D', type=int, default=3, help='number of frames to feed into temporal discriminator')
self.parser.add_argument('--n_scales_temporal', type=int, default=2, help='number of temporal scales in the temporal discriminator')
self.parser.add_argument('--max_frames_per_gpu', type=int, default=1, help='max number of frames to load into one GPU at a time')
self.parser.add_argument('--max_frames_backpropagate', type=int, default=1, help='max number of frames to backpropagate')
self.parser.add_argument('--max_t_step', type=int, default=1, help='max spacing between neighboring sampled frames. If greater than 1, the network may randomly skip frames during training.')
self.parser.add_argument('--n_frames_total', type=int, default=30, help='the overall number of frames in a sequence to train with')
self.parser.add_argument('--niter_step', type=int, default=5, help='how many epochs do we change training batch size again')
self.parser.add_argument('--niter_fix_global', type=int, default=0, help='if specified, only train the finest spatial layer for the given iterations')
self.isTrain = True
| vid2vid-master | options/train_options.py |
vid2vid-master | options/__init__.py |
|
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
self.parser.add_argument('--dataroot', type=str, default='datasets/Cityscapes/')
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=512, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--label_nc', type=int, default=0, help='number of labels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
# network arch
self.parser.add_argument('--netG', type=str, default='composite', help='selects model to use for netG')
self.parser.add_argument('--ngf', type=int, default=128, help='# of gen filters in first conv layer')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--n_blocks', type=int, default=9, help='number of resnet blocks in generator')
self.parser.add_argument('--n_downsample_G', type=int, default=3, help='number of downsampling layers in netG')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--n_gpus_gen', type=int, default=-1, help='how many gpus are used for generator (the rest are used for discriminator). -1 means use all gpus')
self.parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--dataset_mode', type=str, default='temporal', help='chooses how datasets are loaded. [unaligned | aligned | single]')
self.parser.add_argument('--model', type=str, default='vid2vid', help='chooses which model to use. vid2vid, test')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
self.parser.add_argument('--display_winsize', type=int, default=512, help='display window size')
self.parser.add_argument('--display_id', type=int, default=0, help='window id of the web display')
self.parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. Requires tensorflow installed')
self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
self.parser.add_argument('--resize_or_crop', type=str, default='scaleWidth', help='scaling and cropping of images at load time [resize_and_crop|crop|scaledCrop|scaleWidth|scaleWidth_and_crop|scaleWidth_and_scaledCrop|scaleHeight|scaleHeight_and_crop] etc')
        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
# more features as input
self.parser.add_argument('--use_instance', action='store_true', help='if specified, add instance map as feature for class A')
self.parser.add_argument('--label_feat', action='store_true', help='if specified, encode label features as input')
self.parser.add_argument('--feat_num', type=int, default=3, help='number of encoded features')
self.parser.add_argument('--nef', type=int, default=32, help='# of encoder filters in first conv layer')
self.parser.add_argument('--load_features', action='store_true', help='if specified, load precomputed feature maps')
self.parser.add_argument('--netE', type=str, default='simple', help='which model to use for encoder')
self.parser.add_argument('--n_downsample_E', type=int, default=3, help='number of downsampling layers in netE')
# for cascaded resnet
        self.parser.add_argument('--n_blocks_local', type=int, default=3, help='number of resnet blocks in the outermost multiscale resnet')
self.parser.add_argument('--n_local_enhancers', type=int, default=1, help='number of cascaded layers')
# temporal
self.parser.add_argument('--n_frames_G', type=int, default=3, help='number of input frames to feed into generator, i.e., n_frames_G-1 is the number of frames we look into past')
self.parser.add_argument('--n_scales_spatial', type=int, default=1, help='number of spatial scales in the coarse-to-fine generator')
self.parser.add_argument('--no_first_img', action='store_true', help='if specified, generator also tries to synthesize first image')
self.parser.add_argument('--use_single_G', action='store_true', help='if specified, use single frame generator for the first frame')
        self.parser.add_argument('--fg', action='store_true', help='if specified, use the foreground-background separation model')
self.parser.add_argument('--fg_labels', type=str, default='26', help='label indices for foreground objects')
self.parser.add_argument('--no_flow', action='store_true', help='if specified, do not use flow warping and directly synthesize frames')
# face specific
self.parser.add_argument('--no_canny_edge', action='store_true', help='do *not* use canny edge as input')
self.parser.add_argument('--no_dist_map', action='store_true', help='do *not* use distance transform map as input')
self.parser.add_argument('--random_scale_points', action='store_true', help='randomly scale face keypoints a bit to create different results')
# pose specific
self.parser.add_argument('--densepose_only', action='store_true', help='use only densepose as input')
self.parser.add_argument('--openpose_only', action='store_true', help='use only openpose as input')
self.parser.add_argument('--add_face_disc', action='store_true', help='add face discriminator')
self.parser.add_argument('--remove_face_labels', action='store_true', help='remove face labels to better adapt to different face shapes')
self.parser.add_argument('--random_drop_prob', type=float, default=0.05, help='the probability to randomly drop each pose segment during training')
self.parser.add_argument('--basic_point_only', action='store_true', help='only use basic joint keypoints for openpose, without hand or face keypoints')
# miscellaneous
self.parser.add_argument('--load_pretrain', type=str, default='', help='if specified, load the pretrained model')
self.parser.add_argument('--debug', action='store_true', help='if specified, use small dataset for debug')
self.parser.add_argument('--fp16', action='store_true', default=False, help='train with AMP')
self.parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
self.initialized = True
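    # Helper: convert a comma-separated id string into a list of non-negative ints,
    # e.g. parse_str('0,1,2') -> [0, 1, 2]; negative entries (such as -1 for CPU) are dropped.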
def parse_str(self, ids):
str_ids = ids.split(',')
ids_list = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
ids_list.append(id)
return ids_list
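    # Parse the command-line options, echo them to the console, and (when save=True)
    # write them to <checkpoints_dir>/<name>/opt.txt so the run can be reproduced later.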
def parse(self, save=True):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
self.opt.fg_labels = self.parse_str(self.opt.fg_labels)
self.opt.gpu_ids = self.parse_str(self.opt.gpu_ids)
if self.opt.n_gpus_gen == -1:
self.opt.n_gpus_gen = len(self.opt.gpu_ids)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
if save:
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| vid2vid-master | options/base_options.py |
import random
import numpy as np
import torch
from torch.autograd import Variable
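# Pool of previously generated images. Once full, query() returns a 50/50 mix of the
# current fake images and older ones from the buffer, which helps stabilize GAN training
# by showing the discriminator a history of generated samples.
# Typical use (sketch): pool = ImagePool(50); fake_for_D = pool.query(fake_images)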
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images.data:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size-1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
| vid2vid-master | util/image_pool.py |
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import inspect, re
import numpy as np
import os
import collections
from PIL import Image
import cv2
from collections import OrderedDict
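# Gather the inputs/outputs of the current iteration into an OrderedDict of displayable
# numpy images, keyed by name (consumed by the Visualizer for logging and the HTML page).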
def save_all_tensors(opt, real_A, fake_B, fake_B_first, fake_B_raw, real_B, flow_ref, conf_ref, flow, weight, modelD):
if opt.label_nc != 0:
input_image = tensor2label(real_A, opt.label_nc)
elif opt.dataset_mode == 'pose':
input_image = tensor2im(real_A)
if real_A.size()[2] == 6:
input_image2 = tensor2im(real_A[0, -1, 3:])
input_image[input_image2 != 0] = input_image2[input_image2 != 0]
else:
c = 3 if opt.input_nc >= 3 else 1
input_image = tensor2im(real_A[0, -1, :c], normalize=False)
if opt.use_instance:
edges = tensor2im(real_A[0, -1, -1:], normalize=False)
input_image += edges[:,:,np.newaxis]
if opt.add_face_disc:
ys, ye, xs, xe = modelD.module.get_face_region(real_A[0, -1:])
if ys is not None:
input_image[ys, xs:xe, :] = input_image[ye, xs:xe, :] = input_image[ys:ye, xs, :] = input_image[ys:ye, xe, :] = 255
visual_list = [('input_image', input_image),
('fake_image', tensor2im(fake_B)),
('fake_first_image', tensor2im(fake_B_first)),
('fake_raw_image', tensor2im(fake_B_raw)),
('real_image', tensor2im(real_B)),
('flow_ref', tensor2flow(flow_ref)),
('conf_ref', tensor2im(conf_ref, normalize=False))]
if flow is not None:
visual_list += [('flow', tensor2flow(flow)),
('weight', tensor2im(weight, normalize=False))]
visuals = OrderedDict(visual_list)
return visuals
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
if isinstance(image_tensor, torch.autograd.Variable):
image_tensor = image_tensor.data
if len(image_tensor.size()) == 5:
image_tensor = image_tensor[0, -1]
if len(image_tensor.size()) == 4:
image_tensor = image_tensor[0]
image_tensor = image_tensor[:3]
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
#image_numpy = (np.transpose(image_numpy, (1, 2, 0)) * std + mean) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1:
image_numpy = image_numpy[:,:,0]
return image_numpy.astype(imtype)
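# Converts a label-map tensor (one-hot or raw ids) into a color image via the Colorize map below.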
def tensor2label(output, n_label, imtype=np.uint8):
if isinstance(output, torch.autograd.Variable):
output = output.data
if len(output.size()) == 5:
output = output[0, -1]
if len(output.size()) == 4:
output = output[0]
output = output.cpu().float()
if output.size()[0] > 1:
output = output.max(0, keepdim=True)[1]
#print(output.size())
output = Colorize(n_label)(output)
output = np.transpose(output.numpy(), (1, 2, 0))
#img = Image.fromarray(output, "RGB")
return output.astype(imtype)
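# Converts a 2-channel optical-flow tensor into an RGB visualization: hue encodes the flow
# direction and brightness encodes the flow magnitude.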
def tensor2flow(output, imtype=np.uint8):
if isinstance(output, torch.autograd.Variable):
output = output.data
if len(output.size()) == 5:
output = output[0, -1]
if len(output.size()) == 4:
output = output[0]
output = output.cpu().float().numpy()
output = np.transpose(output, (1, 2, 0))
#mag = np.max(np.sqrt(output[:,:,0]**2 + output[:,:,1]**2))
#print(mag)
hsv = np.zeros((output.shape[0], output.shape[1], 3), dtype=np.uint8)
hsv[:, :, 0] = 255
hsv[:, :, 1] = 255
mag, ang = cv2.cartToPolar(output[..., 0], output[..., 1])
hsv[..., 0] = ang * 180 / np.pi / 2
hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return rgb
def add_dummy_to_tensor(tensors, add_size=0):
if add_size == 0 or tensors is None: return tensors
if isinstance(tensors, list):
return [add_dummy_to_tensor(tensor, add_size) for tensor in tensors]
if isinstance(tensors, torch.Tensor):
dummy = torch.zeros_like(tensors)[:add_size]
tensors = torch.cat([dummy, tensors])
return tensors
def remove_dummy_from_tensor(tensors, remove_size=0):
if remove_size == 0 or tensors is None: return tensors
if isinstance(tensors, list):
return [remove_dummy_from_tensor(tensor, remove_size) for tensor in tensors]
if isinstance(tensors, torch.Tensor):
tensors = tensors[remove_size:]
return tensors
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def print_numpy(x, val=True, shp=False):
x = x.astype(np.float64)
if shp:
print('shape,', x.shape)
if val:
x = x.flatten()
print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
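# Color palette for label maps: fixed palettes for the 35-class and 20-class Cityscapes label
# sets, otherwise a procedurally generated palette built by bit-interleaving the label id.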
def labelcolormap(N):
if N == 35: # Cityscapes train
cmap = np.array([( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), (111, 74, 0), ( 81, 0, 81),
(128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153),
(180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220, 0),
(107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142), ( 0, 0, 70),
( 0, 60,100), ( 0, 0, 90), ( 0, 0,110), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0,142)],
dtype=np.uint8)
elif N == 20: # Cityscapes eval
cmap = np.array([(128, 64,128), (244, 35,232), ( 70, 70, 70), (102,102,156), (190,153,153), (153,153,153), (250,170, 30),
(220,220, 0), (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142),
( 0, 0, 70), ( 0, 60,100), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0, 0)],
dtype=np.uint8)
else:
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r, g, b = 0, 0, 0
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7-j))
g = g ^ (np.uint8(str_id[-2]) << (7-j))
b = b ^ (np.uint8(str_id[-3]) << (7-j))
id = id >> 3
cmap[i, 0], cmap[i, 1], cmap[i, 2] = r, g, b
return cmap
def colormap(n):
cmap = np.zeros([n, 3]).astype(np.uint8)
for i in np.arange(n):
r, g, b = np.zeros(3)
for j in np.arange(8):
r = r + (1 << (7-j))*((i & (1 << (3*j))) >> (3*j))
g = g + (1 << (7-j))*((i & (1 << (3*j+1))) >> (3*j+1))
b = b + (1 << (7-j))*((i & (1 << (3*j+2))) >> (3*j+2))
cmap[i, :] = np.array([r, g, b])
return cmap
class Colorize(object):
def __init__(self, n=35):
self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image | vid2vid-master | util/util.py |
import dominate
from dominate.tags import *
import os
class HTML:
    def __init__(self, web_dir, title, refresh=0):
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
# print(self.img_dir)
self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
return self.img_dir
def add_header(self, str):
with self.doc:
h3(str)
def add_table(self, border=1):
self.t = table(border=border, style="table-layout: fixed;")
self.doc.add(self.t)
def add_images(self, ims, txts, links, width=400, height=0):
self.add_table()
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
if height != 0:
img(style="width:%dpx;height:%dpx" % (width, height), src=os.path.join('images', im))
else:
img(style="width:%dpx" % (width), src=os.path.join('images', im))
br()
p(txt)
def save(self):
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__':
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims = []
txts = []
links = []
for n in range(4):
ims.append('image_%d.jpg' % n)
txts.append('text_%d' % n)
links.append('image_%d.jpg' % n)
html.add_images(ims, txts, links)
html.save()
| vid2vid-master | util/html.py |
vid2vid-master | util/__init__.py |
|
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import numpy as np
import os
import time
from . import util
from . import html
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Visualizer():
def __init__(self, opt):
self.opt = opt
self.tf_log = opt.tf_log
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
if self.tf_log:
import tensorflow as tf
self.tf = tf
self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
self.writer = tf.summary.FileWriter(self.log_dir)
if self.use_html:
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, step):
if self.tf_log: # show images in tensorboard output
img_summaries = []
for label, image_numpy in visuals.items():
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(image_numpy).save(s, format="jpeg")
# Create an Image object
img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
# Create a Summary value
img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))
# Create and write Summary
summary = self.tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
if self.use_html: # save images to a html file
for label, image_numpy in visuals.items():
if isinstance(image_numpy, list):
for i in range(len(image_numpy)):
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i))
util.save_image(image_numpy[i], img_path)
else:
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
if isinstance(image_numpy, list):
for i in range(len(image_numpy)):
img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i)
ims.append(img_path)
txts.append(label+str(i))
links.append(img_path)
else:
img_path = 'epoch%.3d_%s.jpg' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
if len(ims) < 6:
webpage.add_images(ims, txts, links, width=self.win_size)
else:
num = int(round(len(ims)/2.0))
webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, errors, step):
if self.tf_log:
for tag, value in errors.items():
summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in sorted(errors.items()):
if v != 0:
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
# save image to the disk
def save_images(self, image_dir, visuals, image_path, webpage=None):
dirname = os.path.basename(os.path.dirname(image_path[0]))
image_dir = os.path.join(image_dir, dirname)
util.mkdir(image_dir)
name = os.path.basename(image_path[0])
name = os.path.splitext(name)[0]
if webpage is not None:
webpage.add_header(name)
ims, txts, links = [], [], []
for label, image_numpy in visuals.items():
save_ext = 'png' if 'real_A' in label and self.opt.label_nc != 0 else 'jpg'
image_name = '%s_%s.%s' % (label, name, save_ext)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
if webpage is not None:
ims.append(image_name)
txts.append(label)
links.append(image_name)
if webpage is not None:
webpage.add_images(ims, txts, links, width=self.win_size)
def vis_print(self, message):
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
| vid2vid-master | util/visualizer.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import numpy as np
import math
import torch
import torch.nn.functional as F
import os
import sys
from collections import OrderedDict
from torch.autograd import Variable
import util.util as util
from .base_model import BaseModel
from . import networks
class Vid2VidModelG(BaseModel):
def name(self):
return 'Vid2VidModelG'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
if not opt.debug:
torch.backends.cudnn.benchmark = True
# define net G
self.n_scales = opt.n_scales_spatial
self.use_single_G = opt.use_single_G
self.split_gpus = (self.opt.n_gpus_gen < len(self.opt.gpu_ids)) and (self.opt.batchSize == 1)
input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc
netG_input_nc = input_nc * opt.n_frames_G
if opt.use_instance:
netG_input_nc += opt.n_frames_G
prev_output_nc = (opt.n_frames_G - 1) * opt.output_nc
if opt.openpose_only:
opt.no_flow = True
self.netG0 = networks.define_G(netG_input_nc, opt.output_nc, prev_output_nc, opt.ngf, opt.netG,
opt.n_downsample_G, opt.norm, 0, self.gpu_ids, opt)
for s in range(1, self.n_scales):
ngf = opt.ngf // (2**s)
setattr(self, 'netG'+str(s), networks.define_G(netG_input_nc, opt.output_nc, prev_output_nc, ngf, opt.netG+'Local',
opt.n_downsample_G, opt.norm, s, self.gpu_ids, opt))
print('---------- Networks initialized -------------')
print('-----------------------------------------------')
# load networks
if not self.isTrain or opt.continue_train or opt.load_pretrain:
for s in range(self.n_scales):
self.load_network(getattr(self, 'netG'+str(s)), 'G'+str(s), opt.which_epoch, opt.load_pretrain)
self.netG_i = self.load_single_G() if self.use_single_G else None
# define training variables
if self.isTrain:
self.n_gpus = self.opt.n_gpus_gen if self.opt.batchSize == 1 else 1 # number of gpus for running generator
self.n_frames_bp = 1 # number of frames to backpropagate the loss
self.n_frames_per_gpu = min(self.opt.max_frames_per_gpu, self.opt.n_frames_total // self.n_gpus) # number of frames in each GPU
self.n_frames_load = self.n_gpus * self.n_frames_per_gpu # number of frames in all GPUs
if self.opt.debug:
print('training %d frames at once, using %d gpus, frames per gpu = %d' % (self.n_frames_load,
self.n_gpus, self.n_frames_per_gpu))
# set loss functions and optimizers
if self.isTrain:
self.old_lr = opt.lr
self.finetune_all = opt.niter_fix_global == 0
if not self.finetune_all:
print('------------ Only updating the finest scale for %d epochs -----------' % opt.niter_fix_global)
# initialize optimizer G
params = list(getattr(self, 'netG'+str(self.n_scales-1)).parameters())
if self.finetune_all:
for s in range(self.n_scales-1):
params += list(getattr(self, 'netG'+str(s)).parameters())
if opt.TTUR:
beta1, beta2 = 0, 0.9
lr = opt.lr / 2
else:
beta1, beta2 = opt.beta1, 0.999
lr = opt.lr
self.optimizer_G = torch.optim.Adam(params, lr=lr, betas=(beta1, beta2))
def encode_input(self, input_map, real_image, inst_map=None):
size = input_map.size()
self.bs, tG, self.height, self.width = size[0], size[1], size[3], size[4]
input_map = input_map.data.cuda()
if self.opt.label_nc != 0:
# create one-hot vector for label map
oneHot_size = (self.bs, tG, self.opt.label_nc, self.height, self.width)
input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
input_label = input_label.scatter_(2, input_map.long(), 1.0)
input_map = input_label
input_map = Variable(input_map)
if self.opt.use_instance:
inst_map = inst_map.data.cuda()
edge_map = Variable(self.get_edges(inst_map))
input_map = torch.cat([input_map, edge_map], dim=2)
pool_map = None
if self.opt.dataset_mode == 'face':
pool_map = inst_map.data.cuda()
# real images for training
if real_image is not None:
real_image = Variable(real_image.data.cuda())
return input_map, real_image, pool_map
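    # Training forward pass: encode the inputs, then autoregressively synthesize a chunk of
    # frames, each conditioned on the previously generated (or given) frames at every scale.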
def forward(self, input_A, input_B, inst_A, fake_B_prev, dummy_bs=0):
tG = self.opt.n_frames_G
gpu_split_id = self.opt.n_gpus_gen + 1
if input_A.get_device() == self.gpu_ids[0]:
input_A, input_B, inst_A, fake_B_prev = util.remove_dummy_from_tensor([input_A, input_B, inst_A, fake_B_prev], dummy_bs)
if input_A.size(0) == 0: return self.return_dummy(input_A)
real_A_all, real_B_all, _ = self.encode_input(input_A, input_B, inst_A)
is_first_frame = fake_B_prev is None
if is_first_frame: # at the beginning of a sequence; needs to generate the first frame
fake_B_prev = self.generate_first_frame(real_A_all, real_B_all)
netG = []
for s in range(self.n_scales): # broadcast netG to all GPUs used for generator
netG_s = getattr(self, 'netG'+str(s))
netG_s = torch.nn.parallel.replicate(netG_s, self.opt.gpu_ids[:gpu_split_id]) if self.split_gpus else [netG_s]
netG.append(netG_s)
start_gpu = self.gpu_ids[1] if self.split_gpus else real_A_all.get_device()
fake_B, fake_B_raw, flow, weight = self.generate_frame_train(netG, real_A_all, fake_B_prev, start_gpu, is_first_frame)
fake_B_prev = [B[:, -tG+1:].detach() for B in fake_B]
fake_B = [B[:, tG-1:] for B in fake_B]
return fake_B[0], fake_B_raw, flow, weight, real_A_all[:,tG-1:], real_B_all[:,tG-2:], fake_B_prev
def generate_frame_train(self, netG, real_A_all, fake_B_pyr, start_gpu, is_first_frame):
tG = self.opt.n_frames_G
n_frames_load = self.n_frames_load
n_scales = self.n_scales
finetune_all = self.finetune_all
dest_id = self.gpu_ids[0] if self.split_gpus else start_gpu
### generate inputs
real_A_pyr = self.build_pyr(real_A_all)
fake_Bs_raw, flows, weights = None, None, None
### sequentially generate each frame
for t in range(n_frames_load):
gpu_id = (t // self.n_frames_per_gpu + start_gpu) if self.split_gpus else start_gpu # the GPU idx where we generate this frame
net_id = gpu_id if self.split_gpus else 0 # the GPU idx where the net is located
fake_B_feat = flow_feat = fake_B_fg_feat = None
# coarse-to-fine approach
for s in range(n_scales):
si = n_scales-1-s
### prepare inputs
# 1. input labels
real_As = real_A_pyr[si]
_, _, _, h, w = real_As.size()
real_As_reshaped = real_As[:, t:t+tG,...].view(self.bs, -1, h, w).cuda(gpu_id)
# 2. previous fake_Bs
fake_B_prevs = fake_B_pyr[si][:, t:t+tG-1,...].cuda(gpu_id)
if (t % self.n_frames_bp) == 0:
fake_B_prevs = fake_B_prevs.detach()
fake_B_prevs_reshaped = fake_B_prevs.view(self.bs, -1, h, w)
# 3. mask for foreground and whether to use warped previous image
mask_F = self.compute_mask(real_As, t+tG-1) if self.opt.fg else None
use_raw_only = self.opt.no_first_img and is_first_frame
### network forward
fake_B, flow, weight, fake_B_raw, fake_B_feat, flow_feat, fake_B_fg_feat \
= netG[s][net_id].forward(real_As_reshaped, fake_B_prevs_reshaped, mask_F,
fake_B_feat, flow_feat, fake_B_fg_feat, use_raw_only)
# if only training the finest scale, leave the coarser levels untouched
if s != n_scales-1 and not finetune_all:
fake_B, fake_B_feat = fake_B.detach(), fake_B_feat.detach()
if flow is not None:
flow, flow_feat = flow.detach(), flow_feat.detach()
if fake_B_fg_feat is not None:
fake_B_fg_feat = fake_B_fg_feat.detach()
# collect results into a sequence
fake_B_pyr[si] = self.concat([fake_B_pyr[si], fake_B.unsqueeze(1).cuda(dest_id)], dim=1)
if s == n_scales-1:
fake_Bs_raw = self.concat([fake_Bs_raw, fake_B_raw.unsqueeze(1).cuda(dest_id)], dim=1)
if flow is not None:
flows = self.concat([flows, flow.unsqueeze(1).cuda(dest_id)], dim=1)
weights = self.concat([weights, weight.unsqueeze(1).cuda(dest_id)], dim=1)
return fake_B_pyr, fake_Bs_raw, flows, weights
def inference(self, input_A, input_B, inst_A):
with torch.no_grad():
real_A, real_B, pool_map = self.encode_input(input_A, input_B, inst_A)
self.is_first_frame = not hasattr(self, 'fake_B_prev') or self.fake_B_prev is None
if self.is_first_frame:
self.fake_B_prev = self.generate_first_frame(real_A, real_B, pool_map)
real_A = self.build_pyr(real_A)
self.fake_B_feat = self.flow_feat = self.fake_B_fg_feat = None
for s in range(self.n_scales):
fake_B = self.generate_frame_infer(real_A[self.n_scales-1-s], s)
return fake_B, real_A[0][0, -1]
def generate_frame_infer(self, real_A, s):
tG = self.opt.n_frames_G
_, _, _, h, w = real_A.size()
si = self.n_scales-1-s
netG_s = getattr(self, 'netG'+str(s))
### prepare inputs
real_As_reshaped = real_A[0,:tG].view(1, -1, h, w)
fake_B_prevs_reshaped = self.fake_B_prev[si].view(1, -1, h, w)
mask_F = self.compute_mask(real_A, tG-1)[0] if self.opt.fg else None
use_raw_only = self.opt.no_first_img and self.is_first_frame
### network forward
fake_B, flow, weight, fake_B_raw, self.fake_B_feat, self.flow_feat, self.fake_B_fg_feat \
= netG_s.forward(real_As_reshaped, fake_B_prevs_reshaped, mask_F,
self.fake_B_feat, self.flow_feat, self.fake_B_fg_feat, use_raw_only)
self.fake_B_prev[si] = torch.cat([self.fake_B_prev[si][1:,...], fake_B])
return fake_B
def generate_first_frame(self, real_A, real_B, pool_map=None):
tG = self.opt.n_frames_G
if self.opt.no_first_img: # model also generates first frame
fake_B_prev = Variable(self.Tensor(self.bs, tG-1, self.opt.output_nc, self.height, self.width).zero_())
elif self.opt.isTrain or self.opt.use_real_img: # assume first frame is given
fake_B_prev = real_B[:,:(tG-1),...]
elif self.opt.use_single_G: # use another model (trained on single images) to generate first frame
fake_B_prev = None
if self.opt.use_instance:
real_A = real_A[:,:,:self.opt.label_nc,:,:]
for i in range(tG-1):
feat_map = self.get_face_features(real_B[:,i], pool_map[:,i]) if self.opt.dataset_mode == 'face' else None
fake_B = self.netG_i.forward(real_A[:,i], feat_map).unsqueeze(1)
fake_B_prev = self.concat([fake_B_prev, fake_B], dim=1)
else:
raise ValueError('Please specify the method for generating the first frame')
fake_B_prev = self.build_pyr(fake_B_prev)
if not self.opt.isTrain:
fake_B_prev = [B[0] for B in fake_B_prev]
return fake_B_prev
def return_dummy(self, input_A):
h, w = input_A.size()[3:]
t = self.n_frames_load
tG = self.opt.n_frames_G
flow, weight = (self.Tensor(1, t, 2, h, w), self.Tensor(1, t, 1, h, w)) if not self.opt.no_flow else (None, None)
return self.Tensor(1, t, 3, h, w), self.Tensor(1, t, 3, h, w), flow, weight, \
self.Tensor(1, t, self.opt.input_nc, h, w), self.Tensor(1, t+1, 3, h, w), self.build_pyr(self.Tensor(1, tG-1, 3, h, w))
def load_single_G(self): # load the model that generates the first frame
opt = self.opt
s = self.n_scales
if 'City' in self.opt.dataroot:
single_path = 'checkpoints/label2city_single/'
if opt.loadSize == 512:
load_path = single_path + 'latest_net_G_512.pth'
netG = networks.define_G(35, 3, 0, 64, 'global', 3, 'instance', 0, self.gpu_ids, opt)
elif opt.loadSize == 1024:
load_path = single_path + 'latest_net_G_1024.pth'
netG = networks.define_G(35, 3, 0, 64, 'global', 4, 'instance', 0, self.gpu_ids, opt)
elif opt.loadSize == 2048:
load_path = single_path + 'latest_net_G_2048.pth'
netG = networks.define_G(35, 3, 0, 32, 'local', 4, 'instance', 0, self.gpu_ids, opt)
else:
raise ValueError('Single image generator does not exist')
elif 'face' in self.opt.dataroot:
single_path = 'checkpoints/edge2face_single/'
load_path = single_path + 'latest_net_G.pth'
opt.feat_num = 16
netG = networks.define_G(15, 3, 0, 64, 'global_with_features', 3, 'instance', 0, self.gpu_ids, opt)
encoder_path = single_path + 'latest_net_E.pth'
self.netE = networks.define_G(3, 16, 0, 16, 'encoder', 4, 'instance', 0, self.gpu_ids)
self.netE.load_state_dict(torch.load(encoder_path))
else:
raise ValueError('Single image generator does not exist')
netG.load_state_dict(torch.load(load_path))
return netG
def get_face_features(self, real_image, inst):
feat_map = self.netE.forward(real_image, inst)
#if self.opt.use_encoded_image:
# return feat_map
load_name = 'checkpoints/edge2face_single/features.npy'
features = np.load(load_name, encoding='latin1').item()
inst_np = inst.cpu().numpy().astype(int)
# find nearest neighbor in the training dataset
num_images = features[6].shape[0]
feat_map = feat_map.data.cpu().numpy()
feat_ori = torch.FloatTensor(7, self.opt.feat_num, 1) # feature map for test img (for each facial part)
feat_ref = torch.FloatTensor(7, self.opt.feat_num, num_images) # feature map for training imgs
for label in np.unique(inst_np):
idx = (inst == int(label)).nonzero()
for k in range(self.opt.feat_num):
feat_ori[label,k] = float(feat_map[idx[0,0], idx[0,1] + k, idx[0,2], idx[0,3]])
for m in range(num_images):
feat_ref[label,k,m] = features[label][m,k]
cluster_idx = self.dists_min(feat_ori.expand_as(feat_ref).cuda(), feat_ref.cuda(), num=1)
# construct new feature map from nearest neighbors
feat_map = self.Tensor(inst.size()[0], self.opt.feat_num, inst.size()[2], inst.size()[3])
for label in np.unique(inst_np):
feat = features[label][:,:-1]
idx = (inst == int(label)).nonzero()
for k in range(self.opt.feat_num):
feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[min(cluster_idx, feat.shape[0]-1), k]
return Variable(feat_map)
def compute_mask(self, real_As, ts, te=None): # compute the mask for foreground objects
_, _, _, h, w = real_As.size()
if te is None:
te = ts + 1
mask_F = real_As[:, ts:te, self.opt.fg_labels[0]].clone()
for i in range(1, len(self.opt.fg_labels)):
mask_F = mask_F + real_As[:, ts:te, self.opt.fg_labels[i]]
mask_F = torch.clamp(mask_F, 0, 1)
return mask_F
def compute_fake_B_prev(self, real_B_prev, fake_B_last, fake_B):
fake_B_prev = real_B_prev[:, 0:1] if fake_B_last is None else fake_B_last[0][:, -1:]
if fake_B.size()[1] > 1:
fake_B_prev = torch.cat([fake_B_prev, fake_B[:, :-1].detach()], dim=1)
return fake_B_prev
def save(self, label):
for s in range(self.n_scales):
self.save_network(getattr(self, 'netG'+str(s)), 'G'+str(s), label, self.gpu_ids) | vid2vid-master | models/vid2vid_model_G.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import os
import torch
import torch.nn as nn
import numpy as np
import math
def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0
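# Wrap the networks for multi-GPU training. If the generator occupies all GPUs, each model is
# wrapped in myModel (DataParallel plus dummy-batch padding); otherwise the available GPUs are
# split between the generator and the discriminator / flow networks.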
def wrap_model(opt, modelG, modelD, flowNet):
if opt.n_gpus_gen == len(opt.gpu_ids):
modelG = myModel(opt, modelG)
modelD = myModel(opt, modelD)
flowNet = myModel(opt, flowNet)
else:
if opt.batchSize == 1:
gpu_split_id = opt.n_gpus_gen + 1
modelG = nn.DataParallel(modelG, device_ids=opt.gpu_ids[0:1])
else:
gpu_split_id = opt.n_gpus_gen
modelG = nn.DataParallel(modelG, device_ids=opt.gpu_ids[:gpu_split_id])
modelD = nn.DataParallel(modelD, device_ids=[opt.gpu_ids[0]] + opt.gpu_ids[gpu_split_id:])
flowNet = nn.DataParallel(flowNet, device_ids=[opt.gpu_ids[0]] + opt.gpu_ids[gpu_split_id:])
return modelG, modelD, flowNet
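# DataParallel wrapper that pads the batch with dummy (zero) samples so it divides evenly
# across the GPUs, then strips the padding from the outputs again.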
class myModel(nn.Module):
def __init__(self, opt, model):
super(myModel, self).__init__()
self.opt = opt
self.module = model
self.model = nn.DataParallel(model, device_ids=opt.gpu_ids)
self.bs_per_gpu = int(np.ceil(float(opt.batchSize) / len(opt.gpu_ids))) # batch size for each GPU
self.pad_bs = self.bs_per_gpu * len(opt.gpu_ids) - opt.batchSize
def forward(self, *inputs, **kwargs):
inputs = self.add_dummy_to_tensor(inputs, self.pad_bs)
outputs = self.model(*inputs, **kwargs, dummy_bs=self.pad_bs)
if self.pad_bs == self.bs_per_gpu: # gpu 0 does 0 batch but still returns 1 batch
return self.remove_dummy_from_tensor(outputs, 1)
return outputs
def add_dummy_to_tensor(self, tensors, add_size=0):
if add_size == 0 or tensors is None: return tensors
if type(tensors) == list or type(tensors) == tuple:
return [self.add_dummy_to_tensor(tensor, add_size) for tensor in tensors]
if isinstance(tensors, torch.Tensor):
dummy = torch.zeros_like(tensors)[:add_size]
tensors = torch.cat([dummy, tensors])
return tensors
def remove_dummy_from_tensor(self, tensors, remove_size=0):
if remove_size == 0 or tensors is None: return tensors
if type(tensors) == list or type(tensors) == tuple:
return [self.remove_dummy_from_tensor(tensor, remove_size) for tensor in tensors]
if isinstance(tensors, torch.Tensor):
tensors = tensors[remove_size:]
return tensors
def create_model(opt):
print(opt.model)
if opt.model == 'vid2vid':
from .vid2vid_model_G import Vid2VidModelG
modelG = Vid2VidModelG()
if opt.isTrain:
from .vid2vid_model_D import Vid2VidModelD
modelD = Vid2VidModelD()
else:
raise ValueError("Model [%s] not recognized." % opt.model)
if opt.isTrain:
from .flownet import FlowNet
flowNet = FlowNet()
modelG.initialize(opt)
if opt.isTrain:
modelD.initialize(opt)
flowNet.initialize(opt)
if not opt.fp16:
            modelG, modelD, flowNet = wrap_model(opt, modelG, modelD, flowNet)
return [modelG, modelD, flowNet]
else:
return modelG
def create_optimizer(opt, models):
modelG, modelD, flowNet = models
optimizer_D_T = []
if opt.fp16:
from apex import amp
for s in range(opt.n_scales_temporal):
optimizer_D_T.append(getattr(modelD, 'optimizer_D_T'+str(s)))
modelG, optimizer_G = amp.initialize(modelG, modelG.optimizer_G, opt_level='O1')
modelD, optimizers_D = amp.initialize(modelD, [modelD.optimizer_D] + optimizer_D_T, opt_level='O1')
optimizer_D, optimizer_D_T = optimizers_D[0], optimizers_D[1:]
        modelG, modelD, flowNet = wrap_model(opt, modelG, modelD, flowNet)
else:
optimizer_G = modelG.module.optimizer_G
optimizer_D = modelD.module.optimizer_D
for s in range(opt.n_scales_temporal):
optimizer_D_T.append(getattr(modelD.module, 'optimizer_D_T'+str(s)))
return modelG, modelD, flowNet, optimizer_G, optimizer_D, optimizer_D_T
def init_params(opt, modelG, modelD, data_loader):
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
start_epoch, epoch_iter = 1, 0
### if continue training, recover previous states
if opt.continue_train:
if os.path.exists(iter_path):
start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
if start_epoch > opt.niter:
modelG.module.update_learning_rate(start_epoch-1, 'G')
modelD.module.update_learning_rate(start_epoch-1, 'D')
if (opt.n_scales_spatial > 1) and (opt.niter_fix_global != 0) and (start_epoch > opt.niter_fix_global):
modelG.module.update_fixed_params()
if start_epoch > opt.niter_step:
data_loader.dataset.update_training_batch((start_epoch-1)//opt.niter_step)
modelG.module.update_training_batch((start_epoch-1)//opt.niter_step)
n_gpus = opt.n_gpus_gen if opt.batchSize == 1 else 1 # number of gpus used for generator for each batch
tG, tD = opt.n_frames_G, opt.n_frames_D
tDB = tD * opt.output_nc
s_scales = opt.n_scales_spatial
t_scales = opt.n_scales_temporal
input_nc = 1 if opt.label_nc != 0 else opt.input_nc
output_nc = opt.output_nc
print_freq = lcm(opt.print_freq, opt.batchSize)
total_steps = (start_epoch-1) * len(data_loader) + epoch_iter
total_steps = total_steps // print_freq * print_freq
return n_gpus, tG, tD, tDB, s_scales, t_scales, input_nc, output_nc, start_epoch, epoch_iter, print_freq, total_steps, iter_path
def save_models(opt, epoch, epoch_iter, total_steps, visualizer, iter_path, modelG, modelD, end_of_epoch=False):
if not end_of_epoch:
if total_steps % opt.save_latest_freq == 0:
visualizer.vis_print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
modelG.module.save('latest')
modelD.module.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
else:
if epoch % opt.save_epoch_freq == 0:
visualizer.vis_print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
modelG.module.save('latest')
modelD.module.save('latest')
modelG.module.save(epoch)
modelD.module.save(epoch)
np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
def update_models(opt, epoch, modelG, modelD, data_loader):
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
modelG.module.update_learning_rate(epoch, 'G')
modelD.module.update_learning_rate(epoch, 'D')
### gradually grow training sequence length
if (epoch % opt.niter_step) == 0:
data_loader.dataset.update_training_batch(epoch//opt.niter_step)
modelG.module.update_training_batch(epoch//opt.niter_step)
### finetune all scales
if (opt.n_scales_spatial > 1) and (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
modelG.module.update_fixed_params() | vid2vid-master | models/models.py |
import numpy as np
import torch
import sys
from .base_model import BaseModel
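# Thin wrapper around a pretrained FlowNet2 model (kept in eval mode); during training it
# provides reference flow and confidence maps between consecutive real frames.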
class FlowNet(BaseModel):
def name(self):
return 'FlowNet'
def initialize(self, opt):
BaseModel.initialize(self, opt)
# flownet 2
from .flownet2_pytorch import models as flownet2_models
from .flownet2_pytorch.utils import tools as flownet2_tools
from .flownet2_pytorch.networks.resample2d_package.resample2d import Resample2d
self.flowNet = flownet2_tools.module_to_dict(flownet2_models)['FlowNet2'](fp16=opt.fp16).cuda(self.gpu_ids[0])
checkpoint = torch.load('models/flownet2_pytorch/FlowNet2_checkpoint.pth.tar')
self.flowNet.load_state_dict(checkpoint['state_dict'])
self.flowNet.eval()
self.resample = Resample2d()
self.downsample = torch.nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input_A, input_B, dummy_bs=0):
with torch.no_grad():
if input_A.get_device() == self.gpu_ids[0]:
input_A, input_B = input_A[dummy_bs:], input_B[dummy_bs:]
if input_A.size(0) == 0:
b, n, c, h, w = input_A.size()
return self.Tensor(1, n, 2, h, w), self.Tensor(1, n, 1, h, w)
size = input_A.size()
assert(len(size) == 4 or len(size) == 5)
if len(size) == 5:
b, n, c, h, w = size
input_A = input_A.contiguous().view(-1, c, h, w)
input_B = input_B.contiguous().view(-1, c, h, w)
flow, conf = self.compute_flow_and_conf(input_A, input_B)
return flow.view(b, n, 2, h, w), conf.view(b, n, 1, h, w)
else:
return self.compute_flow_and_conf(input_A, input_B)
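    # Resize the image pair to a multiple of 64 (as required by FlowNet2), estimate the flow,
    # and derive a confidence map: pixels where warping im2 by the flow closely matches im1.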
def compute_flow_and_conf(self, im1, im2):
assert(im1.size()[1] == 3)
assert(im1.size() == im2.size())
old_h, old_w = im1.size()[2], im1.size()[3]
new_h, new_w = old_h//64*64, old_w//64*64
if old_h != new_h:
downsample = torch.nn.Upsample(size=(new_h, new_w), mode='bilinear')
upsample = torch.nn.Upsample(size=(old_h, old_w), mode='bilinear')
im1 = downsample(im1)
im2 = downsample(im2)
data1 = torch.cat([im1.unsqueeze(2), im2.unsqueeze(2)], dim=2)
flow1 = self.flowNet(data1)
conf = (self.norm(im1 - self.resample(im2, flow1)) < 0.02).float()
if old_h != new_h:
flow1 = upsample(flow1) * old_h / new_h
conf = upsample(conf)
return flow1.detach(), conf.detach()
def norm(self, t):
return torch.sum(t*t, dim=1, keepdim=True)
| vid2vid-master | models/flownet.py |
vid2vid-master | models/__init__.py |
|
import os, sys
import numpy as np
import torch
from .networks import get_grid
class BaseModel(torch.nn.Module):
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
# used in test time, no backprop
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_ids):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
torch.save(network.cpu().state_dict(), save_path)
if len(gpu_ids) and torch.cuda.is_available():
network.cuda(gpu_ids[0])
def resolve_version(self):
import torch._utils
try:
torch._utils._rebuild_tensor_v2
except AttributeError:
def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
tensor.requires_grad = requires_grad
tensor._backward_hooks = backward_hooks
return tensor
torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label, save_dir=''):
self.resolve_version()
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
if not save_dir:
save_dir = self.save_dir
save_path = os.path.join(save_dir, save_filename)
if not os.path.isfile(save_path):
print('%s not exists yet!' % save_path)
if 'G0' in network_label:
                raise ValueError('Generator must exist!')
else:
#network.load_state_dict(torch.load(save_path))
try:
network.load_state_dict(torch.load(save_path))
except:
pretrained_dict = torch.load(save_path)
model_dict = network.state_dict()
### printout layers in pretrained model
initialized = set()
for k, v in pretrained_dict.items():
initialized.add(k.split('.')[0])
#print('pretrained model has following layers: ')
#print(sorted(initialized))
try:
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
network.load_state_dict(pretrained_dict)
print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
except:
print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
if sys.version_info >= (3,0):
not_initialized = set()
else:
from sets import Set
not_initialized = Set()
for k, v in pretrained_dict.items():
if v.size() == model_dict[k].size():
model_dict[k] = v
for k, v in model_dict.items():
if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
not_initialized.add(k.split('.')[0])
print(sorted(not_initialized))
network.load_state_dict(model_dict)
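    # Concatenate two (possibly nested lists of) tensors along dim, tolerating None on either
    # side; used to accumulate per-frame outputs into sequences.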
def concat(self, tensors, dim=0):
if tensors[0] is not None and tensors[1] is not None:
if isinstance(tensors[0], list):
tensors_cat = []
for i in range(len(tensors[0])):
tensors_cat.append(self.concat([tensors[0][i], tensors[1][i]], dim=dim))
return tensors_cat
return torch.cat([tensors[0], tensors[1]], dim=dim)
elif tensors[0] is not None:
return tensors[0]
else:
return tensors[1]
def build_pyr(self, tensor, nearest=False): # build image pyramid from a single image
if tensor is None:
return [None] * self.n_scales
tensor = [tensor]
if nearest:
downsample = torch.nn.AvgPool2d(1, stride=2)
else:
downsample = torch.nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
for s in range(1, self.n_scales):
b, t, c, h, w = tensor[-1].size()
down = downsample(tensor[-1].view(-1, h, w)).view(b, t, c, h//2, w//2)
tensor.append(down)
return tensor
def dists_min(self, a, b, num=1):
dists = torch.sum(torch.sum((a-b)*(a-b), dim=0), dim=0)
if num == 1:
val, idx = torch.min(dists, dim=0)
#idx = [idx]
else:
val, idx = torch.sort(dists, dim=0)
idx = idx[:num]
return idx.cpu().numpy().astype(int)
def get_edges(self, t):
edge = torch.cuda.ByteTensor(t.size()).zero_()
edge[:,:,:,:,1:] = edge[:,:,:,:,1:] | (t[:,:,:,:,1:] != t[:,:,:,:,:-1])
edge[:,:,:,:,:-1] = edge[:,:,:,:,:-1] | (t[:,:,:,:,1:] != t[:,:,:,:,:-1])
edge[:,:,:,1:,:] = edge[:,:,:,1:,:] | (t[:,:,:,1:,:] != t[:,:,:,:-1,:])
edge[:,:,:,:-1,:] = edge[:,:,:,:-1,:] | (t[:,:,:,1:,:] != t[:,:,:,:-1,:])
return edge.float()
def update_learning_rate(self, epoch, model):
lr = self.opt.lr * (1 - (epoch - self.opt.niter) / self.opt.niter_decay)
for param_group in getattr(self, 'optimizer_' + model).param_groups:
param_group['lr'] = lr
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
def update_fixed_params(self): # finetune all scales instead of just finest scale
params = []
for s in range(self.n_scales):
params += list(getattr(self, 'netG'+str(s)).parameters())
self.optimizer_G = torch.optim.Adam(params, lr=self.old_lr, betas=(self.opt.beta1, 0.999))
self.finetune_all = True
print('------------ Now finetuning all scales -----------')
def update_training_batch(self, ratio): # increase number of backpropagated frames and number of frames in each GPU
nfb = self.n_frames_bp
nfl = self.n_frames_load
if nfb < nfl:
nfb = min(self.opt.max_frames_backpropagate, 2**ratio)
self.n_frames_bp = nfl // int(np.ceil(float(nfl) / nfb))
print('-------- Updating number of backpropagated frames to %d ----------' % self.n_frames_bp)
if self.n_frames_per_gpu < self.opt.max_frames_per_gpu:
self.n_frames_per_gpu = min(self.n_frames_per_gpu*2, self.opt.max_frames_per_gpu)
self.n_frames_load = self.n_gpus * self.n_frames_per_gpu
print('-------- Updating number of frames per gpu to %d ----------' % self.n_frames_per_gpu)
def grid_sample(self, input1, input2):
if self.opt.fp16: # not sure if it's necessary
return torch.nn.functional.grid_sample(input1.float(), input2.float(), mode='bilinear', padding_mode='border').half()
else:
return torch.nn.functional.grid_sample(input1, input2, mode='bilinear', padding_mode='border')
def resample(self, image, flow):
b, c, h, w = image.size()
if not hasattr(self, 'grid') or self.grid.size() != flow.size():
self.grid = get_grid(b, h, w, gpu_id=flow.get_device(), dtype=flow.dtype)
flow = torch.cat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], dim=1)
final_grid = (self.grid + flow).permute(0, 2, 3, 1).cuda(image.get_device())
output = self.grid_sample(image, final_grid)
return output | vid2vid-master | models/base_model.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import copy
###############################################################################
# Functions
###############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 and hasattr(m, 'weight'):
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc, output_nc, prev_output_nc, ngf, which_model_netG, n_downsampling, norm, scale, gpu_ids=[], opt=[]):
netG = None
norm_layer = get_norm_layer(norm_type=norm)
if which_model_netG == 'global':
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsampling, opt.n_blocks, norm_layer)
elif which_model_netG == 'local':
netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsampling, opt.n_blocks, opt.n_local_enhancers, opt.n_blocks_local, norm_layer)
elif which_model_netG == 'global_with_features':
netG = Global_with_z(input_nc, output_nc, opt.feat_num, ngf, n_downsampling, opt.n_blocks, norm_layer)
elif which_model_netG == 'local_with_features':
netG = Local_with_z(input_nc, output_nc, opt.feat_num, ngf, n_downsampling, opt.n_blocks, opt.n_local_enhancers, opt.n_blocks_local, norm_layer)
elif which_model_netG == 'composite':
netG = CompositeGenerator(opt, input_nc, output_nc, prev_output_nc, ngf, n_downsampling, opt.n_blocks, opt.fg, opt.no_flow, norm_layer)
elif which_model_netG == 'compositeLocal':
netG = CompositeLocalGenerator(opt, input_nc, output_nc, prev_output_nc, ngf, n_downsampling, opt.n_blocks_local, opt.fg, opt.no_flow,
norm_layer, scale=scale)
elif which_model_netG == 'encoder':
netG = Encoder(input_nc, output_nc, ngf, n_downsampling, norm_layer)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
#print_network(netG)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, n_layers_D, norm='instance', num_D=1, getIntermFeat=False, gpu_ids=[]):
norm_layer = get_norm_layer(norm_type=norm)
netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, num_D, getIntermFeat)
#print_network(netD)
if len(gpu_ids) > 0:
netD.cuda(gpu_ids[0])
netD.apply(weights_init)
return netD
def print_network(net):
if isinstance(net, list):
net = net[0]
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
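# Build a normalized sampling grid in [-1, 1] of shape (batch, 2, rows, cols); adding a
# (normalized) flow field to this grid gives the coordinates used by grid_sample for warping.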
def get_grid(batchsize, rows, cols, gpu_id=0, dtype=torch.float32):
hor = torch.linspace(-1.0, 1.0, cols)
hor.requires_grad = False
hor = hor.view(1, 1, 1, cols)
hor = hor.expand(batchsize, 1, rows, cols)
ver = torch.linspace(-1.0, 1.0, rows)
ver.requires_grad = False
ver = ver.view(1, 1, rows, 1)
ver = ver.expand(batchsize, 1, rows, cols)
t_grid = torch.cat([hor, ver], 1)
t_grid.requires_grad = False
if dtype == torch.float16: t_grid = t_grid.half()
return t_grid.cuda(gpu_id)
##############################################################################
# Classes
##############################################################################
class BaseNetwork(nn.Module):
def __init__(self):
super(BaseNetwork, self).__init__()
def grid_sample(self, input1, input2):
if self.opt.fp16: # not sure if it's necessary
return torch.nn.functional.grid_sample(input1.float(), input2.float(), mode='bilinear', padding_mode='border').half()
else:
return torch.nn.functional.grid_sample(input1, input2, mode='bilinear', padding_mode='border')
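    # Warp an image with a dense flow field: the pixel-space flow is rescaled to the [-1, 1]
    # coordinate convention of grid_sample and added to the identity grid before sampling.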
def resample(self, image, flow):
b, c, h, w = image.size()
if not hasattr(self, 'grid') or self.grid.size() != flow.size():
self.grid = get_grid(b, h, w, gpu_id=flow.get_device(), dtype=flow.dtype)
flow = torch.cat([flow[:, 0:1, :, :] / ((w - 1.0) / 2.0), flow[:, 1:2, :, :] / ((h - 1.0) / 2.0)], dim=1)
final_grid = (self.grid + flow).permute(0, 2, 3, 1).cuda(image.get_device())
output = self.grid_sample(image, final_grid)
return output
class CompositeGenerator(BaseNetwork):
def __init__(self, opt, input_nc, output_nc, prev_output_nc, ngf, n_downsampling, n_blocks, use_fg_model=False, no_flow=False,
norm_layer=nn.BatchNorm2d, padding_type='reflect'):
assert(n_blocks >= 0)
super(CompositeGenerator, self).__init__()
self.opt = opt
self.n_downsampling = n_downsampling
self.use_fg_model = use_fg_model
self.no_flow = no_flow
activation = nn.ReLU(True)
if use_fg_model:
            ### individual image generation
ngf_indv = ngf // 2 if n_downsampling > 2 else ngf
indv_nc = input_nc
indv_down = [nn.ReflectionPad2d(3), nn.Conv2d(indv_nc, ngf_indv, kernel_size=7, padding=0),
norm_layer(ngf_indv), activation]
for i in range(n_downsampling):
mult = 2**i
indv_down += [nn.Conv2d(ngf_indv*mult, ngf_indv*mult*2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf_indv*mult*2), activation]
indv_res = []
mult = 2**n_downsampling
for i in range(n_blocks):
indv_res += [ResnetBlock(ngf_indv * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
indv_up = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
indv_up += [nn.ConvTranspose2d(ngf_indv*mult, ngf_indv*mult//2, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_indv*mult//2), activation]
indv_final = [nn.ReflectionPad2d(3), nn.Conv2d(ngf_indv, output_nc, kernel_size=7, padding=0), nn.Tanh()]
### flow and image generation
### downsample
model_down_seg = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
for i in range(n_downsampling):
mult = 2**i
model_down_seg += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
mult = 2**n_downsampling
for i in range(n_blocks - n_blocks//2):
model_down_seg += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
model_down_img = [nn.ReflectionPad2d(3), nn.Conv2d(prev_output_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
model_down_img += copy.deepcopy(model_down_seg[4:])
### resnet blocks
model_res_img = []
for i in range(n_blocks//2):
model_res_img += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
if not no_flow:
model_res_flow = copy.deepcopy(model_res_img)
### upsample
model_up_img = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model_up_img += [nn.ConvTranspose2d(ngf*mult, ngf*mult//2, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf*mult//2), activation]
model_final_img = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
if not no_flow:
model_up_flow = copy.deepcopy(model_up_img)
model_final_flow = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, 2, kernel_size=7, padding=0)]
model_final_w = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, 1, kernel_size=7, padding=0), nn.Sigmoid()]
if use_fg_model:
self.indv_down = nn.Sequential(*indv_down)
self.indv_res = nn.Sequential(*indv_res)
self.indv_up = nn.Sequential(*indv_up)
self.indv_final = nn.Sequential(*indv_final)
self.model_down_seg = nn.Sequential(*model_down_seg)
self.model_down_img = nn.Sequential(*model_down_img)
self.model_res_img = nn.Sequential(*model_res_img)
self.model_up_img = nn.Sequential(*model_up_img)
self.model_final_img = nn.Sequential(*model_final_img)
if not no_flow:
self.model_res_flow = nn.Sequential(*model_res_flow)
self.model_up_flow = nn.Sequential(*model_up_flow)
self.model_final_flow = nn.Sequential(*model_final_flow)
self.model_final_w = nn.Sequential(*model_final_w)
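    # Forward pass: fuse the label-map and previous-frame features, then predict a raw frame,
    # an optical flow for warping the previous frame, and a per-pixel weight that blends the
    # raw and warped frames into the final output (plus an optional foreground branch).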
def forward(self, input, img_prev, mask, img_feat_coarse, flow_feat_coarse, img_fg_feat_coarse, use_raw_only):
downsample = self.model_down_seg(input) + self.model_down_img(img_prev)
img_feat = self.model_up_img(self.model_res_img(downsample))
img_raw = self.model_final_img(img_feat)
flow = weight = flow_feat = None
if not self.no_flow:
res_flow = self.model_res_flow(downsample)
flow_feat = self.model_up_flow(res_flow)
flow = self.model_final_flow(flow_feat) * 20
weight = self.model_final_w(flow_feat)
gpu_id = img_feat.get_device()
if use_raw_only or self.no_flow:
img_final = img_raw
else:
img_warp = self.resample(img_prev[:,-3:,...].cuda(gpu_id), flow).cuda(gpu_id)  # warp the previous frame with the estimated flow
weight_ = weight.expand_as(img_raw)
img_final = img_raw * weight_ + img_warp * (1-weight_)  # blend hallucinated and warped pixels with the learned soft mask
img_fg_feat = None
if self.use_fg_model:
img_fg_feat = self.indv_up(self.indv_res(self.indv_down(input)))
img_fg = self.indv_final(img_fg_feat)
mask = mask.cuda(gpu_id).expand_as(img_raw)
img_final = img_fg * mask + img_final * (1-mask)
img_raw = img_fg * mask + img_raw * (1-mask)
return img_final, flow, weight, img_raw, img_feat, flow_feat, img_fg_feat
class CompositeLocalGenerator(BaseNetwork):
def __init__(self, opt, input_nc, output_nc, prev_output_nc, ngf, n_downsampling, n_blocks_local, use_fg_model=False, no_flow=False,
norm_layer=nn.BatchNorm2d, padding_type='reflect', scale=1):
super(CompositeLocalGenerator, self).__init__()
self.opt = opt
self.use_fg_model = use_fg_model
self.no_flow = no_flow
self.scale = scale
activation = nn.ReLU(True)
if use_fg_model:
### individual image generation
ngf_indv = ngf // 2 if n_downsampling > 2 else ngf
indv_down = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_indv, kernel_size=7, padding=0), norm_layer(ngf_indv), activation,
nn.Conv2d(ngf_indv, ngf_indv*2, kernel_size=3, stride=2, padding=1), norm_layer(ngf_indv*2), activation]
indv_up = []
for i in range(n_blocks_local):
indv_up += [ResnetBlock(ngf_indv*2, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
indv_up += [nn.ConvTranspose2d(ngf_indv*2, ngf_indv, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_indv), activation]
indv_final = [nn.ReflectionPad2d(3), nn.Conv2d(ngf_indv, output_nc, kernel_size=7, padding=0), nn.Tanh()]
### flow and image generation
### downsample
model_down_seg = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation,
nn.Conv2d(ngf, ngf*2, kernel_size=3, stride=2, padding=1), norm_layer(ngf*2), activation]
model_down_img = [nn.ReflectionPad2d(3), nn.Conv2d(prev_output_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation,
nn.Conv2d(ngf, ngf*2, kernel_size=3, stride=2, padding=1), norm_layer(ngf*2), activation]
### resnet blocks
model_up_img = []
for i in range(n_blocks_local):
model_up_img += [ResnetBlock(ngf*2, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
up = [nn.ConvTranspose2d(ngf*2, ngf, kernel_size=3, stride=2, padding=1, output_padding=1), norm_layer(ngf), activation]
model_up_img += up
model_final_img = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
if not no_flow:
model_up_flow = copy.deepcopy(model_up_img)
model_final_flow = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, 2, kernel_size=7, padding=0)]
model_final_w = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, 1, kernel_size=7, padding=0), nn.Sigmoid()]
if use_fg_model:
self.indv_down = nn.Sequential(*indv_down)
self.indv_up = nn.Sequential(*indv_up)
self.indv_final = nn.Sequential(*indv_final)
self.model_down_seg = nn.Sequential(*model_down_seg)
self.model_down_img = nn.Sequential(*model_down_img)
self.model_up_img = nn.Sequential(*model_up_img)
self.model_final_img = nn.Sequential(*model_final_img)
if not no_flow:
self.model_up_flow = nn.Sequential(*model_up_flow)
self.model_final_flow = nn.Sequential(*model_final_flow)
self.model_final_w = nn.Sequential(*model_final_w)
def forward(self, input, img_prev, mask, img_feat_coarse, flow_feat_coarse, img_fg_feat_coarse, use_raw_only):
flow_multiplier = 20 * (2 ** self.scale)
down_img = self.model_down_seg(input) + self.model_down_img(img_prev)
img_feat = self.model_up_img(down_img + img_feat_coarse)
img_raw = self.model_final_img(img_feat)
flow = weight = flow_feat = None
if not self.no_flow:
down_flow = down_img
flow_feat = self.model_up_flow(down_flow + flow_feat_coarse)
flow = self.model_final_flow(flow_feat) * flow_multiplier
weight = self.model_final_w(flow_feat)
gpu_id = img_feat.get_device()
if use_raw_only or self.no_flow:
img_final = img_raw
else:
img_warp = self.resample(img_prev[:,-3:,...].cuda(gpu_id), flow).cuda(gpu_id)
weight_ = weight.expand_as(img_raw)
img_final = img_raw * weight_ + img_warp * (1-weight_)
img_fg_feat = None
if self.use_fg_model:
img_fg_feat = self.indv_up(self.indv_down(input) + img_fg_feat_coarse)
img_fg = self.indv_final(img_fg_feat)
mask = mask.cuda(gpu_id).expand_as(img_raw)
img_final = img_fg * mask + img_final * (1-mask)
img_raw = img_fg * mask + img_raw * (1-mask)
return img_final, flow, weight, img_raw, img_feat, flow_feat, img_fg_feat
class GlobalGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
padding_type='reflect'):
assert(n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = nn.ReLU(True)
ch_max = 1024
model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(min(ch_max, ngf * mult), min(ch_max, ngf * mult * 2), kernel_size=3, stride=2, padding=1),
norm_layer(min(ch_max, ngf * mult * 2)), activation]
### resnet blocks
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(min(ch_max, ngf * mult), padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(min(ch_max, ngf * mult), min(ch_max, int(ngf * mult / 2)),
kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(min(ch_max, int(ngf * mult / 2))), activation]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, feat=None):
if feat is not None:
input = torch.cat([input, feat], dim=1)
output = self.model(input)
return output
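# --- Illustrative usage sketch (added for exposition; not part of the original file) ---
# GlobalGenerator turns a label/feature map into an RGB image through n_downsampling
# stride-2 convolutions, a stack of resnet blocks, and mirrored transposed convolutions,
# so the output keeps the input's spatial size. A hypothetical smoke test:
def _example_global_generator():
    import torch
    netG = GlobalGenerator(input_nc=35, output_nc=3, ngf=64, n_downsampling=3, n_blocks=9)
    label_map = torch.randn(1, 35, 512, 512)   # stand-in for a one-hot semantic map
    out = netG(label_map)
    assert out.shape == (1, 3, 512, 512)       # spatial size preserved end to end
    return out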
class LocalEnhancer(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9,
n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):
super(LocalEnhancer, self).__init__()
self.n_local_enhancers = n_local_enhancers
###### global generator model #####
ngf_global = ngf * (2**n_local_enhancers)
model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model
model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers
self.model = nn.Sequential(*model_global)
###### local enhancer layers #####
for n in range(1, n_local_enhancers+1):
### downsample
ngf_global = ngf * (2**(n_local_enhancers-n))
model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
norm_layer(ngf_global), nn.ReLU(True),
nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf_global * 2), nn.ReLU(True)]
### residual blocks
model_upsample = []
for i in range(n_blocks_local):
model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]
### upsample
model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_global), nn.ReLU(True)]
### final convolution
if n == n_local_enhancers:
model_final = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
model_upsample += model_final
setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))
ngf_global = ngf * (2**(n_local_enhancers-n)) * 2
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input, feat_map=None):
if feat_map is not None:
input = torch.cat([input, feat_map], dim=1)
### create input pyramid
input_downsampled = [input]
for i in range(self.n_local_enhancers):
input_downsampled.append(self.downsample(input_downsampled[-1]))
### output at coarsest level
output_prev = self.model(input_downsampled[-1])
### build up one layer at a time
for n_local_enhancers in range(1, self.n_local_enhancers+1):
model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')
input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
output_prev = model_upsample(model_downsample(input_i) + output_prev)  # fuse local features with the coarser-scale output
return output_prev
class Global_with_z(nn.Module):
def __init__(self, input_nc, output_nc, nz, ngf=64, n_downsample_G=3, n_blocks=9,
norm_layer=nn.BatchNorm2d, padding_type='reflect'):
super(Global_with_z, self).__init__()
self.n_downsample_G = n_downsample_G
max_ngf = 1024
activation = nn.ReLU(True)
# downsample model
model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc + nz, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
for i in range(n_downsample_G):
mult = 2 ** i
model_downsample += [nn.Conv2d(min(ngf * mult, max_ngf), min(ngf * mult * 2, max_ngf), kernel_size=3, stride=2, padding=1),
norm_layer(min(ngf * mult * 2, max_ngf)), activation]
# internal model
model_resnet = []
mult = 2 ** n_downsample_G
for i in range(n_blocks):
model_resnet += [ResnetBlock(min(ngf*mult, max_ngf) + nz, padding_type=padding_type, norm_layer=norm_layer)]
# upsample model
model_upsample = []
for i in range(n_downsample_G):
mult = 2 ** (n_downsample_G - i)
input_ngf = min(ngf * mult, max_ngf)
if i == 0:
input_ngf += nz * 2
model_upsample += [nn.ConvTranspose2d(input_ngf, min((ngf * mult // 2), max_ngf), kernel_size=3, stride=2,
padding=1, output_padding=1), norm_layer(min((ngf * mult // 2), max_ngf)), activation]
model_upsample_conv = [nn.ReflectionPad2d(3), nn.Conv2d(ngf + nz, output_nc, kernel_size=7), nn.Tanh()]
self.model_downsample = nn.Sequential(*model_downsample)
self.model_resnet = nn.Sequential(*model_resnet)
self.model_upsample = nn.Sequential(*model_upsample)
self.model_upsample_conv = nn.Sequential(*model_upsample_conv)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, x, z):
z_downsample = z
for i in range(self.n_downsample_G):
z_downsample = self.downsample(z_downsample)
downsample = self.model_downsample(torch.cat([x, z], dim=1))
resnet = self.model_resnet(torch.cat([downsample, z_downsample], dim=1))
upsample = self.model_upsample(torch.cat([resnet, z_downsample], dim=1))
return self.model_upsample_conv(torch.cat([upsample, z], dim=1))
class Local_with_z(nn.Module):
def __init__(self, input_nc, output_nc, nz, ngf=32, n_downsample_global=3, n_blocks_global=9,
n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):
super(Local_with_z, self).__init__()
self.n_local_enhancers = n_local_enhancers
self.n_downsample_global = n_downsample_global
###### global generator model #####
ngf_global = ngf * (2**n_local_enhancers)
model_global = Global_with_z(input_nc, output_nc, nz, ngf_global, n_downsample_global, n_blocks_global, norm_layer)
self.model_downsample = model_global.model_downsample
self.model_resnet = model_global.model_resnet
self.model_upsample = model_global.model_upsample
###### local enhancer layers #####
for n in range(1, n_local_enhancers+1):
### downsample
ngf_global = ngf * (2**(n_local_enhancers-n))
if n == n_local_enhancers:
input_nc += nz
model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7),
norm_layer(ngf_global), nn.ReLU(True),
nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf_global * 2), nn.ReLU(True)]
### residual blocks
model_upsample = []
input_ngf = ngf_global * 2
if n == 1:
input_ngf += nz
for i in range(n_blocks_local):
model_upsample += [ResnetBlock(input_ngf, padding_type=padding_type, norm_layer=norm_layer)]
### upsample
model_upsample += [nn.ConvTranspose2d(input_ngf, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_global), nn.ReLU(True)]
setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))
### final convolution
model_final = [nn.ReflectionPad2d(3), nn.Conv2d(ngf + nz, output_nc, kernel_size=7), nn.Tanh()]
self.model_final = nn.Sequential(*model_final)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input, z):
### create input pyramid
input_downsampled = [input]
for i in range(self.n_local_enhancers):
input_downsampled.append(self.downsample(input_downsampled[-1]))
### create downsampled z
z_downsampled_local = z
for i in range(self.n_local_enhancers):
z_downsampled_local = self.downsample(z_downsampled_local)
z_downsampled_global = z_downsampled_local
for i in range(self.n_downsample_global):
z_downsampled_global = self.downsample(z_downsampled_global)
### output at coarsest level
x = input_downsampled[-1]
global_downsample = self.model_downsample(torch.cat([x, z_downsampled_local], dim=1))
global_resnet = self.model_resnet(torch.cat([global_downsample, z_downsampled_global], dim=1))
global_upsample = self.model_upsample(torch.cat([global_resnet, z_downsampled_global], dim=1))
### build up one layer at a time
output_prev = global_upsample
for n_local_enhancers in range(1, self.n_local_enhancers+1):
# fetch models
model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')
# get input image
input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
if n_local_enhancers == self.n_local_enhancers:
input_i = torch.cat([input_i, z], dim=1)
# combine features from different resolutions
combined_input = model_downsample(input_i) + output_prev
if n_local_enhancers == 1:
combined_input = torch.cat([combined_input, z_downsampled_local], dim=1)
# upsample features
output_prev = model_upsample(combined_input)
# final convolution
output = self.model_final(torch.cat([output_prev, z], dim=1))
return output
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
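# --- Illustrative sketch (added for exposition; not part of the original file) ---
# ResnetBlock is shape-preserving (same channels, same spatial size), which is what lets
# the generators stack an arbitrary number of them at the bottleneck. Minimal check,
# assuming instance normalization:
def _example_resnet_block():
    import torch
    block = ResnetBlock(dim=64, padding_type='reflect', norm_layer=nn.InstanceNorm2d)
    x = torch.randn(2, 64, 32, 32)
    y = block(x)
    assert y.shape == x.shape   # the residual addition requires matching shapes
    return y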
class Encoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
super(Encoder, self).__init__()
self.output_nc = output_nc
model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf), nn.ReLU(True)]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), nn.ReLU(True)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, inst):
outputs = self.model(input)
# instance-wise average pooling
outputs_mean = outputs.clone()
for b in range(input.size()[0]):
inst_list = np.unique(inst[b].cpu().numpy().astype(int))
for i in inst_list:
indices = (inst[b:b+1] == int(i)).nonzero() # n x 4
for j in range(self.output_nc):
output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]
mean_feat = torch.mean(output_ins).expand_as(output_ins)
### add random noise to output feature
#mean_feat += torch.normal(torch.zeros_like(mean_feat), 0.05 * torch.ones_like(mean_feat)).cuda()
outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat
return outputs_mean
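# --- Illustrative sketch (added for exposition; not part of the original file) ---
# The Encoder extracts a low-dimensional feature map and then replaces each pixel of an
# instance (per the instance map) with that instance's mean feature, so features are
# constant within an object. A hypothetical call with a single instance:
def _example_encoder():
    import torch
    enc = Encoder(input_nc=3, output_nc=3, ngf=32, n_downsampling=4)
    img = torch.randn(1, 3, 256, 256)
    inst = torch.zeros(1, 1, 256, 256)   # one instance id (0) covering the whole image
    feat = enc(img, inst)
    # with a single instance, every spatial location carries the same per-channel mean
    assert torch.allclose(feat.view(1, 3, -1).std(dim=2), torch.zeros(1, 3), atol=1e-5)
    return feat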
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
num_D=3, getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
ndf_max = 64
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, min(ndf_max, ndf*(2**(num_D-1-i))), n_layers, norm_layer,
getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
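# --- Illustrative sketch (added for exposition; not part of the original file) ---
# MultiscaleDiscriminator applies num_D PatchGAN (NLayerDiscriminator) copies to an
# average-pooled image pyramid; with getIntermFeat=True each scale also returns its
# intermediate activations for the feature-matching loss. A hypothetical shape check,
# assuming a 6-channel input (label map concatenated with an RGB frame):
def _example_multiscale_discriminator():
    import torch
    netD = MultiscaleDiscriminator(input_nc=6, ndf=64, n_layers=3, num_D=2, getIntermFeat=True)
    x = torch.randn(1, 6, 256, 256)
    preds = netD(x)
    assert len(preds) == 2          # one list of outputs per scale
    assert len(preds[0]) == 3 + 2   # n_layers + 2 feature maps per scale
    return preds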
##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
gpu_id = input.get_device()
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).cuda(gpu_id).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
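# --- Illustrative sketch (added for exposition; not part of the original file) ---
# GANLoss accepts either a single discriminator output or the nested list produced by
# MultiscaleDiscriminator, and lazily builds/caches real and fake target tensors of the
# matching size. This sketch assumes a CUDA device, because get_target_tensor moves its
# targets to input.get_device():
def _example_gan_loss():
    import torch
    criterion = GANLoss(use_lsgan=True, tensor=torch.FloatTensor)
    # fake multiscale prediction: two scales, the last entry of each inner list is the patch map
    pred = [[torch.rand(1, 1, 30, 30).cuda()], [torch.rand(1, 1, 14, 14).cuda()]]
    loss_real = criterion(pred, True)    # compared against an all-ones target
    loss_fake = criterion(pred, False)   # compared against an all-zeros target
    return loss_real, loss_fake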
class VGGLoss(nn.Module):
def __init__(self, gpu_id=0):
super(VGGLoss, self).__init__()
self.vgg = Vgg19().cuda(gpu_id)
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False)
def forward(self, x, y):
while x.size()[3] > 1024:
x, y = self.downsample(x), self.downsample(y)
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
class CrossEntropyLoss(nn.Module):
def __init__(self, label_nc):
super(CrossEntropyLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss()  # nn.NLLLoss2d is deprecated; NLLLoss handles 2D targets
def forward(self, output, label):
label = label.long().max(1)[1]
output = self.softmax(output)
return self.criterion(output, label)
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss()
def forward(self, input, target, mask):
mask = mask.expand(-1, input.size()[1], -1, -1)
loss = self.criterion(input * mask, target * mask)
return loss
class MultiscaleL1Loss(nn.Module):
def __init__(self, scale=5):
super(MultiscaleL1Loss, self).__init__()
self.criterion = nn.L1Loss()
self.downsample = nn.AvgPool2d(2, stride=2, count_include_pad=False)
#self.weights = [0.5, 1, 2, 8, 32]
self.weights = [1, 0.5, 0.25, 0.125, 0.125]
self.weights = self.weights[:scale]
def forward(self, input, target, mask=None):
loss = 0
if mask is not None:
mask = mask.expand(-1, input.size()[1], -1, -1)
for i in range(len(self.weights)):
if mask is not None:
loss += self.weights[i] * self.criterion(input * mask, target * mask)
else:
loss += self.weights[i] * self.criterion(input, target)
if i != len(self.weights)-1:
input = self.downsample(input)
target = self.downsample(target)
if mask is not None:
mask = self.downsample(mask)
return loss
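# --- Illustrative sketch (added for exposition; not part of the original file) ---
# MultiscaleL1Loss sums an (optionally masked) L1 distance over a pyramid built by
# repeated 2x average pooling, weighting the finer scales more heavily. Hypothetical call:
def _example_multiscale_l1():
    import torch
    criterion = MultiscaleL1Loss(scale=3)
    pred, target = torch.rand(1, 3, 128, 128), torch.rand(1, 3, 128, 128)
    mask = torch.ones(1, 1, 128, 128)   # expanded across channels inside the loss
    return criterion(pred, target, mask)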
from torchvision import models
class Vgg19(nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out | vid2vid-master | models/networks.py |
### Copyright (C) 2017 NVIDIA Corporation. All rights reserved.
### Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
import numpy as np
import torch
import os
import sys
from collections import OrderedDict
from torch.autograd import Variable
import util.util as util
from .base_model import BaseModel
from . import networks
class Vid2VidModelD(BaseModel):
def name(self):
return 'Vid2VidModelD'
def initialize(self, opt):
BaseModel.initialize(self, opt)
gpu_split_id = opt.n_gpus_gen
if opt.batchSize == 1:
gpu_split_id += 1
self.gpu_ids = ([opt.gpu_ids[0]] + opt.gpu_ids[gpu_split_id:]) if opt.n_gpus_gen != len(opt.gpu_ids) else opt.gpu_ids
if not opt.debug:
torch.backends.cudnn.benchmark = True
self.tD = opt.n_frames_D
self.output_nc = opt.output_nc
# define networks
# single image discriminator
self.input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc
if opt.use_instance:
self.input_nc += 1
netD_input_nc = self.input_nc + opt.output_nc
self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids)
if opt.add_face_disc:
self.netD_f = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
max(1, opt.num_D - 2), not opt.no_ganFeat, gpu_ids=self.gpu_ids)
# temporal discriminator
netD_input_nc = opt.output_nc * opt.n_frames_D + 2 * (opt.n_frames_D-1)
for s in range(opt.n_scales_temporal):
setattr(self, 'netD_T'+str(s), networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm,
opt.num_D, not opt.no_ganFeat, gpu_ids=self.gpu_ids))
print('---------- Networks initialized -------------')
print('-----------------------------------------------')
# load networks
if opt.continue_train or opt.load_pretrain:
self.load_network(self.netD, 'D', opt.which_epoch, opt.load_pretrain)
for s in range(opt.n_scales_temporal):
self.load_network(getattr(self, 'netD_T'+str(s)), 'D_T'+str(s), opt.which_epoch, opt.load_pretrain)
if opt.add_face_disc:
self.load_network(self.netD_f, 'D_f', opt.which_epoch, opt.load_pretrain)
# set loss functions and optimizers
self.old_lr = opt.lr
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.Tensor)
self.criterionFlow = networks.MaskedL1Loss()
self.criterionWarp = networks.MaskedL1Loss()
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg:
self.criterionVGG = networks.VGGLoss(self.gpu_ids[0])
self.loss_names = ['G_VGG', 'G_GAN', 'G_GAN_Feat',
'D_real', 'D_fake',
'G_Warp', 'F_Flow', 'F_Warp', 'W']
self.loss_names_T = ['G_T_GAN', 'G_T_GAN_Feat', 'D_T_real', 'D_T_fake', 'G_T_Warp']
if opt.add_face_disc:
self.loss_names += ['G_f_GAN', 'G_f_GAN_Feat', 'D_f_real', 'D_f_fake']
# initialize optimizers D and D_T
params = list(self.netD.parameters())
if opt.add_face_disc:
params += list(self.netD_f.parameters())
if opt.TTUR:
beta1, beta2 = 0, 0.9
lr = opt.lr * 2
else:
beta1, beta2 = opt.beta1, 0.999
lr = opt.lr
self.optimizer_D = torch.optim.Adam(params, lr=lr, betas=(beta1, beta2))
for s in range(opt.n_scales_temporal):
params = list(getattr(self, 'netD_T'+str(s)).parameters())
optimizer_D_T = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
setattr(self, 'optimizer_D_T'+str(s), optimizer_D_T)
def forward(self, scale_T, tensors_list, dummy_bs=0):
lambda_feat = self.opt.lambda_feat
lambda_F = self.opt.lambda_F
lambda_T = self.opt.lambda_T
scale_S = self.opt.n_scales_spatial
tD = self.opt.n_frames_D
if tensors_list[0].get_device() == self.gpu_ids[0]:
tensors_list = util.remove_dummy_from_tensor(tensors_list, dummy_bs)
if tensors_list[0].size(0) == 0:
return [self.Tensor(1, 1).fill_(0)] * (len(self.loss_names_T) if scale_T > 0 else len(self.loss_names))
if scale_T > 0:
real_B, fake_B, flow_ref, conf_ref = tensors_list
_, _, _, self.height, self.width = real_B.size()
loss_D_T_real, loss_D_T_fake, loss_G_T_GAN, loss_G_T_GAN_Feat = self.compute_loss_D_T(real_B, fake_B,
flow_ref/20, conf_ref, scale_T-1)
loss_G_T_Warp = torch.zeros_like(loss_G_T_GAN)
loss_list = [loss_G_T_GAN, loss_G_T_GAN_Feat, loss_D_T_real, loss_D_T_fake, loss_G_T_Warp]
loss_list = [loss.view(-1, 1) for loss in loss_list]
return loss_list
real_B, fake_B, fake_B_raw, real_A, real_B_prev, fake_B_prev, flow, weight, flow_ref, conf_ref = tensors_list
_, _, self.height, self.width = real_B.size()
################### Flow loss #################
if flow is not None:
# similar to flownet flow
loss_F_Flow = self.criterionFlow(flow, flow_ref, conf_ref) * lambda_F / (2 ** (scale_S-1))
# warped prev image should be close to current image
real_B_warp = self.resample(real_B_prev, flow)
loss_F_Warp = self.criterionFlow(real_B_warp, real_B, conf_ref) * lambda_T
################## weight loss ##################
loss_W = torch.zeros_like(weight)
if self.opt.no_first_img:
dummy0 = torch.zeros_like(weight)
loss_W = self.criterionFlow(weight, dummy0, conf_ref)
else:
loss_F_Flow = loss_F_Warp = loss_W = torch.zeros_like(conf_ref)
#################### fake_B loss ####################
### VGG + GAN loss
loss_G_VGG = (self.criterionVGG(fake_B, real_B) * lambda_feat) if not self.opt.no_vgg else torch.zeros_like(loss_W)
loss_D_real, loss_D_fake, loss_G_GAN, loss_G_GAN_Feat = self.compute_loss_D(self.netD, real_A, real_B, fake_B)
### Warp loss
fake_B_warp_ref = self.resample(fake_B_prev, flow_ref)
loss_G_Warp = self.criterionWarp(fake_B, fake_B_warp_ref.detach(), conf_ref) * lambda_T
if fake_B_raw is not None:
if not self.opt.no_vgg:
loss_G_VGG += self.criterionVGG(fake_B_raw, real_B) * lambda_feat
l_D_real, l_D_fake, l_G_GAN, l_G_GAN_Feat = self.compute_loss_D(self.netD, real_A, real_B, fake_B_raw)
loss_G_GAN += l_G_GAN; loss_G_GAN_Feat += l_G_GAN_Feat
loss_D_real += l_D_real; loss_D_fake += l_D_fake
if self.opt.add_face_disc:
face_weight = 2
ys, ye, xs, xe = self.get_face_region(real_A)
if ys is not None:
loss_D_f_real, loss_D_f_fake, loss_G_f_GAN, loss_G_f_GAN_Feat = self.compute_loss_D(self.netD_f,
real_A[:,:,ys:ye,xs:xe], real_B[:,:,ys:ye,xs:xe], fake_B[:,:,ys:ye,xs:xe])
loss_G_f_GAN *= face_weight
loss_G_f_GAN_Feat *= face_weight
else:
loss_D_f_real = loss_D_f_fake = loss_G_f_GAN = loss_G_f_GAN_Feat = torch.zeros_like(loss_D_real)
loss_list = [loss_G_VGG, loss_G_GAN, loss_G_GAN_Feat,
loss_D_real, loss_D_fake,
loss_G_Warp, loss_F_Flow, loss_F_Warp, loss_W]
if self.opt.add_face_disc:
loss_list += [loss_G_f_GAN, loss_G_f_GAN_Feat, loss_D_f_real, loss_D_f_fake]
loss_list = [loss.view(-1, 1) for loss in loss_list]
return loss_list
def compute_loss_D(self, netD, real_A, real_B, fake_B):
real_AB = torch.cat((real_A, real_B), dim=1)
fake_AB = torch.cat((real_A, fake_B), dim=1)
pred_real = netD.forward(real_AB)
pred_fake = netD.forward(fake_AB.detach())
loss_D_real = self.criterionGAN(pred_real, True)
loss_D_fake = self.criterionGAN(pred_fake, False)
pred_fake = netD.forward(fake_AB)
loss_G_GAN, loss_G_GAN_Feat = self.GAN_and_FM_loss(pred_real, pred_fake)
return loss_D_real, loss_D_fake, loss_G_GAN, loss_G_GAN_Feat
def compute_loss_D_T(self, real_B, fake_B, flow_ref, conf_ref, scale_T):
netD_T = getattr(self, 'netD_T'+str(scale_T))
real_B = real_B.view(-1, self.output_nc * self.tD, self.height, self.width)
fake_B = fake_B.view(-1, self.output_nc * self.tD, self.height, self.width)
if flow_ref is not None:
flow_ref = flow_ref.view(-1, 2 * (self.tD-1), self.height, self.width)
real_B = torch.cat([real_B, flow_ref], dim=1)
fake_B = torch.cat([fake_B, flow_ref], dim=1)
pred_real = netD_T.forward(real_B)
pred_fake = netD_T.forward(fake_B.detach())
loss_D_T_real = self.criterionGAN(pred_real, True)
loss_D_T_fake = self.criterionGAN(pred_fake, False)
pred_fake = netD_T.forward(fake_B)
loss_G_T_GAN, loss_G_T_GAN_Feat = self.GAN_and_FM_loss(pred_real, pred_fake)
return loss_D_T_real, loss_D_T_fake, loss_G_T_GAN, loss_G_T_GAN_Feat
def GAN_and_FM_loss(self, pred_real, pred_fake):
### GAN loss
loss_G_GAN = self.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = torch.zeros_like(loss_G_GAN)
if not self.opt.no_ganFeat:
feat_weights = 4.0 / (self.opt.n_layers_D + 1)
D_weights = 1.0 / self.opt.num_D
for i in range(min(len(pred_fake), self.opt.num_D)):
for j in range(len(pred_fake[i])-1):
loss_G_GAN_Feat += D_weights * feat_weights * \
self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat
return loss_G_GAN, loss_G_GAN_Feat
def get_face_region(self, real_A):
_, _, h, w = real_A.size()
if not self.opt.openpose_only:
face = (real_A[:,2] > 0.9).nonzero()
else:
face = ((real_A[:,0] > 0.19) & (real_A[:,0] < 0.21) & (real_A[:,1] < -0.99) & (real_A[:,2] > -0.61) & (real_A[:,2] < -0.59)).nonzero()
if face.size()[0]:
y, x = face[:,1], face[:,2]
ys, ye, xs, xe = y.min().item(), y.max().item(), x.min().item(), x.max().item()
yc, ylen = int(ys+ye)//2, self.opt.fineSize//32*8
xc, xlen = int(xs+xe)//2, self.opt.fineSize//32*8
yc = max(ylen//2, min(h-1 - ylen//2, yc))
xc = max(xlen//2, min(w-1 - xlen//2, xc))
ys, ye, xs, xe = yc - ylen//2, yc + ylen//2, xc - xlen//2, xc + xlen//2
return ys, ye, xs, xe
return None, None, None, None
def get_all_skipped_frames(self, frames_all, real_B, fake_B, flow_ref, conf_ref, t_scales, tD, n_frames_load, i, flowNet):
real_B_all, fake_B_all, flow_ref_all, conf_ref_all = frames_all
if t_scales > 0:
if self.opt.sparse_D:
real_B_all, real_B_skipped = get_skipped_frames_sparse(real_B_all, real_B, t_scales, tD, n_frames_load, i)
fake_B_all, fake_B_skipped = get_skipped_frames_sparse(fake_B_all, fake_B, t_scales, tD, n_frames_load, i)
flow_ref_all, flow_ref_skipped = get_skipped_frames_sparse(flow_ref_all, flow_ref, t_scales, tD, n_frames_load, i, is_flow=True)
conf_ref_all, conf_ref_skipped = get_skipped_frames_sparse(conf_ref_all, conf_ref, t_scales, tD, n_frames_load, i, is_flow=True)
else:
real_B_all, real_B_skipped = get_skipped_frames(real_B_all, real_B, t_scales, tD)
fake_B_all, fake_B_skipped = get_skipped_frames(fake_B_all, fake_B, t_scales, tD)
flow_ref_all, conf_ref_all, flow_ref_skipped, conf_ref_skipped = get_skipped_flows(flowNet,
flow_ref_all, conf_ref_all, real_B_skipped, flow_ref, conf_ref, t_scales, tD)
frames_all = real_B_all, fake_B_all, flow_ref_all, conf_ref_all
frames_skipped = real_B_skipped, fake_B_skipped, flow_ref_skipped, conf_ref_skipped
return frames_all, frames_skipped
def get_losses(self, loss_dict, loss_dict_T, t_scales):
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict['G_GAN_Feat'] + loss_dict['G_VGG']
loss_G += loss_dict['G_Warp'] + loss_dict['F_Flow'] + loss_dict['F_Warp'] + loss_dict['W']
if self.opt.add_face_disc:
loss_G += loss_dict['G_f_GAN'] + loss_dict['G_f_GAN_Feat']
loss_D += (loss_dict['D_f_fake'] + loss_dict['D_f_real']) * 0.5
# collect temporal losses
loss_D_T = []
t_scales_act = min(t_scales, len(loss_dict_T))
for s in range(t_scales_act):
loss_G += loss_dict_T[s]['G_T_GAN'] + loss_dict_T[s]['G_T_GAN_Feat'] + loss_dict_T[s]['G_T_Warp']
loss_D_T.append((loss_dict_T[s]['D_T_fake'] + loss_dict_T[s]['D_T_real']) * 0.5)
return loss_G, loss_D, loss_D_T, t_scales_act
def save(self, label):
self.save_network(self.netD, 'D', label, self.gpu_ids)
for s in range(self.opt.n_scales_temporal):
self.save_network(getattr(self, 'netD_T'+str(s)), 'D_T'+str(s), label, self.gpu_ids)
if self.opt.add_face_disc:
self.save_network(self.netD_f, 'D_f', label, self.gpu_ids)
# get temporally subsampled frames for real/fake sequences
def get_skipped_frames(B_all, B, t_scales, tD):
B_all = torch.cat([B_all.detach(), B], dim=1) if B_all is not None else B
B_skipped = [None] * t_scales
for s in range(t_scales):
tDs = tD ** s # temporal stride between sampled frames (e.g. 1, 3, 9, ...)
span = tDs * (tD-1) # number of frames the final triplet frames span before skipping (e.g., 2, 6, 18, ...)
n_groups = min(B_all.size()[1] - span, B.size()[1])
if n_groups > 0:
for t in range(0, n_groups, tD):
skip = B_all[:, (-span-t-1):-t:tDs].contiguous() if t != 0 else B_all[:, -span-1::tDs].contiguous()
B_skipped[s] = torch.cat([B_skipped[s], skip]) if B_skipped[s] is not None else skip
max_prev_frames = tD ** (t_scales-1) * (tD-1)
if B_all.size()[1] > max_prev_frames:
B_all = B_all[:, -max_prev_frames:]
return B_all, B_skipped
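# --- Illustrative sketch (added for exposition; not part of the original file) ---
# get_skipped_frames keeps a rolling buffer of past frames (B_all) and, per temporal
# scale s, extracts groups of tD frames spaced tD**s apart for the temporal
# discriminators. A minimal run with dummy frames, assuming tD=3 and two scales:
def _example_get_skipped_frames():
    import torch
    frames = torch.randn(1, 6, 3, 64, 64)   # (batch, time, channels, height, width)
    buffer, skipped = get_skipped_frames(None, frames, t_scales=2, tD=3)
    # scale 0 yields consecutive triplets; scale 1 needs 7 buffered frames, so it is still None
    assert skipped[0].shape[1:] == (3, 3, 64, 64)
    assert skipped[1] is None
    return buffer, skipped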
# get temporally subsampled frames for flows
def get_skipped_flows(flowNet, flow_ref_all, conf_ref_all, real_B, flow_ref, conf_ref, t_scales, tD):
flow_ref_skipped, conf_ref_skipped = [None] * t_scales, [None] * t_scales
flow_ref_all, flow = get_skipped_frames(flow_ref_all, flow_ref, 1, tD)
conf_ref_all, conf = get_skipped_frames(conf_ref_all, conf_ref, 1, tD)
if flow[0] is not None:
flow_ref_skipped[0], conf_ref_skipped[0] = flow[0][:,1:], conf[0][:,1:]
for s in range(1, t_scales):
if real_B[s] is not None and real_B[s].size()[1] == tD:
flow_ref_skipped[s], conf_ref_skipped[s] = flowNet(real_B[s][:,1:], real_B[s][:,:-1])
return flow_ref_all, conf_ref_all, flow_ref_skipped, conf_ref_skipped
def get_skipped_frames_sparse(B_all, B, t_scales, tD, n_frames_load, i, is_flow=False):
B_skipped = [None] * t_scales
_, _, ch, h, w = B.size()
for s in range(t_scales):
t_len = B_all[s].size()[1] if B_all[s] is not None else 0
if t_len > 0 and (t_len % tD) == 0:
B_all[s] = B_all[s][:, (-tD+1):] # get rid of unnecessary past frames
if s == 0:
B_all[0] = torch.cat([B_all[0].detach(), B], dim=1) if B_all[0] is not None else B
else:
tDs = tD ** s
idx_start = 0 if i == 0 else tDs - ((i-1) % tDs + 1)
if idx_start < n_frames_load:
tmp = B[:, idx_start::tDs].contiguous()
B_all[s] = torch.cat([B_all[s].detach(), tmp], dim=1) if B_all[s] is not None else tmp
t_len = B_all[s].size()[1] if B_all[s] is not None else 0
if t_len >= tD:
B_all[s] = B_all[s][:, (t_len % tD):]
B_skipped[s] = B_all[s].view(-1, tD, ch, h, w)
if is_flow:
B_skipped[s] = B_skipped[s][:, 1:]
return B_all, B_skipped | vid2vid-master | models/vid2vid_model_D.py |
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .networks.resample2d_package.resample2d import Resample2d
from .networks.channelnorm_package.channelnorm import ChannelNorm
from .networks import FlowNetC
from .networks import FlowNetS
from .networks import FlowNetSD
from .networks import FlowNetFusion
from .networks.submodules import *
# Parameter count = 162,518,834
class MyDict(dict):
pass
class fp16_resample2d(nn.Module):
def __init__(self):
super(fp16_resample2d, self).__init__()
self.resample = Resample2d()
def forward(self, input1, input2):
return self.resample(input1.float(), input2.float()).half()
class FlowNet2(nn.Module):
def __init__(self, args=None, batchNorm=False, div_flow = 20., fp16=False):
super(FlowNet2,self).__init__()
if args is None:
args = MyDict()
args.rgb_max = 1
args.fp16 = fp16
args.grads = {}
self.fp16 = fp16
self.batchNorm = batchNorm
self.div_flow = div_flow
self.rgb_max = args.rgb_max
self.args = args
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
# Block (FlowNetS1)
self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
# Block (FlowNetS2)
self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
# Block (FlowNetSD)
self.flownets_d = FlowNetSD.FlowNetSD(args, batchNorm=self.batchNorm)
self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
self.upsample4 = nn.Upsample(scale_factor=4, mode='nearest')
self.resample = Resample2d() if not args.fp16 else fp16_resample2d()
# Block (FLowNetFusion)
self.flownetfusion = FlowNetFusion.FlowNetFusion(args, batchNorm=self.batchNorm)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
def init_deconv_bilinear(self, weight):
f_shape = weight.size()
height, width = f_shape[-2], f_shape[-1]
f = np.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([height, width])
for x in range(width):
for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[y, x] = value  # index as [row, column]; identical result for square kernels
min_dim = min(f_shape[0], f_shape[1])
weight.data.fill_(0.)
for i in range(min_dim):
weight.data[i,i,:,:] = torch.from_numpy(bilinear)
return
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
x = torch.cat((x1,x2), dim = 1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)
# warp img1 to img0; magnitude of diff between img0 and warped_img1,
resampled_img1 = self.resample(x[:,3:,:,:], flownetc_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)
# warp img1 to img0 using flownets1; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample(x[:,3:,:,:], flownets1_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag
concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets2
flownets2_flow2 = self.flownets_2(concat2)[0]
flownets2_flow = self.upsample4(flownets2_flow2 * self.div_flow)
norm_flownets2_flow = self.channelnorm(flownets2_flow)
diff_flownets2_flow = self.resample(x[:,3:,:,:], flownets2_flow)
# if not diff_flownets2_flow.volatile:
# diff_flownets2_flow.register_hook(save_grad(self.args.grads, 'diff_flownets2_flow'))
diff_flownets2_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownets2_flow))
# if not diff_flownets2_img1.volatile:
# diff_flownets2_img1.register_hook(save_grad(self.args.grads, 'diff_flownets2_img1'))
# flownetsd
flownetsd_flow2 = self.flownets_d(x)[0]
flownetsd_flow = self.upsample3(flownetsd_flow2 / self.div_flow)
norm_flownetsd_flow = self.channelnorm(flownetsd_flow)
diff_flownetsd_flow = self.resample(x[:,3:,:,:], flownetsd_flow)
# if not diff_flownetsd_flow.volatile:
# diff_flownetsd_flow.register_hook(save_grad(self.args.grads, 'diff_flownetsd_flow'))
diff_flownetsd_img1 = self.channelnorm((x[:,:3,:,:]-diff_flownetsd_flow))
# if not diff_flownetsd_img1.volatile:
# diff_flownetsd_img1.register_hook(save_grad(self.args.grads, 'diff_flownetsd_img1'))
# concat img1 flownetsd, flownets2, norm_flownetsd, norm_flownets2, diff_flownetsd_img1, diff_flownets2_img1
concat3 = torch.cat((x[:,:3,:,:], flownetsd_flow, flownets2_flow, norm_flownetsd_flow, norm_flownets2_flow, diff_flownetsd_img1, diff_flownets2_img1), dim=1)
flownetfusion_flow = self.flownetfusion(concat3)
# if not flownetfusion_flow.volatile:
# flownetfusion_flow.register_hook(save_grad(self.args.grads, 'flownetfusion_flow'))
return flownetfusion_flow
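# --- Illustrative sketch (added for exposition; not part of the original file) ---
# FlowNet2 expects input of shape (N, 3, 2, H, W): two RGB frames stacked along dim 2,
# with H and W divisible by 64 and values in [0, 1] when the default rgb_max=1 is used.
# It relies on the compiled resample2d/channelnorm CUDA extensions, so this sketch
# assumes a CUDA build; the checkpoint path is hypothetical:
def _example_flownet2(checkpoint_path=None):
    import torch
    net = FlowNet2(fp16=False).cuda().eval()
    if checkpoint_path is not None:   # e.g. a converted 'FlowNet2_checkpoint.pth.tar'
        net.load_state_dict(torch.load(checkpoint_path)['state_dict'])
    pair = torch.rand(1, 3, 2, 256, 448).cuda()   # frame t and frame t+1
    with torch.no_grad():
        flow = net(pair)   # dense optical flow between the two frames
    return flow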
class FlowNet2C(FlowNetC.FlowNetC):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2C,self).__init__(args, batchNorm=batchNorm, div_flow=20)
self.rgb_max = args.rgb_max
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
# FlownetC top input stream
out_conv1a = self.conv1(x1)
out_conv2a = self.conv2(out_conv1a)
out_conv3a = self.conv3(out_conv2a)
# FlownetC bottom input stream
out_conv1b = self.conv1(x2)
out_conv2b = self.conv2(out_conv1b)
out_conv3b = self.conv3(out_conv2b)
# Merge streams
out_corr = self.corr(out_conv3a, out_conv3b) # False
out_corr = self.corr_activation(out_corr)
# Redirect top input stream and concatenate
out_conv_redir = self.conv_redir(out_conv3a)
in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1)
# Merged conv layers
out_conv3_1 = self.conv3_1(in_conv3_1)
out_conv4 = self.conv4_1(self.conv4(out_conv3_1))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2S(FlowNetS.FlowNetS):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2S,self).__init__(args, input_channels = 6, batchNorm=batchNorm)
self.rgb_max = args.rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)
out_conv1 = self.conv1(x)
out_conv2 = self.conv2(out_conv1)
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2SD(FlowNetSD.FlowNetSD):
def __init__(self, args, batchNorm=False, div_flow=20):
super(FlowNet2SD,self).__init__(args, batchNorm=batchNorm)
self.rgb_max = args.rgb_max
self.div_flow = div_flow
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x = torch.cat( (x[:,:,0,:,:], x[:,:,1,:,:]), dim = 1)
out_conv0 = self.conv0(x)
out_conv1 = self.conv1_1(self.conv1(out_conv0))
out_conv2 = self.conv2_1(self.conv2(out_conv1))
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
out_interconv5 = self.inter_conv5(concat5)
flow5 = self.predict_flow5(out_interconv5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
out_interconv4 = self.inter_conv4(concat4)
flow4 = self.predict_flow4(out_interconv4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
out_interconv3 = self.inter_conv3(concat3)
flow3 = self.predict_flow3(out_interconv3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
out_interconv2 = self.inter_conv2(concat2)
flow2 = self.predict_flow2(out_interconv2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return self.upsample1(flow2*self.div_flow)
class FlowNet2CS(nn.Module):
def __init__(self, args, batchNorm=False, div_flow = 20.):
super(FlowNet2CS,self).__init__()
self.batchNorm = batchNorm
self.div_flow = div_flow
self.rgb_max = args.rgb_max
self.args = args
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample1 = Resample2d() if not args.fp16 else fp16_resample2d()
# Block (FlowNetS1)
self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
x = torch.cat((x1,x2), dim = 1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)
# warp img1 to img0; magnitude of diff between img0 and warped_img1,
resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)
return flownets1_flow
class FlowNet2CSS(nn.Module):
def __init__(self, args, batchNorm=False, div_flow = 20.):
super(FlowNet2CSS,self).__init__()
self.batchNorm = batchNorm
self.div_flow = div_flow
self.rgb_max = args.rgb_max
self.args = args
self.channelnorm = ChannelNorm()
# First Block (FlowNetC)
self.flownetc = FlowNetC.FlowNetC(args, batchNorm=self.batchNorm)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample1 = Resample2d() if not args.fp16 else fp16_resample2d()
# Block (FlowNetS1)
self.flownets_1 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
self.upsample2 = nn.Upsample(scale_factor=4, mode='bilinear')
self.resample2 = Resample2d() if not args.fp16 else fp16_resample2d()
# Block (FlowNetS2)
self.flownets_2 = FlowNetS.FlowNetS(args, batchNorm=self.batchNorm)
self.upsample3 = nn.Upsample(scale_factor=4, mode='nearest')
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
def forward(self, inputs):
rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,1,))
x = (inputs - rgb_mean) / self.rgb_max
x1 = x[:,:,0,:,:]
x2 = x[:,:,1,:,:]
x = torch.cat((x1,x2), dim = 1)
# flownetc
flownetc_flow2 = self.flownetc(x)[0]
flownetc_flow = self.upsample1(flownetc_flow2*self.div_flow)
# warp img1 to img0; magnitude of diff between img0 and warped_img1,
resampled_img1 = self.resample1(x[:,3:,:,:], flownetc_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag ;
concat1 = torch.cat((x, resampled_img1, flownetc_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets1
flownets1_flow2 = self.flownets_1(concat1)[0]
flownets1_flow = self.upsample2(flownets1_flow2*self.div_flow)
# warp img1 to img0 using flownets1; magnitude of diff between img0 and warped_img1
resampled_img1 = self.resample2(x[:,3:,:,:], flownets1_flow)
diff_img0 = x[:,:3,:,:] - resampled_img1
norm_diff_img0 = self.channelnorm(diff_img0)
# concat img0, img1, img1->img0, flow, diff-mag
concat2 = torch.cat((x, resampled_img1, flownets1_flow/self.div_flow, norm_diff_img0), dim=1)
# flownets2
flownets2_flow2 = self.flownets_2(concat2)[0]
flownets2_flow = self.upsample3(flownets2_flow2 * self.div_flow)
return flownets2_flow
| vid2vid-master | models/flownet2_pytorch/models.py |
#!/usr/bin/env python2.7
import caffe
from caffe.proto import caffe_pb2
import sys, os
import torch
import torch.nn as nn
import argparse, tempfile
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('caffe_model', help='input model in hdf5 or caffemodel format')
parser.add_argument('prototxt_template',help='prototxt template')
parser.add_argument('flownet2_pytorch', help='path to flownet2-pytorch')
args = parser.parse_args()
args.rgb_max = 255
args.fp16 = False
args.grads = {}
# load models
sys.path.append(args.flownet2_pytorch)
import models
from utils.param_utils import *
width = 256
height = 256
keys = {'TARGET_WIDTH': width,
'TARGET_HEIGHT': height,
'ADAPTED_WIDTH':width,
'ADAPTED_HEIGHT':height,
'SCALE_WIDTH':1.,
'SCALE_HEIGHT':1.,}
template = '\n'.join(np.loadtxt(args.prototxt_template, dtype=str, delimiter='\n'))
for k in keys:
template = template.replace('$%s$'%(k),str(keys[k]))
prototxt = tempfile.NamedTemporaryFile(mode='w', delete=True)
prototxt.write(template)
prototxt.flush()
net = caffe.Net(prototxt.name, args.caffe_model, caffe.TEST)
weights = {}
biases = {}
for k, v in list(net.params.items()):
weights[k] = np.array(v[0].data).reshape(v[0].data.shape)
biases[k] = np.array(v[1].data).reshape(v[1].data.shape)
print((k, weights[k].shape, biases[k].shape))
if 'FlowNet2/' in args.caffe_model:
model = models.FlowNet2(args)
parse_flownetc(model.flownetc.modules(), weights, biases)
parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')
parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')
parse_flownetsd(model.flownets_d.modules(), weights, biases, param_prefix='netsd_')
parse_flownetfusion(model.flownetfusion.modules(), weights, biases, param_prefix='fuse_')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2_checkpoint.pth.tar'))
elif 'FlowNet2-C/' in args.caffe_model:
model = models.FlowNet2C(args)
parse_flownetc(model.modules(), weights, biases)
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-C_checkpoint.pth.tar'))
elif 'FlowNet2-CS/' in args.caffe_model:
model = models.FlowNet2CS(args)
parse_flownetc(model.flownetc.modules(), weights, biases)
parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CS_checkpoint.pth.tar'))
elif 'FlowNet2-CSS/' in args.caffe_model:
model = models.FlowNet2CSS(args)
parse_flownetc(model.flownetc.modules(), weights, biases)
parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')
parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS_checkpoint.pth.tar'))
elif 'FlowNet2-CSS-ft-sd/' in args.caffe_model:
model = models.FlowNet2CSS(args)
parse_flownetc(model.flownetc.modules(), weights, biases)
parse_flownets(model.flownets_1.modules(), weights, biases, param_prefix='net2_')
parse_flownets(model.flownets_2.modules(), weights, biases, param_prefix='net3_')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-CSS-ft-sd_checkpoint.pth.tar'))
elif 'FlowNet2-S/' in args.caffe_model:
model = models.FlowNet2S(args)
parse_flownetsonly(model.modules(), weights, biases, param_prefix='')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-S_checkpoint.pth.tar'))
elif 'FlowNet2-SD/' in args.caffe_model:
model = models.FlowNet2SD(args)
parse_flownetsd(model.modules(), weights, biases, param_prefix='')
state = {'epoch': 0,
'state_dict': model.state_dict(),
'best_EPE': 1e10}
torch.save(state, os.path.join(args.flownet2_pytorch, 'FlowNet2-SD_checkpoint.pth.tar'))
else:
    print(('model type could not be determined from input caffe model %s'%(args.caffe_model)))
quit()
print(("done converting ", args.caffe_model)) | vid2vid-master | models/flownet2_pytorch/convert.py |
import torch
import torch.utils.data as data
import os, math, random
from os.path import *
import numpy as np
from glob import glob
import utils.frame_utils as frame_utils
from scipy.misc import imread, imresize
class StaticRandomCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
h, w = image_size
self.h1 = random.randint(0, h - self.th)
self.w1 = random.randint(0, w - self.tw)
def __call__(self, img):
return img[self.h1:(self.h1+self.th), self.w1:(self.w1+self.tw),:]
class StaticCenterCrop(object):
def __init__(self, image_size, crop_size):
self.th, self.tw = crop_size
self.h, self.w = image_size
def __call__(self, img):
return img[(self.h-self.th)//2:(self.h+self.th)//2, (self.w-self.tw)//2:(self.w+self.tw)//2,:]
class MpiSintel(data.Dataset):
def __init__(self, args, is_cropped = False, root = '', dstype = 'clean', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
flow_root = join(root, 'flow')
image_root = join(root, dstype)
file_list = sorted(glob(join(flow_root, '*/*.flo')))
self.flow_list = []
self.image_list = []
for file in file_list:
if 'test' in file:
# print file
continue
fbase = file[len(flow_root)+1:]
fprefix = fbase[:-8]
fnum = int(fbase[-8:-4])
img1 = join(image_root, fprefix + "%04d"%(fnum+0) + '.png')
img2 = join(image_root, fprefix + "%04d"%(fnum+1) + '.png')
if not isfile(img1) or not isfile(img2) or not isfile(file):
continue
self.image_list += [[img1, img2]]
self.flow_list += [file]
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
assert (len(self.image_list) == len(self.flow_list))
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class MpiSintelClean(MpiSintel):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(MpiSintelClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'clean', replicates = replicates)
class MpiSintelFinal(MpiSintel):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(MpiSintelFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'final', replicates = replicates)
class FlyingChairs(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/FlyingChairs_release/data', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
images = sorted( glob( join(root, '*.ppm') ) )
self.flow_list = sorted( glob( join(root, '*.flo') ) )
assert (len(images)//2 == len(self.flow_list))
self.image_list = []
for i in range(len(self.flow_list)):
im1 = images[2*i]
im2 = images[2*i + 1]
self.image_list += [ [ im1, im2 ] ]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class FlyingThings(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/flyingthings3d', dstype = 'frames_cleanpass', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
image_dirs = sorted(glob(join(root, dstype, 'TRAIN/*/*')))
image_dirs = sorted([join(f, 'left') for f in image_dirs] + [join(f, 'right') for f in image_dirs])
flow_dirs = sorted(glob(join(root, 'optical_flow_flo_format/TRAIN/*/*')))
flow_dirs = sorted([join(f, 'into_future/left') for f in flow_dirs] + [join(f, 'into_future/right') for f in flow_dirs])
assert (len(image_dirs) == len(flow_dirs))
self.image_list = []
self.flow_list = []
for idir, fdir in zip(image_dirs, flow_dirs):
images = sorted( glob(join(idir, '*.png')) )
flows = sorted( glob(join(fdir, '*.flo')) )
for i in range(len(flows)):
self.image_list += [ [ images[i], images[i+1] ] ]
self.flow_list += [flows[i]]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class FlyingThingsClean(FlyingThings):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(FlyingThingsClean, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_cleanpass', replicates = replicates)
class FlyingThingsFinal(FlyingThings):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(FlyingThingsFinal, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'frames_finalpass', replicates = replicates)
class ChairsSDHom(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/chairssdhom/data', dstype = 'train', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
image1 = sorted( glob( join(root, dstype, 't0/*.png') ) )
image2 = sorted( glob( join(root, dstype, 't1/*.png') ) )
self.flow_list = sorted( glob( join(root, dstype, 'flow/*.flo') ) )
assert (len(image1) == len(self.flow_list))
self.image_list = []
for i in range(len(self.flow_list)):
im1 = image1[i]
im2 = image2[i]
self.image_list += [ [ im1, im2 ] ]
assert len(self.image_list) == len(self.flow_list)
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
flow = frame_utils.read_gen(self.flow_list[index])
flow = flow[::-1,:,:]
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
flow = cropper(flow)
images = np.array(images).transpose(3,0,1,2)
flow = flow.transpose(2,0,1)
images = torch.from_numpy(images.astype(np.float32))
flow = torch.from_numpy(flow.astype(np.float32))
return [images], [flow]
def __len__(self):
return self.size * self.replicates
class ChairsSDHomTrain(ChairsSDHom):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(ChairsSDHomTrain, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'train', replicates = replicates)
class ChairsSDHomTest(ChairsSDHom):
def __init__(self, args, is_cropped = False, root = '', replicates = 1):
super(ChairsSDHomTest, self).__init__(args, is_cropped = is_cropped, root = root, dstype = 'test', replicates = replicates)
class ImagesFromFolder(data.Dataset):
def __init__(self, args, is_cropped, root = '/path/to/frames/only/folder', iext = 'png', replicates = 1):
self.args = args
self.is_cropped = is_cropped
self.crop_size = args.crop_size
self.render_size = args.inference_size
self.replicates = replicates
images = sorted( glob( join(root, '*.' + iext) ) )
self.image_list = []
for i in range(len(images)-1):
im1 = images[i]
im2 = images[i+1]
self.image_list += [ [ im1, im2 ] ]
self.size = len(self.image_list)
self.frame_size = frame_utils.read_gen(self.image_list[0][0]).shape
if (self.render_size[0] < 0) or (self.render_size[1] < 0) or (self.frame_size[0]%64) or (self.frame_size[1]%64):
self.render_size[0] = ( (self.frame_size[0])//64 ) * 64
self.render_size[1] = ( (self.frame_size[1])//64 ) * 64
args.inference_size = self.render_size
def __getitem__(self, index):
index = index % self.size
img1 = frame_utils.read_gen(self.image_list[index][0])
img2 = frame_utils.read_gen(self.image_list[index][1])
images = [img1, img2]
image_size = img1.shape[:2]
if self.is_cropped:
cropper = StaticRandomCrop(image_size, self.crop_size)
else:
cropper = StaticCenterCrop(image_size, self.render_size)
images = list(map(cropper, images))
images = np.array(images).transpose(3,0,1,2)
images = torch.from_numpy(images.astype(np.float32))
return [images], [torch.zeros(images.size()[0:1] + (2,) + images.size()[-2:])]
def __len__(self):
return self.size * self.replicates
'''
import argparse
import sys, os
import importlib
from scipy.misc import imsave
import numpy as np
import datasets
from utils import flow_utils
importlib.reload(datasets)
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.inference_size = [1080, 1920]
args.crop_size = [384, 512]
args.effective_batch_size = 1
index = 500
v_dataset = datasets.MpiSintelClean(args, True, root='../MPI-Sintel/flow/training')
a, b = v_dataset[index]
im1 = a[0].numpy()[:,0,:,:].transpose(1,2,0)
im2 = a[0].numpy()[:,1,:,:].transpose(1,2,0)
imsave('./img1.png', im1)
imsave('./img2.png', im2)
flow_utils.writeFlow('./flow.flo', b[0].numpy().transpose(1,2,0))
'''
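# --- Added illustration (not part of the original vid2vid/flownet2 code) ---
# Every dataset class above only reads `crop_size` and `inference_size` from
# `args`, so a bare namespace is enough to construct one. ImagesFromFolder
# needs no ground-truth flow; it pairs consecutive frames and returns a zero
# flow target. The frames directory below is a placeholder path.
#
# from argparse import Namespace
# demo_args = Namespace(crop_size=[256, 256], inference_size=[-1, -1])
# demo_dataset = ImagesFromFolder(demo_args, is_cropped=False,
#                                 root='/path/to/frames/only/folder', iext='png')
# frames, zero_flow = demo_dataset[0]  # frames[0]: (3, 2, H, W) float tensor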
| vid2vid-master | models/flownet2_pytorch/datasets.py |
vid2vid-master | models/flownet2_pytorch/__init__.py |
|
'''
Portions of this code copyright 2017, Clement Pinard
'''
# freda (todo) : adversarial loss
import torch
import torch.nn as nn
import math
def EPE(input_flow, target_flow):
return torch.norm(target_flow-input_flow,p=2,dim=1).mean()
class L1(nn.Module):
def __init__(self):
super(L1, self).__init__()
def forward(self, output, target):
lossvalue = torch.abs(output - target).mean()
return lossvalue
class L2(nn.Module):
def __init__(self):
super(L2, self).__init__()
def forward(self, output, target):
lossvalue = torch.norm(output-target,p=2,dim=1).mean()
return lossvalue
class L1Loss(nn.Module):
def __init__(self, args):
super(L1Loss, self).__init__()
self.args = args
self.loss = L1()
self.loss_labels = ['L1', 'EPE']
def forward(self, output, target):
lossvalue = self.loss(output, target)
epevalue = EPE(output, target)
return [lossvalue, epevalue]
class L2Loss(nn.Module):
def __init__(self, args):
super(L2Loss, self).__init__()
self.args = args
self.loss = L2()
self.loss_labels = ['L2', 'EPE']
def forward(self, output, target):
lossvalue = self.loss(output, target)
epevalue = EPE(output, target)
return [lossvalue, epevalue]
class MultiScale(nn.Module):
def __init__(self, args, startScale = 4, numScales = 5, l_weight= 0.32, norm= 'L1'):
super(MultiScale,self).__init__()
self.startScale = startScale
self.numScales = numScales
self.loss_weights = torch.FloatTensor([(l_weight / 2 ** scale) for scale in range(self.numScales)])
self.args = args
self.l_type = norm
self.div_flow = 0.05
assert(len(self.loss_weights) == self.numScales)
if self.l_type == 'L1':
self.loss = L1()
else:
self.loss = L2()
self.multiScales = [nn.AvgPool2d(self.startScale * (2**scale), self.startScale * (2**scale)) for scale in range(self.numScales)]
        self.loss_labels = ['MultiScale-'+self.l_type, 'EPE']
def forward(self, output, target):
lossvalue = 0
epevalue = 0
if type(output) is tuple:
target = self.div_flow * target
for i, output_ in enumerate(output):
target_ = self.multiScales[i](target)
epevalue += self.loss_weights[i]*EPE(output_, target_)
lossvalue += self.loss_weights[i]*self.loss(output_, target_)
return [lossvalue, epevalue]
else:
epevalue += EPE(output, target)
lossvalue += self.loss(output, target)
return [lossvalue, epevalue]
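# --- Added illustration (not part of the original file) ---
# EPE above is the mean endpoint error: the per-pixel L2 distance between
# predicted and ground-truth flow vectors, averaged over the batch. The random
# tensors below are stand-ins for real network outputs, purely to show shapes.
if __name__ == '__main__':
    pred_flow = torch.randn(4, 2, 64, 64)   # (batch, 2, H, W) predicted flow
    gt_flow = torch.randn(4, 2, 64, 64)     # ground-truth flow, same shape
    print('EPE on random tensors:', EPE(pred_flow, gt_flow).item())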
| vid2vid-master | models/flownet2_pytorch/losses.py |
#!/usr/bin/env python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
from tensorboardX import SummaryWriter
import argparse, os, sys, subprocess
import setproctitle, colorama
import numpy as np
from tqdm import tqdm
from glob import glob
from os.path import *
import models, losses, datasets
from utils import flow_utils, tools
# fp32 copy of parameters for update
global param_copy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--start_epoch', type=int, default=1)
parser.add_argument('--total_epochs', type=int, default=10000)
parser.add_argument('--batch_size', '-b', type=int, default=8, help="Batch size")
    parser.add_argument('--train_n_batches', type=int, default = -1, help='Number of mini-batches per epoch. If < 0, it will be determined by training_dataloader')
parser.add_argument('--crop_size', type=int, nargs='+', default = [256, 256], help="Spatial dimension to crop training samples for training")
parser.add_argument('--gradient_clip', type=float, default=None)
parser.add_argument('--schedule_lr_frequency', type=int, default=0, help='in number of iterations (0 for no schedule)')
parser.add_argument('--schedule_lr_fraction', type=float, default=10)
parser.add_argument("--rgb_max", type=float, default = 255.)
parser.add_argument('--number_workers', '-nw', '--num_workers', type=int, default=8)
parser.add_argument('--number_gpus', '-ng', type=int, default=-1, help='number of GPUs to use')
parser.add_argument('--no_cuda', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--name', default='run', type=str, help='a name to append to the save directory')
parser.add_argument('--save', '-s', default='./work', type=str, help='directory for saving')
parser.add_argument('--validation_frequency', type=int, default=5, help='validate every n epochs')
parser.add_argument('--validation_n_batches', type=int, default=-1)
    parser.add_argument('--render_validation', action='store_true', help='run inference (save flows to file) every validation_frequency epochs')
parser.add_argument('--inference', action='store_true')
parser.add_argument('--inference_size', type=int, nargs='+', default = [-1,-1], help='spatial size divisible by 64. default (-1,-1) - largest possible valid size would be used')
parser.add_argument('--inference_batch_size', type=int, default=1)
parser.add_argument('--inference_n_batches', type=int, default=-1)
parser.add_argument('--save_flow', action='store_true', help='save predicted flows to file')
parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
parser.add_argument('--log_frequency', '--summ_iter', type=int, default=1, help="Log every n batches")
parser.add_argument('--skip_training', action='store_true')
parser.add_argument('--skip_validation', action='store_true')
parser.add_argument('--fp16', action='store_true', help='Run model in pseudo-fp16 mode (fp16 storage fp32 math).')
parser.add_argument('--fp16_scale', type=float, default=1024., help='Loss scaling, positive power of 2 values can improve fp16 convergence.')
tools.add_arguments_for_module(parser, models, argument_for_class='model', default='FlowNet2')
tools.add_arguments_for_module(parser, losses, argument_for_class='loss', default='L1Loss')
tools.add_arguments_for_module(parser, torch.optim, argument_for_class='optimizer', default='Adam', skip_params=['params'])
tools.add_arguments_for_module(parser, datasets, argument_for_class='training_dataset', default='MpiSintelFinal',
skip_params=['is_cropped'],
parameter_defaults={'root': './MPI-Sintel/flow/training'})
tools.add_arguments_for_module(parser, datasets, argument_for_class='validation_dataset', default='MpiSintelClean',
skip_params=['is_cropped'],
parameter_defaults={'root': './MPI-Sintel/flow/training',
'replicates': 1})
tools.add_arguments_for_module(parser, datasets, argument_for_class='inference_dataset', default='MpiSintelClean',
skip_params=['is_cropped'],
parameter_defaults={'root': './MPI-Sintel/flow/training',
'replicates': 1})
main_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(main_dir)
# Parse the official arguments
with tools.TimerBlock("Parsing Arguments") as block:
args = parser.parse_args()
if args.number_gpus < 0 : args.number_gpus = torch.cuda.device_count()
# Get argument defaults (hastag #thisisahack)
parser.add_argument('--IGNORE', action='store_true')
defaults = vars(parser.parse_args(['--IGNORE']))
# Print all arguments, color the non-defaults
for argument, value in sorted(vars(args).items()):
reset = colorama.Style.RESET_ALL
color = reset if value == defaults[argument] else colorama.Fore.MAGENTA
block.log('{}{}: {}{}'.format(color, argument, value, reset))
args.model_class = tools.module_to_dict(models)[args.model]
args.optimizer_class = tools.module_to_dict(torch.optim)[args.optimizer]
args.loss_class = tools.module_to_dict(losses)[args.loss]
args.training_dataset_class = tools.module_to_dict(datasets)[args.training_dataset]
args.validation_dataset_class = tools.module_to_dict(datasets)[args.validation_dataset]
args.inference_dataset_class = tools.module_to_dict(datasets)[args.inference_dataset]
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.current_hash = subprocess.check_output(["git", "rev-parse", "HEAD"]).rstrip()
args.log_file = join(args.save, 'args.txt')
# dict to collect activation gradients (for training debug purpose)
args.grads = {}
if args.inference:
args.skip_validation = True
args.skip_training = True
args.total_epochs = 1
args.inference_dir = "{}/inference".format(args.save)
print('Source Code')
print((' Current Git Hash: {}\n'.format(args.current_hash)))
# Change the title for `top` and `pkill` commands
setproctitle.setproctitle(args.save)
# Dynamically load the dataset class with parameters passed in via "--argument_[param]=[value]" arguments
with tools.TimerBlock("Initializing Datasets") as block:
args.effective_batch_size = args.batch_size * args.number_gpus
args.effective_inference_batch_size = args.inference_batch_size * args.number_gpus
args.effective_number_workers = args.number_workers * args.number_gpus
gpuargs = {'num_workers': args.effective_number_workers,
'pin_memory': True,
'drop_last' : True} if args.cuda else {}
inf_gpuargs = gpuargs.copy()
inf_gpuargs['num_workers'] = args.number_workers
if exists(args.training_dataset_root):
train_dataset = args.training_dataset_class(args, True, **tools.kwargs_from_args(args, 'training_dataset'))
block.log('Training Dataset: {}'.format(args.training_dataset))
block.log('Training Input: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][0]])))
block.log('Training Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in train_dataset[0][1]])))
train_loader = DataLoader(train_dataset, batch_size=args.effective_batch_size, shuffle=True, **gpuargs)
if exists(args.validation_dataset_root):
validation_dataset = args.validation_dataset_class(args, True, **tools.kwargs_from_args(args, 'validation_dataset'))
block.log('Validation Dataset: {}'.format(args.validation_dataset))
block.log('Validation Input: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][0]])))
block.log('Validation Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in validation_dataset[0][1]])))
validation_loader = DataLoader(validation_dataset, batch_size=args.effective_batch_size, shuffle=False, **gpuargs)
if exists(args.inference_dataset_root):
inference_dataset = args.inference_dataset_class(args, False, **tools.kwargs_from_args(args, 'inference_dataset'))
block.log('Inference Dataset: {}'.format(args.inference_dataset))
block.log('Inference Input: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][0]])))
block.log('Inference Targets: {}'.format(' '.join([str([d for d in x.size()]) for x in inference_dataset[0][1]])))
inference_loader = DataLoader(inference_dataset, batch_size=args.effective_inference_batch_size, shuffle=False, **inf_gpuargs)
# Dynamically load model and loss class with parameters passed in via "--model_[param]=[value]" or "--loss_[param]=[value]" arguments
with tools.TimerBlock("Building {} model".format(args.model)) as block:
class ModelAndLoss(nn.Module):
def __init__(self, args):
super(ModelAndLoss, self).__init__()
kwargs = tools.kwargs_from_args(args, 'model')
self.model = args.model_class(args, **kwargs)
kwargs = tools.kwargs_from_args(args, 'loss')
self.loss = args.loss_class(args, **kwargs)
def forward(self, data, target, inference=False ):
output = self.model(data)
loss_values = self.loss(output, target)
if not inference :
return loss_values
else :
return loss_values, output
model_and_loss = ModelAndLoss(args)
block.log('Effective Batch Size: {}'.format(args.effective_batch_size))
block.log('Number of parameters: {}'.format(sum([p.data.nelement() if p.requires_grad else 0 for p in model_and_loss.parameters()])))
        # assign model and loss to CUDA, or wrap with DataParallel
if args.cuda and (args.number_gpus > 0) and args.fp16:
block.log('Parallelizing')
model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))
block.log('Initializing CUDA')
model_and_loss = model_and_loss.cuda().half()
torch.cuda.manual_seed(args.seed)
param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in model_and_loss.parameters()]
elif args.cuda and args.number_gpus > 0:
block.log('Initializing CUDA')
model_and_loss = model_and_loss.cuda()
block.log('Parallelizing')
model_and_loss = nn.parallel.DataParallel(model_and_loss, device_ids=list(range(args.number_gpus)))
torch.cuda.manual_seed(args.seed)
else:
block.log('CUDA not being used')
torch.manual_seed(args.seed)
# Load weights if needed, otherwise randomly initialize
if args.resume and os.path.isfile(args.resume):
block.log("Loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
if not args.inference:
args.start_epoch = checkpoint['epoch']
best_err = checkpoint['best_EPE']
model_and_loss.module.model.load_state_dict(checkpoint['state_dict'])
block.log("Loaded checkpoint '{}' (at epoch {})".format(args.resume, checkpoint['epoch']))
elif args.resume and args.inference:
block.log("No checkpoint found at '{}'".format(args.resume))
quit()
else:
block.log("Random initialization")
block.log("Initializing save directory: {}".format(args.save))
if not os.path.exists(args.save):
os.makedirs(args.save)
train_logger = SummaryWriter(log_dir = os.path.join(args.save, 'train'), comment = 'training')
validation_logger = SummaryWriter(log_dir = os.path.join(args.save, 'validation'), comment = 'validation')
# Dynamically load the optimizer with parameters passed in via "--optimizer_[param]=[value]" arguments
with tools.TimerBlock("Initializing {} Optimizer".format(args.optimizer)) as block:
kwargs = tools.kwargs_from_args(args, 'optimizer')
if args.fp16:
optimizer = args.optimizer_class([p for p in param_copy if p.requires_grad], **kwargs)
else:
optimizer = args.optimizer_class([p for p in model_and_loss.parameters() if p.requires_grad], **kwargs)
for param, default in list(kwargs.items()):
block.log("{} = {} ({})".format(param, default, type(default)))
# Log all arguments to file
for argument, value in sorted(vars(args).items()):
block.log2file(args.log_file, '{}: {}'.format(argument, value))
    # Reusable function for training and validation
def train(args, epoch, start_iteration, data_loader, model, optimizer, logger, is_validate=False, offset=0):
statistics = []
total_loss = 0
if is_validate:
model.eval()
title = 'Validating Epoch {}'.format(epoch)
args.validation_n_batches = np.inf if args.validation_n_batches < 0 else args.validation_n_batches
progress = tqdm(tools.IteratorTimer(data_loader), ncols=100, total=np.minimum(len(data_loader), args.validation_n_batches), leave=True, position=offset, desc=title)
else:
model.train()
title = 'Training Epoch {}'.format(epoch)
args.train_n_batches = np.inf if args.train_n_batches < 0 else args.train_n_batches
progress = tqdm(tools.IteratorTimer(data_loader), ncols=120, total=np.minimum(len(data_loader), args.train_n_batches), smoothing=.9, miniters=1, leave=True, position=offset, desc=title)
last_log_time = progress._time()
for batch_idx, (data, target) in enumerate(progress):
data, target = [Variable(d) for d in data], [Variable(t) for t in target]
if args.cuda and args.number_gpus == 1:
                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
optimizer.zero_grad() if not is_validate else None
losses = model(data[0], target[0])
losses = [torch.mean(loss_value) for loss_value in losses]
loss_val = losses[0] # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]
            # gather loss_labels; a direct return leads to a recursion limit error as it looks for variables to gather
loss_labels = list(model.module.loss.loss_labels)
assert not np.isnan(total_loss)
if not is_validate and args.fp16:
loss_val.backward()
if args.gradient_clip:
torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)
params = list(model.parameters())
for i in range(len(params)):
param_copy[i].grad = params[i].grad.clone().type_as(params[i]).detach()
                    param_copy[i].grad.mul_(1./args.fp16_scale)
optimizer.step()
for i in range(len(params)):
params[i].data.copy_(param_copy[i].data)
elif not is_validate:
loss_val.backward()
if args.gradient_clip:
torch.nn.utils.clip_grad_norm(model.parameters(), args.gradient_clip)
optimizer.step()
# Update hyperparameters if needed
global_iteration = start_iteration + batch_idx
if not is_validate:
tools.update_hyperparameter_schedule(args, epoch, global_iteration, optimizer)
loss_labels.append('lr')
loss_values.append(optimizer.param_groups[0]['lr'])
loss_labels.append('load')
loss_values.append(progress.iterable.last_duration)
# Print out statistics
statistics.append(loss_values)
title = '{} Epoch {}'.format('Validating' if is_validate else 'Training', epoch)
progress.set_description(title + ' ' + tools.format_dictionary_of_losses(loss_labels, statistics[-1]))
if ((((global_iteration + 1) % args.log_frequency) == 0 and not is_validate) or
(is_validate and batch_idx == args.validation_n_batches - 1)):
global_iteration = global_iteration if not is_validate else start_iteration
logger.add_scalar('batch logs per second', len(statistics) / (progress._time() - last_log_time), global_iteration)
last_log_time = progress._time()
all_losses = np.array(statistics)
for i, key in enumerate(loss_labels):
logger.add_scalar('average batch ' + str(key), all_losses[:, i].mean(), global_iteration)
logger.add_histogram(str(key), all_losses[:, i], global_iteration)
# Reset Summary
statistics = []
if ( is_validate and ( batch_idx == args.validation_n_batches) ):
break
if ( (not is_validate) and (batch_idx == (args.train_n_batches)) ):
break
progress.close()
return total_loss / float(batch_idx + 1), (batch_idx + 1)
# Reusable function for inference
def inference(args, epoch, data_loader, model, offset=0):
model.eval()
if args.save_flow or args.render_validation:
flow_folder = "{}/inference/{}.epoch-{}-flow-field".format(args.save,args.name.replace('/', '.'),epoch)
if not os.path.exists(flow_folder):
os.makedirs(flow_folder)
args.inference_n_batches = np.inf if args.inference_n_batches < 0 else args.inference_n_batches
progress = tqdm(data_loader, ncols=100, total=np.minimum(len(data_loader), args.inference_n_batches), desc='Inferencing ',
leave=True, position=offset)
statistics = []
total_loss = 0
for batch_idx, (data, target) in enumerate(progress):
if args.cuda:
                data, target = [d.cuda(non_blocking=True) for d in data], [t.cuda(non_blocking=True) for t in target]
data, target = [Variable(d) for d in data], [Variable(t) for t in target]
# when ground-truth flows are not available for inference_dataset,
            # the targets are set to all zeros. Thus, losses are actually L1 or L2 norms of the computed optical flows,
# depending on the type of loss norm passed in
with torch.no_grad():
losses, output = model(data[0], target[0], inference=True)
losses = [torch.mean(loss_value) for loss_value in losses]
loss_val = losses[0] # Collect first loss for weight update
            total_loss += loss_val.item()
            loss_values = [v.item() for v in losses]
            # gather loss_labels; a direct return leads to a recursion limit error as it looks for variables to gather
loss_labels = list(model.module.loss.loss_labels)
statistics.append(loss_values)
# import IPython; IPython.embed()
if args.save_flow or args.render_validation:
for i in range(args.inference_batch_size):
_pflow = output[i].data.cpu().numpy().transpose(1, 2, 0)
flow_utils.writeFlow( join(flow_folder, '%06d.flo'%(batch_idx * args.inference_batch_size + i)), _pflow)
progress.set_description('Inference Averages for Epoch {}: '.format(epoch) + tools.format_dictionary_of_losses(loss_labels, np.array(statistics).mean(axis=0)))
progress.update(1)
if batch_idx == (args.inference_n_batches - 1):
break
progress.close()
return
# Primary epoch loop
best_err = 1e8
progress = tqdm(list(range(args.start_epoch, args.total_epochs + 1)), miniters=1, ncols=100, desc='Overall Progress', leave=True, position=0)
offset = 1
last_epoch_time = progress._time()
global_iteration = 0
for epoch in progress:
if args.inference or (args.render_validation and ((epoch - 1) % args.validation_frequency) == 0):
stats = inference(args=args, epoch=epoch - 1, data_loader=inference_loader, model=model_and_loss, offset=offset)
offset += 1
if not args.skip_validation and ((epoch - 1) % args.validation_frequency) == 0:
validation_loss, _ = train(args=args, epoch=epoch - 1, start_iteration=global_iteration, data_loader=validation_loader, model=model_and_loss, optimizer=optimizer, logger=validation_logger, is_validate=True, offset=offset)
offset += 1
is_best = False
if validation_loss < best_err:
best_err = validation_loss
is_best = True
checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset)
tools.save_checkpoint({ 'arch' : args.model,
'epoch': epoch,
'state_dict': model_and_loss.module.model.state_dict(),
'best_EPE': best_err},
is_best, args.save, args.model)
checkpoint_progress.update(1)
checkpoint_progress.close()
offset += 1
if not args.skip_training:
train_loss, iterations = train(args=args, epoch=epoch, start_iteration=global_iteration, data_loader=train_loader, model=model_and_loss, optimizer=optimizer, logger=train_logger, offset=offset)
global_iteration += iterations
offset += 1
# save checkpoint after every validation_frequency number of epochs
if ((epoch - 1) % args.validation_frequency) == 0:
checkpoint_progress = tqdm(ncols=100, desc='Saving Checkpoint', position=offset)
tools.save_checkpoint({ 'arch' : args.model,
'epoch': epoch,
'state_dict': model_and_loss.module.model.state_dict(),
'best_EPE': train_loss},
False, args.save, args.model, filename = 'train-checkpoint.pth.tar')
checkpoint_progress.update(1)
checkpoint_progress.close()
train_logger.add_scalar('seconds per epoch', progress._time() - last_epoch_time, epoch)
last_epoch_time = progress._time()
print("\n")
| vid2vid-master | models/flownet2_pytorch/main.py |
import numpy as np
TAG_CHAR = np.array([202021.25], np.float32)
def readFlow(fn):
""" Read .flo file in Middlebury format"""
# Code adapted from:
# http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
# WARNING: this will work on little-endian architectures (eg Intel x86) only!
# print 'fn = %s'%(fn)
with open(fn, 'rb') as f:
magic = np.fromfile(f, np.float32, count=1)
if 202021.25 != magic:
print('Magic number incorrect. Invalid .flo file')
return None
else:
w = np.fromfile(f, np.int32, count=1)
h = np.fromfile(f, np.int32, count=1)
# print 'Reading %d x %d flo file\n' % (w, h)
data = np.fromfile(f, np.float32, count=2*int(w)*int(h))
# Reshape data into 3D array (columns, rows, bands)
# The reshape here is for visualization, the original code is (w,h,2)
return np.resize(data, (int(h), int(w), 2))
def writeFlow(filename,uv,v=None):
""" Write optical flow to file.
If v is None, uv is assumed to contain both u and v channels,
stacked in depth.
Original code by Deqing Sun, adapted from Daniel Scharstein.
"""
nBands = 2
if v is None:
assert(uv.ndim == 3)
assert(uv.shape[2] == 2)
u = uv[:,:,0]
v = uv[:,:,1]
else:
u = uv
assert(u.shape == v.shape)
height,width = u.shape
f = open(filename,'wb')
# write the header
f.write(TAG_CHAR)
np.array(width).astype(np.int32).tofile(f)
np.array(height).astype(np.int32).tofile(f)
# arrange into matrix form
tmp = np.zeros((height, width*nBands))
tmp[:,np.arange(width)*2] = u
tmp[:,np.arange(width)*2 + 1] = v
tmp.astype(np.float32).tofile(f)
f.close()
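# --- Added illustration (not part of the original file) ---
# Round-trip sketch: writeFlow stores an (H, W, 2) float32 array after the
# 202021.25 magic tag, and readFlow recovers the same array. The temporary
# file path below is illustrative only.
if __name__ == '__main__':
    import os
    import tempfile
    demo_flow = np.random.rand(8, 16, 2).astype(np.float32)
    demo_path = os.path.join(tempfile.gettempdir(), 'demo_flow.flo')
    writeFlow(demo_path, demo_flow)
    recovered = readFlow(demo_path)
    assert recovered.shape == (8, 16, 2)
    assert np.allclose(recovered, demo_flow)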
| vid2vid-master | models/flownet2_pytorch/utils/flow_utils.py |
# freda (todo) :
import os, time, sys, math
import subprocess, shutil
from os.path import *
import numpy as np
from inspect import isclass
from pytz import timezone
from datetime import datetime
import inspect
import torch
def datestr():
pacific = timezone('US/Pacific')
now = datetime.now(pacific)
return '{}{:02}{:02}_{:02}{:02}'.format(now.year, now.month, now.day, now.hour, now.minute)
def module_to_dict(module, exclude=[]):
return dict([(x, getattr(module, x)) for x in dir(module)
if isclass(getattr(module, x))
and x not in exclude
and getattr(module, x) not in exclude])
class TimerBlock:
def __init__(self, title):
print(("{}".format(title)))
def __enter__(self):
        self.start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
        self.end = time.time()
self.interval = self.end - self.start
if exc_type is not None:
self.log("Operation failed\n")
else:
self.log("Operation finished\n")
def log(self, string):
        duration = time.time() - self.start
units = 's'
if duration > 60:
duration = duration / 60.
units = 'm'
print((" [{:.3f}{}] {}".format(duration, units, string)))
def log2file(self, fid, string):
fid = open(fid, 'a')
fid.write("%s\n"%(string))
fid.close()
def add_arguments_for_module(parser, module, argument_for_class, default, skip_params=[], parameter_defaults={}):
argument_group = parser.add_argument_group(argument_for_class.capitalize())
module_dict = module_to_dict(module)
argument_group.add_argument('--' + argument_for_class, type=str, default=default, choices=list(module_dict.keys()))
args, unknown_args = parser.parse_known_args()
class_obj = module_dict[vars(args)[argument_for_class]]
    argspec = inspect.getfullargspec(class_obj.__init__)
defaults = argspec.defaults[::-1] if argspec.defaults else None
args = argspec.args[::-1]
for i, arg in enumerate(args):
cmd_arg = '{}_{}'.format(argument_for_class, arg)
if arg not in skip_params + ['self', 'args']:
if arg in list(parameter_defaults.keys()):
argument_group.add_argument('--{}'.format(cmd_arg), type=type(parameter_defaults[arg]), default=parameter_defaults[arg])
elif (defaults is not None and i < len(defaults)):
argument_group.add_argument('--{}'.format(cmd_arg), type=type(defaults[i]), default=defaults[i])
else:
print(("[Warning]: non-default argument '{}' detected on class '{}'. This argument cannot be modified via the command line"
.format(arg, module.__class__.__name__)))
# We don't have a good way of dealing with inferring the type of the argument
# TODO: try creating a custom action and using ast's infer type?
# else:
# argument_group.add_argument('--{}'.format(cmd_arg), required=True)
def kwargs_from_args(args, argument_for_class):
argument_for_class = argument_for_class + '_'
return {key[len(argument_for_class):]: value for key, value in list(vars(args).items()) if argument_for_class in key and key != argument_for_class + 'class'}
def format_dictionary_of_losses(labels, values):
try:
string = ', '.join([('{}: {:' + ('.3f' if value >= 0.001 else '.1e') +'}').format(name, value) for name, value in zip(labels, values)])
except (TypeError, ValueError) as e:
print((list(zip(labels, values))))
string = '[Log Error] ' + str(e)
return string
class IteratorTimer():
def __init__(self, iterable):
self.iterable = iterable
self.iterator = self.iterable.__iter__()
def __iter__(self):
return self
def __len__(self):
return len(self.iterable)
def __next__(self):
start = time.time()
n = next(self.iterator)
self.last_duration = (time.time() - start)
return n
next = __next__
def gpumemusage():
    gpu_mem = subprocess.check_output("nvidia-smi | grep MiB | cut -f 3 -d '|'", shell=True).decode().replace(' ', '').replace('\n', '').replace('i', '')
    all_stat = [float(a) for a in gpu_mem.replace('/','').split('MB')[:-1]]
    gpu_mem = ''
    for i in range(len(all_stat)//2):
curr, tot = all_stat[2*i], all_stat[2*i+1]
util = "%1.2f"%(100*curr/tot)+'%'
cmem = str(int(math.ceil(curr/1024.)))+'GB'
gmem = str(int(math.ceil(tot/1024.)))+'GB'
gpu_mem += util + '--' + join(cmem, gmem) + ' '
return gpu_mem
def update_hyperparameter_schedule(args, epoch, global_iteration, optimizer):
if args.schedule_lr_frequency > 0:
for param_group in optimizer.param_groups:
if (global_iteration + 1) % args.schedule_lr_frequency == 0:
param_group['lr'] /= float(args.schedule_lr_fraction)
param_group['lr'] = float(np.maximum(param_group['lr'], 0.000001))
def save_checkpoint(state, is_best, path, prefix, filename='checkpoint.pth.tar'):
prefix_save = os.path.join(path, prefix)
name = prefix_save + '_' + filename
torch.save(state, name)
if is_best:
shutil.copyfile(name, prefix_save + '_model_best.pth.tar')
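# --- Added illustration (not part of the original file) ---
# kwargs_from_args strips the '<class>_' prefix from parsed argparse attributes,
# so a flag such as '--optimizer_lr=0.01' ends up as {'lr': 0.01}. The Namespace
# below is a hand-built stand-in for the result of parser.parse_args().
if __name__ == '__main__':
    from argparse import Namespace
    demo_args = Namespace(optimizer='Adam', optimizer_lr=0.01, optimizer_betas=(0.9, 0.999))
    print(kwargs_from_args(demo_args, 'optimizer'))  # {'lr': 0.01, 'betas': (0.9, 0.999)}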
| vid2vid-master | models/flownet2_pytorch/utils/tools.py |
vid2vid-master | models/flownet2_pytorch/utils/__init__.py |
|
import torch
import torch.nn as nn
import numpy as np
def parse_flownetc(modules, weights, biases):
keys = [
'conv1',
'conv2',
'conv3',
'conv_redir',
'conv3_1',
'conv4',
'conv4_1',
'conv5',
'conv5_1',
'conv6',
'conv6_1',
'deconv5',
'deconv4',
'deconv3',
'deconv2',
'Convolution1',
'Convolution2',
'Convolution3',
'Convolution4',
'Convolution5',
'upsample_flow6to5',
'upsample_flow5to4',
'upsample_flow4to3',
'upsample_flow3to2',
]
i = 0
for m in modules:
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
weight = weights[keys[i]].copy()
bias = biases[keys[i]].copy()
if keys[i] == 'conv1':
m.weight.data[:,:,:,:] = torch.from_numpy(np.flip(weight, axis=1).copy())
m.bias.data[:] = torch.from_numpy(bias)
else:
m.weight.data[:,:,:,:] = torch.from_numpy(weight)
m.bias.data[:] = torch.from_numpy(bias)
i = i + 1
return
def parse_flownets(modules, weights, biases, param_prefix='net2_'):
keys = [
'conv1',
'conv2',
'conv3',
'conv3_1',
'conv4',
'conv4_1',
'conv5',
'conv5_1',
'conv6',
'conv6_1',
'deconv5',
'deconv4',
'deconv3',
'deconv2',
'predict_conv6',
'predict_conv5',
'predict_conv4',
'predict_conv3',
'predict_conv2',
'upsample_flow6to5',
'upsample_flow5to4',
'upsample_flow4to3',
'upsample_flow3to2',
]
for i, k in enumerate(keys):
if 'upsample' in k:
keys[i] = param_prefix + param_prefix + k
else:
keys[i] = param_prefix + k
i = 0
for m in modules:
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
weight = weights[keys[i]].copy()
bias = biases[keys[i]].copy()
if keys[i] == param_prefix+'conv1':
m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy())
m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy())
m.weight.data[:,6:9,:,:] = torch.from_numpy(np.flip(weight[:,6:9,:,:], axis=1).copy())
m.weight.data[:,9::,:,:] = torch.from_numpy(weight[:,9:,:,:].copy())
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
else:
m.weight.data[:,:,:,:] = torch.from_numpy(weight)
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
i = i + 1
return
def parse_flownetsonly(modules, weights, biases, param_prefix=''):
keys = [
'conv1',
'conv2',
'conv3',
'conv3_1',
'conv4',
'conv4_1',
'conv5',
'conv5_1',
'conv6',
'conv6_1',
'deconv5',
'deconv4',
'deconv3',
'deconv2',
'Convolution1',
'Convolution2',
'Convolution3',
'Convolution4',
'Convolution5',
'upsample_flow6to5',
'upsample_flow5to4',
'upsample_flow4to3',
'upsample_flow3to2',
]
for i, k in enumerate(keys):
if 'upsample' in k:
keys[i] = param_prefix + param_prefix + k
else:
keys[i] = param_prefix + k
i = 0
for m in modules:
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
weight = weights[keys[i]].copy()
bias = biases[keys[i]].copy()
if keys[i] == param_prefix+'conv1':
# print ("%s :"%(keys[i]), m.weight.size(), m.bias.size(), tf_w[keys[i]].shape[::-1])
m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy())
m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy())
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
else:
m.weight.data[:,:,:,:] = torch.from_numpy(weight)
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
i = i + 1
return
def parse_flownetsd(modules, weights, biases, param_prefix='netsd_'):
keys = [
'conv0',
'conv1',
'conv1_1',
'conv2',
'conv2_1',
'conv3',
'conv3_1',
'conv4',
'conv4_1',
'conv5',
'conv5_1',
'conv6',
'conv6_1',
'deconv5',
'deconv4',
'deconv3',
'deconv2',
'interconv5',
'interconv4',
'interconv3',
'interconv2',
'Convolution1',
'Convolution2',
'Convolution3',
'Convolution4',
'Convolution5',
'upsample_flow6to5',
'upsample_flow5to4',
'upsample_flow4to3',
'upsample_flow3to2',
]
for i, k in enumerate(keys):
keys[i] = param_prefix + k
i = 0
for m in modules:
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
weight = weights[keys[i]].copy()
bias = biases[keys[i]].copy()
if keys[i] == param_prefix+'conv0':
m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy())
m.weight.data[:,3:6,:,:] = torch.from_numpy(np.flip(weight[:,3:6,:,:], axis=1).copy())
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
else:
m.weight.data[:,:,:,:] = torch.from_numpy(weight)
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
i = i + 1
return
def parse_flownetfusion(modules, weights, biases, param_prefix='fuse_'):
keys = [
'conv0',
'conv1',
'conv1_1',
'conv2',
'conv2_1',
'deconv1',
'deconv0',
'interconv1',
'interconv0',
'_Convolution5',
'_Convolution6',
'_Convolution7',
'upsample_flow2to1',
'upsample_flow1to0',
]
for i, k in enumerate(keys):
keys[i] = param_prefix + k
i = 0
for m in modules:
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
weight = weights[keys[i]].copy()
bias = biases[keys[i]].copy()
if keys[i] == param_prefix+'conv0':
m.weight.data[:,0:3,:,:] = torch.from_numpy(np.flip(weight[:,0:3,:,:], axis=1).copy())
m.weight.data[:,3::,:,:] = torch.from_numpy(weight[:,3:,:,:].copy())
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
else:
m.weight.data[:,:,:,:] = torch.from_numpy(weight)
if m.bias is not None:
m.bias.data[:] = torch.from_numpy(bias)
i = i + 1
return
| vid2vid-master | models/flownet2_pytorch/utils/param_utils.py |
import numpy as np
from os.path import *
from scipy.misc import imread
from . import flow_utils
def read_gen(file_name):
ext = splitext(file_name)[-1]
if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg':
im = imread(file_name)
if im.shape[2] > 3:
return im[:,:,:3]
else:
return im
elif ext == '.bin' or ext == '.raw':
return np.load(file_name)
elif ext == '.flo':
return flow_utils.readFlow(file_name).astype(np.float32)
return []
| vid2vid-master | models/flownet2_pytorch/utils/frame_utils.py |
'''
Portions of this code copyright 2017, Clement Pinard
'''
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .submodules import *
'Parameter count : 38,676,504 '
class FlowNetS(nn.Module):
def __init__(self, args, input_channels = 12, batchNorm=True):
super(FlowNetS,self).__init__()
self.batchNorm = batchNorm
self.conv1 = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2)
self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
self.conv3_1 = conv(self.batchNorm, 256, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm,1024, 1024)
self.deconv5 = deconv(1024,512)
self.deconv4 = deconv(1026,256)
self.deconv3 = deconv(770,128)
self.deconv2 = deconv(386,64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(1026)
self.predict_flow4 = predict_flow(770)
self.predict_flow3 = predict_flow(386)
self.predict_flow2 = predict_flow(194)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
def forward(self, x):
out_conv1 = self.conv1(x)
out_conv2 = self.conv2(out_conv1)
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return flow2,
| vid2vid-master | models/flownet2_pytorch/networks/FlowNetS.py |
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .submodules import *
'Parameter count = 581,226'
class FlowNetFusion(nn.Module):
def __init__(self,args, batchNorm=True):
super(FlowNetFusion,self).__init__()
self.batchNorm = batchNorm
self.conv0 = conv(self.batchNorm, 11, 64)
self.conv1 = conv(self.batchNorm, 64, 64, stride=2)
self.conv1_1 = conv(self.batchNorm, 64, 128)
self.conv2 = conv(self.batchNorm, 128, 128, stride=2)
self.conv2_1 = conv(self.batchNorm, 128, 128)
self.deconv1 = deconv(128,32)
self.deconv0 = deconv(162,16)
self.inter_conv1 = i_conv(self.batchNorm, 162, 32)
self.inter_conv0 = i_conv(self.batchNorm, 82, 16)
self.predict_flow2 = predict_flow(128)
self.predict_flow1 = predict_flow(32)
self.predict_flow0 = predict_flow(16)
self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
def forward(self, x):
out_conv0 = self.conv0(x)
out_conv1 = self.conv1_1(self.conv1(out_conv0))
out_conv2 = self.conv2_1(self.conv2(out_conv1))
flow2 = self.predict_flow2(out_conv2)
flow2_up = self.upsampled_flow2_to_1(flow2)
out_deconv1 = self.deconv1(out_conv2)
concat1 = torch.cat((out_conv1,out_deconv1,flow2_up),1)
out_interconv1 = self.inter_conv1(concat1)
flow1 = self.predict_flow1(out_interconv1)
flow1_up = self.upsampled_flow1_to_0(flow1)
out_deconv0 = self.deconv0(concat1)
concat0 = torch.cat((out_conv0,out_deconv0,flow1_up),1)
out_interconv0 = self.inter_conv0(concat0)
flow0 = self.predict_flow0(out_interconv0)
return flow0
| vid2vid-master | models/flownet2_pytorch/networks/FlowNetFusion.py |
# freda (todo) :
import torch.nn as nn
import torch
import numpy as np
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
if batchNorm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.1,inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
nn.LeakyReLU(0.1,inplace=True)
)
def i_conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, bias = True):
if batchNorm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),
nn.BatchNorm2d(out_planes),
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias),
)
def predict_flow(in_planes):
return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True)
def deconv(in_planes, out_planes):
return nn.Sequential(
nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True),
nn.LeakyReLU(0.1,inplace=True)
)
class tofp16(nn.Module):
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
class tofp32(nn.Module):
def __init__(self):
super(tofp32, self).__init__()
def forward(self, input):
return input.float()
def init_deconv_bilinear(weight):
f_shape = weight.size()
heigh, width = f_shape[-2], f_shape[-1]
f = np.ceil(width/2.0)
c = (2 * f - 1 - f % 2) / (2.0 * f)
bilinear = np.zeros([heigh, width])
for x in range(width):
for y in range(heigh):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weight.data.fill_(0.)
for i in range(f_shape[0]):
for j in range(f_shape[1]):
weight.data[i,j,:,:] = torch.from_numpy(bilinear)
def save_grad(grads, name):
def hook(grad):
grads[name] = grad
return hook
'''
def save_grad(grads, name):
def hook(grad):
grads[name] = grad
return hook
import torch
from channelnorm_package.modules.channelnorm import ChannelNorm
model = ChannelNorm().cuda()
grads = {}
a = 100*torch.autograd.Variable(torch.randn((1,3,5,5)).cuda(), requires_grad=True)
a.register_hook(save_grad(grads, 'a'))
b = model(a)
y = torch.mean(b)
y.backward()
'''
| vid2vid-master | models/flownet2_pytorch/networks/submodules.py |
vid2vid-master | models/flownet2_pytorch/networks/__init__.py |
|
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .correlation_package.correlation import Correlation
from .submodules import *
'Parameter count , 39,175,298 '
class FlowNetC(nn.Module):
def __init__(self, args, batchNorm=True, div_flow = 20):
super(FlowNetC,self).__init__()
self.fp16 = args.fp16
self.batchNorm = batchNorm
self.div_flow = div_flow
self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2)
self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1)
"""if args.fp16:
self.corr = nn.Sequential(
tofp32(),
Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1),
tofp16())
else:"""
self.corr = Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1)
self.corr_activation = nn.LeakyReLU(0.1,inplace=True)
self.conv3_1 = conv(self.batchNorm, 473, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm,1024, 1024)
self.deconv5 = deconv(1024,512)
self.deconv4 = deconv(1026,256)
self.deconv3 = deconv(770,128)
self.deconv2 = deconv(386,64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(1026)
self.predict_flow4 = predict_flow(770)
self.predict_flow3 = predict_flow(386)
self.predict_flow2 = predict_flow(194)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
def forward(self, x):
x1 = x[:,0:3,:,:]
x2 = x[:,3::,:,:]
out_conv1a = self.conv1(x1)
out_conv2a = self.conv2(out_conv1a)
out_conv3a = self.conv3(out_conv2a)
# FlownetC bottom input stream
out_conv1b = self.conv1(x2)
out_conv2b = self.conv2(out_conv1b)
out_conv3b = self.conv3(out_conv2b)
# Merge streams
if self.fp16:
out_corr = self.corr(out_conv3a.float(), out_conv3b.float()).half() # False
else:
out_corr = self.corr(out_conv3a, out_conv3b) # False
out_corr = self.corr_activation(out_corr)
# Redirect top input stream and concatenate
out_conv_redir = self.conv_redir(out_conv3a)
in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1)
# Merged conv layers
out_conv3_1 = self.conv3_1(in_conv3_1)
out_conv4 = self.conv4_1(self.conv4(out_conv3_1))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
flow5 = self.predict_flow5(concat5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
flow4 = self.predict_flow4(concat4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1)
flow3 = self.predict_flow3(concat3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1)
flow2 = self.predict_flow2(concat2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return flow2,
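# Hedged usage sketch (not part of the original file): assumes the compiled
# correlation_cuda extension and a CUDA device are available; `args` only needs
# the `fp16` flag read in __init__, and spatial dims should be divisible by 64.
#
#   from argparse import Namespace
#   net = FlowNetC(Namespace(fp16=False)).cuda().train()
#   frames = torch.randn(4, 6, 256, 448).cuda()      # two RGB frames stacked along channels
#   flow2, flow3, flow4, flow5, flow6 = net(frames)  # multi-scale flow predictions in training mode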
| vid2vid-master | models/flownet2_pytorch/networks/FlowNetC.py |
import torch
import torch.nn as nn
from torch.nn import init
import math
import numpy as np
from .submodules import *
'Parameter count = 45,371,666'
class FlowNetSD(nn.Module):
def __init__(self, args, batchNorm=True):
super(FlowNetSD,self).__init__()
self.batchNorm = batchNorm
self.conv0 = conv(self.batchNorm, 6, 64)
self.conv1 = conv(self.batchNorm, 64, 64, stride=2)
self.conv1_1 = conv(self.batchNorm, 64, 128)
self.conv2 = conv(self.batchNorm, 128, 128, stride=2)
self.conv2_1 = conv(self.batchNorm, 128, 128)
self.conv3 = conv(self.batchNorm, 128, 256, stride=2)
self.conv3_1 = conv(self.batchNorm, 256, 256)
self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
self.conv4_1 = conv(self.batchNorm, 512, 512)
self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
self.conv5_1 = conv(self.batchNorm, 512, 512)
self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
self.conv6_1 = conv(self.batchNorm,1024, 1024)
self.deconv5 = deconv(1024,512)
self.deconv4 = deconv(1026,256)
self.deconv3 = deconv(770,128)
self.deconv2 = deconv(386,64)
self.inter_conv5 = i_conv(self.batchNorm, 1026, 512)
self.inter_conv4 = i_conv(self.batchNorm, 770, 256)
self.inter_conv3 = i_conv(self.batchNorm, 386, 128)
self.inter_conv2 = i_conv(self.batchNorm, 194, 64)
self.predict_flow6 = predict_flow(1024)
self.predict_flow5 = predict_flow(512)
self.predict_flow4 = predict_flow(256)
self.predict_flow3 = predict_flow(128)
self.predict_flow2 = predict_flow(64)
self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
if isinstance(m, nn.ConvTranspose2d):
if m.bias is not None:
init.uniform_(m.bias)
init.xavier_uniform_(m.weight)
# init_deconv_bilinear(m.weight)
self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear')
def forward(self, x):
out_conv0 = self.conv0(x)
out_conv1 = self.conv1_1(self.conv1(out_conv0))
out_conv2 = self.conv2_1(self.conv2(out_conv1))
out_conv3 = self.conv3_1(self.conv3(out_conv2))
out_conv4 = self.conv4_1(self.conv4(out_conv3))
out_conv5 = self.conv5_1(self.conv5(out_conv4))
out_conv6 = self.conv6_1(self.conv6(out_conv5))
flow6 = self.predict_flow6(out_conv6)
flow6_up = self.upsampled_flow6_to_5(flow6)
out_deconv5 = self.deconv5(out_conv6)
concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1)
out_interconv5 = self.inter_conv5(concat5)
flow5 = self.predict_flow5(out_interconv5)
flow5_up = self.upsampled_flow5_to_4(flow5)
out_deconv4 = self.deconv4(concat5)
concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1)
out_interconv4 = self.inter_conv4(concat4)
flow4 = self.predict_flow4(out_interconv4)
flow4_up = self.upsampled_flow4_to_3(flow4)
out_deconv3 = self.deconv3(concat4)
concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1)
out_interconv3 = self.inter_conv3(concat3)
flow3 = self.predict_flow3(out_interconv3)
flow3_up = self.upsampled_flow3_to_2(flow3)
out_deconv2 = self.deconv2(concat3)
concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1)
out_interconv2 = self.inter_conv2(concat2)
flow2 = self.predict_flow2(out_interconv2)
if self.training:
return flow2,flow3,flow4,flow5,flow6
else:
return flow2,
| vid2vid-master | models/flownet2_pytorch/networks/FlowNetSD.py |
from torch.autograd import Function, Variable
from torch.nn.modules.module import Module
import channelnorm_cuda
class ChannelNormFunction(Function):
@staticmethod
def forward(ctx, input1, norm_deg=2):
assert input1.is_contiguous()
b, _, h, w = input1.size()
output = input1.new(b, 1, h, w).zero_()
channelnorm_cuda.forward(input1, output, norm_deg)
ctx.save_for_backward(input1, output)
ctx.norm_deg = norm_deg
return output
@staticmethod
def backward(ctx, grad_output):
input1, output = ctx.saved_tensors
grad_input1 = Variable(input1.new(input1.size()).zero_())
        channelnorm_cuda.backward(input1, output, grad_output.data,
                                  grad_input1.data, ctx.norm_deg)
return grad_input1, None
class ChannelNorm(Module):
def __init__(self, norm_deg=2):
super(ChannelNorm, self).__init__()
self.norm_deg = norm_deg
def forward(self, input1):
return ChannelNormFunction.apply(input1, self.norm_deg)
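# Hedged pure-PyTorch reference (not part of the original file): assumes the CUDA
# kernel computes a per-pixel p-norm over channels, yielding a (B, 1, H, W) map.
def channelnorm_reference(input1, norm_deg=2):
    # Equivalent to sqrt(sum_c x_c^2) at every spatial location when norm_deg == 2.
    return input1.norm(p=norm_deg, dim=1, keepdim=True)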
| vid2vid-master | models/flownet2_pytorch/networks/channelnorm_package/channelnorm.py |
 | vid2vid-master | models/flownet2_pytorch/networks/channelnorm_package/__init__.py |
#!/usr/bin/env python3
import os
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
cxx_args = ['-std=c++11']
nvcc_args = [
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70'
]
setup(
name='channelnorm_cuda',
ext_modules=[
CUDAExtension('channelnorm_cuda', [
'channelnorm_cuda.cc',
'channelnorm_kernel.cu'
], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})
],
cmdclass={
'build_ext': BuildExtension
})
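# Hedged build note (not part of the original file): this extension is typically
# built in place with `python setup.py install` from this directory (the repo's
# models/flownet2_pytorch/install.sh drives these builds), and requires a CUDA
# toolkit compatible with the gencode flags above.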
| vid2vid-master | models/flownet2_pytorch/networks/channelnorm_package/setup.py |
 | vid2vid-master | models/flownet2_pytorch/networks/correlation_package/__init__.py |
#!/usr/bin/env python3
import os
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
cxx_args = ['-std=c++11']
nvcc_args = [
'-gencode', 'arch=compute_50,code=sm_50',
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70'
]
setup(
name='correlation_cuda',
ext_modules=[
CUDAExtension('correlation_cuda', [
'correlation_cuda.cc',
'correlation_cuda_kernel.cu'
], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})
],
cmdclass={
'build_ext': BuildExtension
})
| vid2vid-master | models/flownet2_pytorch/networks/correlation_package/setup.py |
import torch
from torch.nn.modules.module import Module
from torch.autograd import Function
import correlation_cuda
class CorrelationFunction(Function):
def __init__(self, pad_size=3, kernel_size=3, max_displacement=20, stride1=1, stride2=2, corr_multiply=1):
super(CorrelationFunction, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
# self.out_channel = ((max_displacement/stride2)*2 + 1) * ((max_displacement/stride2)*2 + 1)
def forward(self, input1, input2):
self.save_for_backward(input1, input2)
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
output = input1.new()
correlation_cuda.forward(input1, input2, rbot1, rbot2, output,
self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)
return output
def backward(self, grad_output):
input1, input2 = self.saved_tensors
with torch.cuda.device_of(input1):
rbot1 = input1.new()
rbot2 = input2.new()
grad_input1 = input1.new()
grad_input2 = input2.new()
correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output, grad_input1, grad_input2,
self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)
return grad_input1, grad_input2
class Correlation(Module):
def __init__(self, pad_size=0, kernel_size=0, max_displacement=0, stride1=1, stride2=2, corr_multiply=1):
super(Correlation, self).__init__()
self.pad_size = pad_size
self.kernel_size = kernel_size
self.max_displacement = max_displacement
self.stride1 = stride1
self.stride2 = stride2
self.corr_multiply = corr_multiply
def forward(self, input1, input2):
result = CorrelationFunction(self.pad_size, self.kernel_size, self.max_displacement,self.stride1, self.stride2, self.corr_multiply)(input1, input2)
return result
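# Hedged sketch (not part of the original file): the class above uses the legacy
# torch.autograd.Function interface (instance __init__/forward/backward), which newer
# PyTorch versions reject. A static-method equivalent could look like the following,
# assuming the same correlation_cuda signatures used above.
class CorrelationFunctionV2(Function):
    @staticmethod
    def forward(ctx, input1, input2, pad_size, kernel_size, max_displacement,
                stride1, stride2, corr_multiply):
        ctx.save_for_backward(input1, input2)
        ctx.params = (pad_size, kernel_size, max_displacement, stride1, stride2, corr_multiply)
        with torch.cuda.device_of(input1):
            rbot1, rbot2, output = input1.new(), input2.new(), input1.new()
            correlation_cuda.forward(input1, input2, rbot1, rbot2, output, *ctx.params)
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input1, input2 = ctx.saved_tensors
        with torch.cuda.device_of(input1):
            rbot1, rbot2 = input1.new(), input2.new()
            grad_input1, grad_input2 = input1.new(), input2.new()
            correlation_cuda.backward(input1, input2, rbot1, rbot2, grad_output.contiguous(),
                                      grad_input1, grad_input2, *ctx.params)
        # One gradient per forward input; the integer hyper-parameters get None.
        return grad_input1, grad_input2, None, None, None, None, None, None
# Usage would then be CorrelationFunctionV2.apply(input1, input2, pad_size, ...).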
| vid2vid-master | models/flownet2_pytorch/networks/correlation_package/correlation.py |
from torch.nn.modules.module import Module
from torch.autograd import Function, Variable
import resample2d_cuda
class Resample2dFunction(Function):
@staticmethod
def forward(ctx, input1, input2, kernel_size=1):
assert input1.is_contiguous()
assert input2.is_contiguous()
ctx.save_for_backward(input1, input2)
ctx.kernel_size = kernel_size
_, d, _, _ = input1.size()
b, _, h, w = input2.size()
output = input1.new(b, d, h, w).zero_()
resample2d_cuda.forward(input1, input2, output, kernel_size)
return output
@staticmethod
def backward(ctx, grad_output):
assert grad_output.is_contiguous()
input1, input2 = ctx.saved_tensors
grad_input1 = Variable(input1.new(input1.size()).zero_())
grad_input2 = Variable(input1.new(input2.size()).zero_())
resample2d_cuda.backward(input1, input2, grad_output.data,
grad_input1.data, grad_input2.data,
ctx.kernel_size)
return grad_input1, grad_input2, None
class Resample2d(Module):
def __init__(self, kernel_size=1):
super(Resample2d, self).__init__()
self.kernel_size = kernel_size
def forward(self, input1, input2):
input1_c = input1.contiguous()
return Resample2dFunction.apply(input1_c, input2, self.kernel_size)
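# Hedged pure-PyTorch reference (not part of the original file): assumes the CUDA
# kernel bilinearly warps input1 by the flow field input2 given in pixel units,
# with input1 and input2 sharing the same spatial resolution.
import torch
import torch.nn.functional as F
def resample2d_reference(image, flow):
    """Warp `image` (B, C, H, W) with `flow` (B, 2, H, W)."""
    b, _, h, w = flow.size()
    # Base sampling grid in pixel coordinates.
    xx = torch.arange(w, device=image.device).view(1, 1, 1, w).expand(b, 1, h, w)
    yy = torch.arange(h, device=image.device).view(1, 1, h, 1).expand(b, 1, h, w)
    grid = torch.cat((xx, yy), 1).float() + flow
    # Normalize to [-1, 1] as expected by grid_sample.
    grid_x = 2.0 * grid[:, 0] / max(w - 1, 1) - 1.0
    grid_y = 2.0 * grid[:, 1] / max(h - 1, 1) - 1.0
    return F.grid_sample(image, torch.stack((grid_x, grid_y), dim=3), mode='bilinear', align_corners=True)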
| vid2vid-master | models/flownet2_pytorch/networks/resample2d_package/resample2d.py |
 | vid2vid-master | models/flownet2_pytorch/networks/resample2d_package/__init__.py |
#!/usr/bin/env python3
import os
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
cxx_args = ['-std=c++11']
nvcc_args = [
'-gencode', 'arch=compute_50,code=sm_50',
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70'
]
setup(
name='resample2d_cuda',
ext_modules=[
CUDAExtension('resample2d_cuda', [
'resample2d_cuda.cc',
'resample2d_kernel.cu'
], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args})
],
cmdclass={
'build_ext': BuildExtension
})
| vid2vid-master | models/flownet2_pytorch/networks/resample2d_package/setup.py |
import os
from download_gdrive import *
file_id = '1E8re-b6csNuo-abg1vJKCDjCzlIam50F'
chpt_path = './models/flownet2_pytorch/'
destination = os.path.join(chpt_path, 'FlowNet2_checkpoint.pth.tar')
download_file_from_google_drive(file_id, destination) | vid2vid-master | scripts/download_models_flownet2.py |
import os
from download_gdrive import *
file_id = '1rPcbnanuApZeo2uc7h55OneBkbcFCnnf'
chpt_path = './datasets/'
if not os.path.isdir(chpt_path):
os.makedirs(chpt_path)
destination = os.path.join(chpt_path, 'datasets.zip')
download_file_from_google_drive(file_id, destination)
unzip_file(destination, chpt_path) | vid2vid-master | scripts/download_datasets.py |
# Download code taken from https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive/39225039#39225039
import requests, zipfile, os
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip_file(file_name, unzip_path):
zip_ref = zipfile.ZipFile(file_name, 'r')
zip_ref.extractall(unzip_path)
zip_ref.close()
os.remove(file_name) | vid2vid-master | scripts/download_gdrive.py |
import os
from download_gdrive import *
import torch
"""if torch.__version__ == '0.4.1':
file_id = '1gKwE1Ad41TwtAzwDcN3dYa_S6DcVyiSl'
file_name = 'flownet2_pytorch_041.zip'
else:
file_id = '1F2h_6e8gyTqxnbmFFW72zsxx_JX0dKFo'
file_name = 'flownet2_pytorch_040.zip'"""
chpt_path = './models/'
if not os.path.isdir(chpt_path):
os.makedirs(chpt_path)
"""destination = os.path.join(chpt_path, file_name)
download_file_from_google_drive(file_id, destination)
unzip_file(destination, chpt_path)"""
os.system('cd %s/flownet2_pytorch/; bash install.sh; cd ../../' % chpt_path) | vid2vid-master | scripts/download_flownet2.py |
import os
from download_gdrive import *
file_id = '10LvNw-2lrh-6sPGkWbQDfHspkqz5AKxb'
chpt_path = './checkpoints/'
if not os.path.isdir(chpt_path):
os.makedirs(chpt_path)
destination = os.path.join(chpt_path, 'models_face.zip')
download_file_from_google_drive(file_id, destination)
unzip_file(destination, chpt_path) | vid2vid-master | scripts/face/download_models.py |
# Download code taken from https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive/39225039#39225039
import requests, zipfile, os
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip_file(file_name, unzip_path):
zip_ref = zipfile.ZipFile(file_name, 'r')
zip_ref.extractall(unzip_path)
zip_ref.close()
os.remove(file_name) | vid2vid-master | scripts/face/download_gdrive.py |
import os
from download_gdrive import *
file_id = '1QoE1p3QikxNVbbTBWWRDtIspg-RcLE8y'
chpt_path = './checkpoints/'
if not os.path.isdir(chpt_path):
os.makedirs(chpt_path)
destination = os.path.join(chpt_path, 'models_g1.zip')
download_file_from_google_drive(file_id, destination)
unzip_file(destination, chpt_path)
| vid2vid-master | scripts/street/download_models_g1.py |
import os
from download_gdrive import *
file_id = '1MKtImgtnGC28EPU7Nh9DfFpHW6okNVkl'
chpt_path = './checkpoints/'
if not os.path.isdir(chpt_path):
os.makedirs(chpt_path)
destination = os.path.join(chpt_path, 'models.zip')
download_file_from_google_drive(file_id, destination)
unzip_file(destination, chpt_path) | vid2vid-master | scripts/street/download_models.py |
# Download code taken from https://stackoverflow.com/questions/25010369/wget-curl-large-file-from-google-drive/39225039#39225039
import requests, zipfile, os
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params = { 'id' : id }, stream = True)
token = get_confirm_token(response)
if token:
params = { 'id' : id, 'confirm' : token }
response = session.get(URL, params = params, stream = True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
def unzip_file(file_name, unzip_path):
zip_ref = zipfile.ZipFile(file_name, 'r')
zip_ref.extractall(unzip_path)
zip_ref.close()
os.remove(file_name) | vid2vid-master | scripts/street/download_gdrive.py |
from util.util import add_dummy_to_tensor
import torch.utils.data as data
import torch
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def update_training_batch(self, ratio): # update the training sequence length to be longer
seq_len_max = min(128, self.seq_len_max) - (self.opt.n_frames_G - 1)
if self.n_frames_total < seq_len_max:
self.n_frames_total = min(seq_len_max, self.opt.n_frames_total * (2**ratio))
#self.n_frames_total = min(seq_len_max, self.opt.n_frames_total * (ratio + 1))
print('--------- Updating training sequence length to %d ---------' % self.n_frames_total)
def init_frame_idx(self, A_paths):
self.n_of_seqs = min(len(A_paths), self.opt.max_dataset_size) # number of sequences to train
self.seq_len_max = max([len(A) for A in A_paths]) # max number of frames in the training sequences
self.seq_idx = 0 # index for current sequence
self.frame_idx = self.opt.start_frame if not self.opt.isTrain else 0 # index for current frame in the sequence
self.frames_count = [] # number of frames in each sequence
for path in A_paths:
self.frames_count.append(len(path) - self.opt.n_frames_G + 1)
self.folder_prob = [count / sum(self.frames_count) for count in self.frames_count]
self.n_frames_total = self.opt.n_frames_total if self.opt.isTrain else 1
self.A, self.B, self.I = None, None, None
def update_frame_idx(self, A_paths, index):
if self.opt.isTrain:
if self.opt.dataset_mode == 'pose':
seq_idx = np.random.choice(len(A_paths), p=self.folder_prob) # randomly pick sequence to train
self.frame_idx = index
else:
seq_idx = index % self.n_of_seqs
return None, None, None, seq_idx
else:
self.change_seq = self.frame_idx >= self.frames_count[self.seq_idx]
if self.change_seq:
self.seq_idx += 1
self.frame_idx = 0
self.A, self.B, self.I = None, None, None
return self.A, self.B, self.I, self.seq_idx
def init_data_params(self, data, n_gpus, tG):
opt = self.opt
_, n_frames_total, self.height, self.width = data['B'].size() # n_frames_total = n_frames_load * n_loadings + tG - 1
n_frames_total = n_frames_total // opt.output_nc
n_frames_load = opt.max_frames_per_gpu * n_gpus # number of total frames loaded into GPU at a time for each batch
n_frames_load = min(n_frames_load, n_frames_total - tG + 1)
self.t_len = n_frames_load + tG - 1 # number of loaded frames plus previous frames
return n_frames_total-self.t_len+1, n_frames_load, self.t_len
def init_data(self, t_scales):
fake_B_last = None # the last generated frame from previous training batch (which becomes input to the next batch)
real_B_all, fake_B_all, flow_ref_all, conf_ref_all = None, None, None, None # all real/generated frames so far
if self.opt.sparse_D:
real_B_all, fake_B_all, flow_ref_all, conf_ref_all = [None]*t_scales, [None]*t_scales, [None]*t_scales, [None]*t_scales
frames_all = real_B_all, fake_B_all, flow_ref_all, conf_ref_all
return fake_B_last, frames_all
def prepare_data(self, data, i, input_nc, output_nc):
t_len, height, width = self.t_len, self.height, self.width
# 5D tensor: batchSize, # of frames, # of channels, height, width
input_A = (data['A'][:, i*input_nc:(i+t_len)*input_nc, ...]).view(-1, t_len, input_nc, height, width)
input_B = (data['B'][:, i*output_nc:(i+t_len)*output_nc, ...]).view(-1, t_len, output_nc, height, width)
inst_A = (data['inst'][:, i:i+t_len, ...]).view(-1, t_len, 1, height, width) if len(data['inst'].size()) > 2 else None
return [input_A, input_B, inst_A]
def make_power_2(n, base=32.0):
return int(round(n / base) * base)
def get_img_params(opt, size):
w, h = size
new_h, new_w = h, w
if 'resize' in opt.resize_or_crop: # resize image to be loadSize x loadSize
new_h = new_w = opt.loadSize
elif 'scaleWidth' in opt.resize_or_crop: # scale image width to be loadSize
new_w = opt.loadSize
new_h = opt.loadSize * h // w
elif 'scaleHeight' in opt.resize_or_crop: # scale image height to be loadSize
new_h = opt.loadSize
new_w = opt.loadSize * w // h
elif 'randomScaleWidth' in opt.resize_or_crop: # randomly scale image width to be somewhere between loadSize and fineSize
new_w = random.randint(opt.fineSize, opt.loadSize + 1)
new_h = new_w * h // w
elif 'randomScaleHeight' in opt.resize_or_crop: # randomly scale image height to be somewhere between loadSize and fineSize
new_h = random.randint(opt.fineSize, opt.loadSize + 1)
new_w = new_h * w // h
new_w = int(round(new_w / 4)) * 4
new_h = int(round(new_h / 4)) * 4
crop_x = crop_y = 0
crop_w = crop_h = 0
if 'crop' in opt.resize_or_crop or 'scaledCrop' in opt.resize_or_crop:
if 'crop' in opt.resize_or_crop: # crop patches of size fineSize x fineSize
crop_w = crop_h = opt.fineSize
else:
if 'Width' in opt.resize_or_crop: # crop patches of width fineSize
crop_w = opt.fineSize
crop_h = opt.fineSize * h // w
else: # crop patches of height fineSize
crop_h = opt.fineSize
crop_w = opt.fineSize * w // h
crop_w, crop_h = make_power_2(crop_w), make_power_2(crop_h)
x_span = (new_w - crop_w) // 2
crop_x = np.maximum(0, np.minimum(x_span*2, int(np.random.randn() * x_span/3 + x_span)))
crop_y = random.randint(0, np.minimum(np.maximum(0, new_h - crop_h), new_h // 8))
#crop_x = random.randint(0, np.maximum(0, new_w - crop_w))
#crop_y = random.randint(0, np.maximum(0, new_h - crop_h))
else:
new_w, new_h = make_power_2(new_w), make_power_2(new_h)
flip = (random.random() > 0.5) and (opt.dataset_mode != 'pose')
return {'new_size': (new_w, new_h), 'crop_size': (crop_w, crop_h), 'crop_pos': (crop_x, crop_y), 'flip': flip}
def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True):
transform_list = []
### resize input image
if 'resize' in opt.resize_or_crop:
osize = [opt.loadSize, opt.loadSize]
        transform_list.append(transforms.Resize(osize, method))
else:
transform_list.append(transforms.Lambda(lambda img: __scale_image(img, params['new_size'], method)))
### crop patches from image
if 'crop' in opt.resize_or_crop or 'scaledCrop' in opt.resize_or_crop:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_size'], params['crop_pos'])))
### random flip
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
if toTensor:
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def toTensor_normalize():
transform_list = [transforms.ToTensor()]
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
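# Hedged usage sketch (not part of the original file): `opt` is assumed to carry the
# fields referenced above (resize_or_crop, loadSize, fineSize, isTrain, no_flip,
# dataset_mode). Parameters are sampled once per clip and reused for every frame so
# that all frames get identical resizing, cropping and flipping.
#
#   img = Image.open(path).convert('RGB')
#   params = get_img_params(opt, img.size)
#   transform = get_transform(opt, params)
#   tensor = transform(img)   # (3, H, W), normalized to [-1, 1]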
def __scale_image(img, size, method=Image.BICUBIC):
w, h = size
return img.resize((w, h), method)
def __crop(img, size, pos):
ow, oh = img.size
tw, th = size
x1, y1 = pos
if (ow > tw or oh > th):
return img.crop((x1, y1, min(ow, x1 + tw), min(oh, y1 + th)))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
def get_video_params(opt, n_frames_total, cur_seq_len, index):
tG = opt.n_frames_G
if opt.isTrain:
n_frames_total = min(n_frames_total, cur_seq_len - tG + 1)
n_gpus = opt.n_gpus_gen if opt.batchSize == 1 else 1 # number of generator GPUs for each batch
n_frames_per_load = opt.max_frames_per_gpu * n_gpus # number of frames to load into GPUs at one time (for each batch)
n_frames_per_load = min(n_frames_total, n_frames_per_load)
n_loadings = n_frames_total // n_frames_per_load # how many times are needed to load entire sequence into GPUs
n_frames_total = n_frames_per_load * n_loadings + tG - 1 # rounded overall number of frames to read from the sequence
max_t_step = min(opt.max_t_step, (cur_seq_len-1) // (n_frames_total-1))
t_step = np.random.randint(max_t_step) + 1 # spacing between neighboring sampled frames
offset_max = max(1, cur_seq_len - (n_frames_total-1)*t_step) # maximum possible index for the first frame
if opt.dataset_mode == 'pose':
start_idx = index % offset_max
else:
start_idx = np.random.randint(offset_max) # offset for the first frame to load
if opt.debug:
print("loading %d frames in total, first frame starting at index %d, space between neighboring frames is %d"
% (n_frames_total, start_idx, t_step))
else:
n_frames_total = tG
start_idx = index
t_step = 1
return n_frames_total, start_idx, t_step
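# Worked example (hypothetical values, not part of the original file): with
# cur_seq_len=30, opt.n_frames_total=12, tG=3, max_frames_per_gpu=6 and one
# generator GPU: n_frames_per_load = 6, n_loadings = 12 // 6 = 2, so
# n_frames_total = 6*2 + 3 - 1 = 14 frames are read, and
# max_t_step = min(opt.max_t_step, 29 // 13) = min(opt.max_t_step, 2).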
def concat_frame(A, Ai, nF):
if A is None:
A = Ai
else:
c = Ai.size()[0]
if A.size()[0] == nF * c:
A = A[c:]
A = torch.cat([A, Ai])
return A | vid2vid-master | data/base_dataset.py |
import os.path
import torchvision.transforms as transforms
import torch
from PIL import Image
import numpy as np
import cv2
from skimage import feature
from data.base_dataset import BaseDataset, get_img_params, get_transform, get_video_params, concat_frame
from data.image_folder import make_grouped_dataset, check_path_valid
from data.keypoint2img import interpPoints, drawEdge
class FaceDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
self.dir_A = os.path.join(opt.dataroot, opt.phase + '_keypoints')
self.dir_B = os.path.join(opt.dataroot, opt.phase + '_img')
self.A_paths = sorted(make_grouped_dataset(self.dir_A))
self.B_paths = sorted(make_grouped_dataset(self.dir_B))
check_path_valid(self.A_paths, self.B_paths)
self.init_frame_idx(self.A_paths)
self.scale_ratio = np.array([[0.9, 1], [1, 1], [0.9, 1], [1, 1.1], [0.9, 0.9], [0.9, 0.9]])#np.random.uniform(0.9, 1.1, size=[6, 2])
self.scale_ratio_sym = np.array([[1, 1], [0.9, 1], [1, 1], [0.9, 1], [1, 1], [1, 1]]) #np.random.uniform(0.9, 1.1, size=[6, 2])
self.scale_shift = np.zeros((6, 2)) #np.random.uniform(-5, 5, size=[6, 2])
def __getitem__(self, index):
A, B, I, seq_idx = self.update_frame_idx(self.A_paths, index)
A_paths = self.A_paths[seq_idx]
B_paths = self.B_paths[seq_idx]
n_frames_total, start_idx, t_step = get_video_params(self.opt, self.n_frames_total, len(A_paths), self.frame_idx)
B_img = Image.open(B_paths[start_idx]).convert('RGB')
B_size = B_img.size
points = np.loadtxt(A_paths[start_idx], delimiter=',')
is_first_frame = self.opt.isTrain or not hasattr(self, 'min_x')
if is_first_frame: # crop only the face region
self.get_crop_coords(points, B_size)
params = get_img_params(self.opt, self.crop(B_img).size)
transform_scaleA = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
transform_scaleB = get_transform(self.opt, params)
# read in images
frame_range = list(range(n_frames_total)) if self.A is None else [self.opt.n_frames_G-1]
for i in frame_range:
A_path = A_paths[start_idx + i * t_step]
B_path = B_paths[start_idx + i * t_step]
B_img = Image.open(B_path)
Ai, Li = self.get_face_image(A_path, transform_scaleA, transform_label, B_size, B_img)
Bi = transform_scaleB(self.crop(B_img))
A = concat_frame(A, Ai, n_frames_total)
B = concat_frame(B, Bi, n_frames_total)
I = concat_frame(I, Li, n_frames_total)
if not self.opt.isTrain:
self.A, self.B, self.I = A, B, I
self.frame_idx += 1
change_seq = False if self.opt.isTrain else self.change_seq
return_list = {'A': A, 'B': B, 'inst': I, 'A_path': A_path, 'change_seq': change_seq}
return return_list
def get_image(self, A_path, transform_scaleA):
A_img = Image.open(A_path)
A_scaled = transform_scaleA(self.crop(A_img))
return A_scaled
def get_face_image(self, A_path, transform_A, transform_L, size, img):
# read face keypoints from path and crop face region
keypoints, part_list, part_labels = self.read_keypoints(A_path, size)
# draw edges and possibly add distance transform maps
add_dist_map = not self.opt.no_dist_map
im_edges, dist_tensor = self.draw_face_edges(keypoints, part_list, transform_A, size, add_dist_map)
# canny edge for background
if not self.opt.no_canny_edge:
edges = feature.canny(np.array(img.convert('L')))
edges = edges * (part_labels == 0) # remove edges within face
im_edges += (edges * 255).astype(np.uint8)
edge_tensor = transform_A(Image.fromarray(self.crop(im_edges)))
# final input tensor
input_tensor = torch.cat([edge_tensor, dist_tensor]) if add_dist_map else edge_tensor
label_tensor = transform_L(Image.fromarray(self.crop(part_labels.astype(np.uint8)))) * 255.0
return input_tensor, label_tensor
def read_keypoints(self, A_path, size):
# mapping from keypoints to face part
part_list = [[list(range(0, 17)) + list(range(68, 83)) + [0]], # face
[range(17, 22)], # right eyebrow
[range(22, 27)], # left eyebrow
[[28, 31], range(31, 36), [35, 28]], # nose
[[36,37,38,39], [39,40,41,36]], # right eye
[[42,43,44,45], [45,46,47,42]], # left eye
[range(48, 55), [54,55,56,57,58,59,48]], # mouth
[range(60, 65), [64,65,66,67,60]] # tongue
]
label_list = [1, 2, 2, 3, 4, 4, 5, 6] # labeling for different facial parts
keypoints = np.loadtxt(A_path, delimiter=',')
# add upper half face by symmetry
pts = keypoints[:17, :].astype(np.int32)
baseline_y = (pts[0,1] + pts[-1,1]) / 2
upper_pts = pts[1:-1,:].copy()
upper_pts[:,1] = baseline_y + (baseline_y-upper_pts[:,1]) * 2 // 3
keypoints = np.vstack((keypoints, upper_pts[::-1,:]))
# label map for facial part
w, h = size
part_labels = np.zeros((h, w), np.uint8)
for p, edge_list in enumerate(part_list):
indices = [item for sublist in edge_list for item in sublist]
pts = keypoints[indices, :].astype(np.int32)
cv2.fillPoly(part_labels, pts=[pts], color=label_list[p])
# move the keypoints a bit
if not self.opt.isTrain and self.opt.random_scale_points:
self.scale_points(keypoints, part_list[1] + part_list[2], 1, sym=True)
self.scale_points(keypoints, part_list[4] + part_list[5], 3, sym=True)
for i, part in enumerate(part_list):
self.scale_points(keypoints, part, label_list[i]-1)
return keypoints, part_list, part_labels
def draw_face_edges(self, keypoints, part_list, transform_A, size, add_dist_map):
w, h = size
edge_len = 3 # interpolate 3 keypoints to form a curve when drawing edges
# edge map for face region from keypoints
im_edges = np.zeros((h, w), np.uint8) # edge map for all edges
dist_tensor = 0
e = 1
for edge_list in part_list:
for edge in edge_list:
im_edge = np.zeros((h, w), np.uint8) # edge map for the current edge
for i in range(0, max(1, len(edge)-1), edge_len-1): # divide a long edge into multiple small edges when drawing
sub_edge = edge[i:i+edge_len]
x = keypoints[sub_edge, 0]
y = keypoints[sub_edge, 1]
curve_x, curve_y = interpPoints(x, y) # interp keypoints to get the curve shape
drawEdge(im_edges, curve_x, curve_y)
if add_dist_map:
drawEdge(im_edge, curve_x, curve_y)
if add_dist_map: # add distance transform map on each facial part
im_dist = cv2.distanceTransform(255-im_edge, cv2.DIST_L1, 3)
im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8)
im_dist = Image.fromarray(im_dist)
tensor_cropped = transform_A(self.crop(im_dist))
dist_tensor = tensor_cropped if e == 1 else torch.cat([dist_tensor, tensor_cropped])
e += 1
return im_edges, dist_tensor
def get_crop_coords(self, keypoints, size):
min_y, max_y = keypoints[:,1].min(), keypoints[:,1].max()
min_x, max_x = keypoints[:,0].min(), keypoints[:,0].max()
xc = (min_x + max_x) // 2
yc = (min_y*3 + max_y) // 4
h = w = (max_x - min_x) * 2.5
xc = min(max(0, xc - w//2) + w, size[0]) - w//2
yc = min(max(0, yc - h//2) + h, size[1]) - h//2
min_x, max_x = xc - w//2, xc + w//2
min_y, max_y = yc - h//2, yc + h//2
self.min_y, self.max_y, self.min_x, self.max_x = int(min_y), int(max_y), int(min_x), int(max_x)
def crop(self, img):
if isinstance(img, np.ndarray):
return img[self.min_y:self.max_y, self.min_x:self.max_x]
else:
return img.crop((self.min_x, self.min_y, self.max_x, self.max_y))
def scale_points(self, keypoints, part, index, sym=False):
if sym:
pts_idx = sum([list(idx) for idx in part], [])
pts = keypoints[pts_idx]
ratio_x = self.scale_ratio_sym[index, 0]
ratio_y = self.scale_ratio_sym[index, 1]
mean = np.mean(pts, axis=0)
mean_x, mean_y = mean[0], mean[1]
for idx in part:
pts_i = keypoints[idx]
mean_i = np.mean(pts_i, axis=0)
mean_ix, mean_iy = mean_i[0], mean_i[1]
new_mean_ix = (mean_ix - mean_x) * ratio_x + mean_x
new_mean_iy = (mean_iy - mean_y) * ratio_y + mean_y
pts_i[:,0] = (pts_i[:,0] - mean_ix) + new_mean_ix
pts_i[:,1] = (pts_i[:,1] - mean_iy) + new_mean_iy
keypoints[idx] = pts_i
else:
pts_idx = sum([list(idx) for idx in part], [])
pts = keypoints[pts_idx]
ratio_x = self.scale_ratio[index, 0]
ratio_y = self.scale_ratio[index, 1]
mean = np.mean(pts, axis=0)
mean_x, mean_y = mean[0], mean[1]
pts[:,0] = (pts[:,0] - mean_x) * ratio_x + mean_x + self.scale_shift[index, 0]
pts[:,1] = (pts[:,1] - mean_y) * ratio_y + mean_y + self.scale_shift[index, 1]
keypoints[pts_idx] = pts
def __len__(self):
if self.opt.isTrain:
return len(self.A_paths)
else:
return sum(self.frames_count)
def name(self):
return 'FaceDataset' | vid2vid-master | data/face_dataset.py |
def CreateDataLoader(opt):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
| vid2vid-master | data/data_loader.py |
import os.path
from PIL import Image
import numpy as np
import json
import glob
from scipy.optimize import curve_fit
import warnings
def func(x, a, b, c):
return a * x**2 + b * x + c
def linear(x, a, b):
return a * x + b
def setColor(im, yy, xx, color):
if len(im.shape) == 3:
if (im[yy, xx] == 0).all():
im[yy, xx, 0], im[yy, xx, 1], im[yy, xx, 2] = color[0], color[1], color[2]
else:
im[yy, xx, 0] = ((im[yy, xx, 0].astype(float) + color[0]) / 2).astype(np.uint8)
im[yy, xx, 1] = ((im[yy, xx, 1].astype(float) + color[1]) / 2).astype(np.uint8)
im[yy, xx, 2] = ((im[yy, xx, 2].astype(float) + color[2]) / 2).astype(np.uint8)
else:
im[yy, xx] = color[0]
def drawEdge(im, x, y, bw=1, color=(255,255,255), draw_end_points=False):
if x is not None and x.size:
h, w = im.shape[0], im.shape[1]
# edge
for i in range(-bw, bw):
for j in range(-bw, bw):
yy = np.maximum(0, np.minimum(h-1, y+i))
xx = np.maximum(0, np.minimum(w-1, x+j))
setColor(im, yy, xx, color)
# edge endpoints
if draw_end_points:
for i in range(-bw*2, bw*2):
for j in range(-bw*2, bw*2):
if (i**2) + (j**2) < (4 * bw**2):
yy = np.maximum(0, np.minimum(h-1, np.array([y[0], y[-1]])+i))
xx = np.maximum(0, np.minimum(w-1, np.array([x[0], x[-1]])+j))
setColor(im, yy, xx, color)
def interpPoints(x, y):
if abs(x[:-1] - x[1:]).max() < abs(y[:-1] - y[1:]).max():
curve_y, curve_x = interpPoints(y, x)
if curve_y is None:
return None, None
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if len(x) < 3:
popt, _ = curve_fit(linear, x, y)
else:
popt, _ = curve_fit(func, x, y)
if abs(popt[0]) > 1:
return None, None
if x[0] > x[-1]:
x = list(reversed(x))
y = list(reversed(y))
        curve_x = np.linspace(x[0], x[-1], int(np.round(x[-1] - x[0])))
if len(x) < 3:
curve_y = linear(curve_x, *popt)
else:
curve_y = func(curve_x, *popt)
return curve_x.astype(int), curve_y.astype(int)
def read_keypoints(json_input, size, random_drop_prob=0, remove_face_labels=False, basic_point_only=False):
with open(json_input, encoding='utf-8') as f:
keypoint_dicts = json.loads(f.read())["people"]
edge_lists = define_edge_lists(basic_point_only)
w, h = size
pose_img = np.zeros((h, w, 3), np.uint8)
for keypoint_dict in keypoint_dicts:
pose_pts = np.array(keypoint_dict["pose_keypoints_2d"]).reshape(25, 3)
face_pts = np.array(keypoint_dict["face_keypoints_2d"]).reshape(70, 3)
hand_pts_l = np.array(keypoint_dict["hand_left_keypoints_2d"]).reshape(21, 3)
hand_pts_r = np.array(keypoint_dict["hand_right_keypoints_2d"]).reshape(21, 3)
pts = [extract_valid_keypoints(pts, edge_lists) for pts in [pose_pts, face_pts, hand_pts_l, hand_pts_r]]
pose_img += connect_keypoints(pts, edge_lists, size, random_drop_prob, remove_face_labels, basic_point_only)
return pose_img
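# Hedged usage sketch (not part of the original file; the file name below is
# hypothetical): assumes an OpenPose-style JSON with pose/face/hand keypoints
# and a (width, height) target size.
#
#   pose_map = read_keypoints('frame_000001_keypoints.json', (512, 256))
#   Image.fromarray(pose_map).save('pose_vis.png')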
def extract_valid_keypoints(pts, edge_lists):
pose_edge_list, _, hand_edge_list, _, face_list = edge_lists
p = pts.shape[0]
thre = 0.1 if p == 70 else 0.01
output = np.zeros((p, 2))
if p == 70: # face
for edge_list in face_list:
for edge in edge_list:
if (pts[edge, 2] > thre).all():
output[edge, :] = pts[edge, :2]
elif p == 21: # hand
for edge in hand_edge_list:
if (pts[edge, 2] > thre).all():
output[edge, :] = pts[edge, :2]
else: # pose
valid = (pts[:, 2] > thre)
output[valid, :] = pts[valid, :2]
return output
def connect_keypoints(pts, edge_lists, size, random_drop_prob, remove_face_labels, basic_point_only):
pose_pts, face_pts, hand_pts_l, hand_pts_r = pts
w, h = size
output_edges = np.zeros((h, w, 3), np.uint8)
pose_edge_list, pose_color_list, hand_edge_list, hand_color_list, face_list = edge_lists
if random_drop_prob > 0 and remove_face_labels:
# add random noise to keypoints
pose_pts[[0,15,16,17,18], :] += 5 * np.random.randn(5,2)
face_pts[:,0] += 2 * np.random.randn()
face_pts[:,1] += 2 * np.random.randn()
### pose
for i, edge in enumerate(pose_edge_list):
x, y = pose_pts[edge, 0], pose_pts[edge, 1]
if (np.random.rand() > random_drop_prob) and (0 not in x):
curve_x, curve_y = interpPoints(x, y)
drawEdge(output_edges, curve_x, curve_y, bw=3, color=pose_color_list[i], draw_end_points=True)
if not basic_point_only:
### hand
for hand_pts in [hand_pts_l, hand_pts_r]: # for left and right hand
if np.random.rand() > random_drop_prob:
for i, edge in enumerate(hand_edge_list): # for each finger
for j in range(0, len(edge)-1): # for each part of the finger
sub_edge = edge[j:j+2]
x, y = hand_pts[sub_edge, 0], hand_pts[sub_edge, 1]
if 0 not in x:
line_x, line_y = interpPoints(x, y)
drawEdge(output_edges, line_x, line_y, bw=1, color=hand_color_list[i], draw_end_points=True)
### face
edge_len = 2
if (np.random.rand() > random_drop_prob):
for edge_list in face_list:
for edge in edge_list:
for i in range(0, max(1, len(edge)-1), edge_len-1):
sub_edge = edge[i:i+edge_len]
x, y = face_pts[sub_edge, 0], face_pts[sub_edge, 1]
if 0 not in x:
curve_x, curve_y = interpPoints(x, y)
drawEdge(output_edges, curve_x, curve_y, draw_end_points=True)
return output_edges
def define_edge_lists(basic_point_only):
### pose
pose_edge_list = []
pose_color_list = []
if not basic_point_only:
pose_edge_list += [[17, 15], [15, 0], [ 0, 16], [16, 18]] # head
pose_color_list += [[153, 0,153], [153, 0,102], [102, 0,153], [ 51, 0,153]]
pose_edge_list += [
[ 0, 1], [ 1, 8], # body
[ 1, 2], [ 2, 3], [ 3, 4], # right arm
[ 1, 5], [ 5, 6], [ 6, 7], # left arm
[ 8, 9], [ 9, 10], [10, 11], [11, 24], [11, 22], [22, 23], # right leg
[ 8, 12], [12, 13], [13, 14], [14, 21], [14, 19], [19, 20] # left leg
]
pose_color_list += [
[153, 0, 51], [153, 0, 0],
[153, 51, 0], [153,102, 0], [153,153, 0],
[102,153, 0], [ 51,153, 0], [ 0,153, 0],
[ 0,153, 51], [ 0,153,102], [ 0,153,153], [ 0,153,153], [ 0,153,153], [ 0,153,153],
[ 0,102,153], [ 0, 51,153], [ 0, 0,153], [ 0, 0,153], [ 0, 0,153], [ 0, 0,153]
]
### hand
hand_edge_list = [
[0, 1, 2, 3, 4],
[0, 5, 6, 7, 8],
[0, 9, 10, 11, 12],
[0, 13, 14, 15, 16],
[0, 17, 18, 19, 20]
]
hand_color_list = [
[204,0,0], [163,204,0], [0,204,82], [0,82,204], [163,0,204]
]
### face
face_list = [
#[range(0, 17)], # face
[range(17, 22)], # left eyebrow
[range(22, 27)], # right eyebrow
[range(27, 31), range(31, 36)], # nose
[[36,37,38,39], [39,40,41,36]], # left eye
[[42,43,44,45], [45,46,47,42]], # right eye
[range(48, 55), [54,55,56,57,58,59,48]], # mouth
]
return pose_edge_list, pose_color_list, hand_edge_list, hand_color_list, face_list | vid2vid-master | data/keypoint2img.py |
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
    def load_data(self):
return None
| vid2vid-master | data/base_data_loader.py |
 | vid2vid-master | data/__init__.py |