python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus Dataset constructors for discrete type data
"""
from pathlib import Path
from typing import Union, Dict, List
import numpy as np
import h5py
from modulus.sym.utils.io.vtk import grid_to_vtk
from modulus.sym.dataset.dataset import Dataset, _DictDatasetMixin
class _DictGridDatasetMixin(_DictDatasetMixin):
"Special mixin class for dealing with dictionaries as input"
def save_dataset(self, filename):
named_lambda_weighting = {
"lambda_" + key: value for key, value in self.lambda_weighting.items()
}
save_var = {**self.invar, **self.outvar, **named_lambda_weighting}
grid_to_vtk(filename, save_var) # Structured grid output in future
class DictGridDataset(_DictGridDatasetMixin, Dataset):
"""Default map-style grid dataset
Parameters
----------
invar : Dict[str, np.array]
Dictionary of numpy arrays as input. Input arrays should be of form [B, cin, xdim, ...]
outvar : Dict[str, np.array]
Dictionary of numpy arrays as target outputs. Target arrays should be of form [B, cin, xdim, ...]
lambda_weighting : Dict[str, np.array], optional
The weighting of each example, by default None
"""
auto_collation = True
def __init__(
self,
invar: Dict[str, np.array],
outvar: Dict[str, np.array],
lambda_weighting: Dict[str, np.array] = None,
):
super().__init__(invar=invar, outvar=outvar, lambda_weighting=lambda_weighting)
def __getitem__(self, idx):
invar = _DictDatasetMixin._idx_var(self.invar, idx)
outvar = _DictDatasetMixin._idx_var(self.outvar, idx)
lambda_weighting = _DictDatasetMixin._idx_var(self.lambda_weighting, idx)
return (invar, outvar, lambda_weighting)
def __len__(self):
return self.length
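# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# DictGridDataset wraps dictionaries of [B, C, spatial...] arrays; because
# auto_collation is True, indexing with a list of ints returns batched slices
# of invar, outvar and the (default all-ones) lambda weighting.
def _example_dict_grid_dataset():
    invar = {"coords": np.random.rand(8, 2, 16, 16).astype(np.float32)}
    outvar = {"u": np.random.rand(8, 1, 16, 16).astype(np.float32)}
    dataset = DictGridDataset(invar=invar, outvar=outvar)
    invar_b, outvar_b, lambda_b = dataset[[0, 3]]  # batched indexing
    return invar_b, outvar_b, lambda_b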
class HDF5GridDataset(Dataset):
"""lazy-loading HDF5 map-style grid dataset"""
def __init__(
self,
filename: Union[str, Path],
invar_keys: List[str],
outvar_keys: List[str],
n_examples: int = None,
):
self._invar_keys = invar_keys
self._outvar_keys = outvar_keys
self.path = Path(filename)
# check path
assert self.path.is_file(), f"Could not find file {self.path}"
assert self.path.suffix in [
".h5",
".hdf5",
], f"File type should be HDF5, got {self.path.suffix}"
# check dataset/ get length
with h5py.File(self.path, "r") as f:
# check keys exist
for k in invar_keys + outvar_keys:
if k not in f.keys():
raise KeyError(f"Variable {k} not found in HDF5 file")
length = len(f[k])
if n_examples is not None:
assert (
n_examples <= length
), "error, n_examples greater than length of file data"
length = min(n_examples, length)
self.length = length
def __getitem__(self, idx):
invar = Dataset._to_tensor_dict(
{k: self.f[k][idx, ...] for k in self.invar_keys}
)
outvar = Dataset._to_tensor_dict(
{k: self.f[k][idx, ...] for k in self.outvar_keys}
)
lambda_weighting = Dataset._to_tensor_dict(
{k: np.ones_like(v) for k, v in outvar.items()}
)
return invar, outvar, lambda_weighting
def __len__(self):
return self.length
def worker_init_fn(self, iworker):
super().worker_init_fn(iworker)
# open file in worker process
# note each torch DataLoader worker process should open file individually when reading
# do not share open file descriptors across separate workers!
# note files are closed when worker process is destroyed so no need to explicitly close
self.f = h5py.File(self.path, "r")
@property
def invar_keys(self):
return list(self._invar_keys)
@property
def outvar_keys(self):
return list(self._outvar_keys)
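# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# HDF5GridDataset opens its HDF5 handle lazily in worker_init_fn so that each
# torch DataLoader worker process owns a separate file handle. When using the
# dataset directly (outside a DataLoader), call worker_init_fn first. "data.h5"
# is a placeholder path assumed to contain datasets named "x" and "u".
def _example_hdf5_grid_dataset():
    dataset = HDF5GridDataset("data.h5", invar_keys=["x"], outvar_keys=["u"])
    dataset.worker_init_fn(0)  # opens the file handle (and seeds numpy)
    invar, outvar, lambda_weighting = dataset[0]
    return invar, outvar, lambda_weighting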
| modulus-sym-main | modulus/sym/dataset/discrete.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dataset import Dataset, IterableDataset
from .continuous import (
DictPointwiseDataset,
ListIntegralDataset,
ContinuousPointwiseIterableDataset,
ContinuousIntegralIterableDataset,
DictImportanceSampledPointwiseIterableDataset,
DictVariationalDataset,
DictInferencePointwiseDataset,
)
from .discrete import DictGridDataset, HDF5GridDataset
| modulus-sym-main | modulus/sym/dataset/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Dataset classes
"""
from typing import Dict
import numpy as np
import torch.utils.data
from modulus.sym.constants import tf_dt
from modulus.sym.distributed import DistributedManager
class _BaseDataset:
"Defines common requirements across map- and iterable- style datasets"
def worker_init_fn(self, iworker):
"Called by each worker in torch dataloader when it initialises"
# get the distributed manager object
manager = DistributedManager()
worker_rank = manager.group_rank("data_parallel") if manager.distributed else 0
worker_size = manager.group_size("data_parallel") if manager.distributed else 1
# set different numpy seed per worker
# set seed so first worker id's seed matches single-process case
np.random.seed(seed=(worker_rank + iworker * worker_size))
@property
def invar_keys(self):
"Return list of invar keys"
raise NotImplementedError("subclass must implement this")
@property
def outvar_keys(self):
"Return list of outvar keys"
raise NotImplementedError("subclass must implement this")
def save_dataset(self, filename):
"Save dataset to file"
raise NotImplementedError("subclass must implement this")
@staticmethod
def _to_tensor_dict(var_dict, device=None):
# convert to torch
tensor_dict = {
key: torch.as_tensor(value, dtype=tf_dt, device=device)
for key, value in var_dict.items()
}
return tensor_dict
class Dataset(_BaseDataset, torch.utils.data.Dataset):
"For defining map-style datasets, can be subclassed by user"
auto_collation = False
def __getitem__(self, idx):
"""Must return a single example tuple e.g. (invar, outvar, lambda_weighting)
if Dataset.auto_collation is False, or a batched example tuple if
Dataset.auto_collation is True. For the latter case idx is a batch of indices."""
raise NotImplementedError("subclass must implement this")
def __len__(self):
raise NotImplementedError("subclass must implement this")
class IterableDataset(_BaseDataset, torch.utils.data.IterableDataset):
"For defining iterable-style datasets, can be subclassed by user"
def __iter__(self):
"Must yield batched example tuple e.g. (invar, outvar, lambda_weighting)"
raise NotImplementedError("subclass must implement this")
class _DictDatasetMixin:
"Special mixin class for dealing with dictionary-based datasets"
def __init__(
self,
invar: Dict[str, np.array],
outvar: Dict[str, np.array],
lambda_weighting: Dict[str, np.array] = None,
):
# get default lambda weighting
if lambda_weighting is None:
lambda_weighting = {key: np.ones_like(x) for key, x in outvar.items()}
# convert dataset arrays to tensors
self.invar = Dataset._to_tensor_dict(invar)
self.outvar = Dataset._to_tensor_dict(outvar)
self.lambda_weighting = Dataset._to_tensor_dict(lambda_weighting)
# get length
self.length = len(next(iter(self.invar.values())))
@property
def invar_keys(self):
return list(self.invar.keys())
@property
def outvar_keys(self):
return list(self.outvar.keys())
@staticmethod
def _idx_var(var, idx):
# index, idx can be an int or an array
idx_var = {}
for key, value in var.items():
idx_var[key] = value[idx]
return idx_var
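# --- Illustrative sketch (hypothetical class, not part of the library) ---
# A minimal map-style subclass only needs __getitem__/__len__ plus the
# invar_keys/outvar_keys properties declared in _BaseDataset; lambda weights
# default to ones, mirroring _DictDatasetMixin.
class _ExampleSineDataset(Dataset):
    def __init__(self, n: int = 128):
        self.x = np.linspace(0, 1, n, dtype=np.float32).reshape(-1, 1)
        self.u = np.sin(2 * np.pi * self.x)

    def __getitem__(self, idx):
        invar = Dataset._to_tensor_dict({"x": self.x[idx : idx + 1]})
        outvar = Dataset._to_tensor_dict({"u": self.u[idx : idx + 1]})
        lambda_weighting = Dataset._to_tensor_dict(
            {"u": np.ones_like(self.u[idx : idx + 1])}
        )
        return invar, outvar, lambda_weighting

    def __len__(self):
        return len(self.x)

    @property
    def invar_keys(self):
        return ["x"]

    @property
    def outvar_keys(self):
        return ["u"]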
| modulus-sym-main | modulus/sym/dataset/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus Dataset constructors for continuous type data
"""
from typing import Dict, List, Callable
import numpy as np
from modulus.sym.utils.io.vtk import var_to_polyvtk
from .dataset import Dataset, IterableDataset, _DictDatasetMixin
class _DictPointwiseDatasetMixin(_DictDatasetMixin):
"Special mixin class for dealing with dictionaries as input"
def save_dataset(self, filename):
named_lambda_weighting = {
"lambda_" + key: value for key, value in self.lambda_weighting.items()
}
save_var = {**self.invar, **self.outvar, **named_lambda_weighting}
var_to_polyvtk(filename, save_var)
class DictPointwiseDataset(_DictPointwiseDatasetMixin, Dataset):
"""A map-style dataset for a finite set of pointwise training examples."""
auto_collation = True
def __init__(
self,
invar: Dict[str, np.array],
outvar: Dict[str, np.array],
lambda_weighting: Dict[str, np.array] = None,
):
super().__init__(invar=invar, outvar=outvar, lambda_weighting=lambda_weighting)
def __getitem__(self, idx):
invar = _DictDatasetMixin._idx_var(self.invar, idx)
outvar = _DictDatasetMixin._idx_var(self.outvar, idx)
lambda_weighting = _DictDatasetMixin._idx_var(self.lambda_weighting, idx)
return (invar, outvar, lambda_weighting)
def __len__(self):
return self.length
class DictInferencePointwiseDataset(Dataset):
"""
A map-style dataset for inferencing the model, only contains inputs
"""
auto_collation = True
def __init__(
self,
invar: Dict[str, np.array],
output_names: List[str], # Just names of output vars
):
self.invar = Dataset._to_tensor_dict(invar)
self.output_names = output_names
self.length = len(next(iter(invar.values())))
def __getitem__(self, idx):
invar = _DictDatasetMixin._idx_var(self.invar, idx)
return (invar,)
def __len__(self):
return self.length
@property
def invar_keys(self):
return list(self.invar.keys())
@property
def outvar_keys(self):
return list(self.output_names)
class ContinuousPointwiseIterableDataset(IterableDataset):
"""
An infinitely iterable dataset for a continuous set of pointwise training examples.
This will resample training examples (create new ones) every iteration.
"""
def __init__(
self,
invar_fn: Callable,
outvar_fn: Callable,
lambda_weighting_fn: Callable = None,
):
self.invar_fn = invar_fn
self.outvar_fn = outvar_fn
# fall back to unit weights before storing the (possibly defaulted) callable
if lambda_weighting_fn is None:
    lambda_weighting_fn = lambda _, outvar: {
        key: np.ones_like(x) for key, x in outvar.items()
    }
self.lambda_weighting_fn = lambda_weighting_fn
def iterable_function():
while True:
invar = Dataset._to_tensor_dict(self.invar_fn())
outvar = Dataset._to_tensor_dict(self.outvar_fn(invar))
lambda_weighting = Dataset._to_tensor_dict(
self.lambda_weighting_fn(invar, outvar)
)
yield (invar, outvar, lambda_weighting)
self.iterable_function = iterable_function
def __iter__(self):
yield from self.iterable_function()
@property
def invar_keys(self):
invar = self.invar_fn()
return list(invar.keys())
@property
def outvar_keys(self):
invar = self.invar_fn()
outvar = self.outvar_fn(invar)
return list(outvar.keys())
def save_dataset(self, filename):
# Cannot save continuous data-set
pass
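# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# The iterable dataset is driven entirely by callables: invar_fn draws a fresh
# batch of input points, outvar_fn maps those (already tensor-converted) inputs
# to targets, and lambda_weighting_fn assigns per-point weights. Every iteration
# resamples, so the training set is effectively continuous.
def _example_continuous_pointwise_dataset(batch_size: int = 64):
    def invar_fn():
        return {"x": np.random.uniform(0, 1, size=(batch_size, 1))}

    def outvar_fn(invar):
        return {"u": np.zeros((batch_size, 1))}

    def lambda_fn(invar, outvar):
        return {"u": np.ones((batch_size, 1))}

    dataset = ContinuousPointwiseIterableDataset(invar_fn, outvar_fn, lambda_fn)
    return next(iter(dataset))  # (invar, outvar, lambda_weighting) tensor dicts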
class DictImportanceSampledPointwiseIterableDataset(
_DictPointwiseDatasetMixin, IterableDataset
):
"""
An infinitely iterable dataset that applies importance sampling for faster, more accurate Monte Carlo integration
"""
def __init__(
self,
invar: Dict[str, np.array],
outvar: Dict[str, np.array],
batch_size: int,
importance_measure: Callable,
lambda_weighting: Dict[str, np.array] = None,
shuffle: bool = True,
resample_freq: int = 1000,
):
super().__init__(invar=invar, outvar=outvar, lambda_weighting=lambda_weighting)
self.batch_size = min(batch_size, self.length)
self.shuffle = shuffle
self.resample_freq = resample_freq
self.importance_measure = importance_measure
def iterable_function():
# TODO: re-write idx calculation using pytorch sampling - to improve performance
counter = 0
while True:
# resample all points when needed
if counter % self.resample_freq == 0:
list_importance = []
list_invar = {
key: np.split(value, value.shape[0] // self.batch_size)
for key, value in self.invar.items()
}
for i in range(len(next(iter(list_invar.values())))):
importance = self.importance_measure(
{key: value[i] for key, value in list_invar.items()}
)
list_importance.append(importance)
importance = np.concatenate(list_importance, axis=0)
prob = importance / np.sum(self.invar["area"].numpy() * importance)
# sample points from probability distribution and store idx
idx = np.array([])
while True:
r = np.random.uniform(0, np.max(prob), size=self.batch_size)
try_idx = np.random.choice(self.length, self.batch_size)
if_sample = np.less(r, prob[try_idx, :][:, 0])
idx = np.concatenate([idx, try_idx[if_sample]])
if idx.shape[0] >= batch_size:
idx = idx[:batch_size]
break
idx = idx.astype(np.int64)
# gather invar, outvar, and lambda weighting
invar = _DictDatasetMixin._idx_var(self.invar, idx)
outvar = _DictDatasetMixin._idx_var(self.outvar, idx)
lambda_weighting = _DictDatasetMixin._idx_var(
self.lambda_weighting, idx
)
# set area value from importance sampling
invar["area"] = 1.0 / (prob[idx] * batch_size)
# return and count up
counter += 1
yield (invar, outvar, lambda_weighting)
self.iterable_function = iterable_function
def __iter__(self):
yield from self.iterable_function()
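# --- Standalone illustration (hypothetical helper, not part of the library) ---
# The resampling loop above performs rejection sampling: candidate indices are
# accepted when a uniform draw on [0, max(prob)] falls below that candidate's
# probability, so accepted indices end up distributed proportionally to `prob`.
# This sketch assumes a 1-D probability array.
def _example_rejection_sample(prob: np.ndarray, batch_size: int) -> np.ndarray:
    idx = np.array([], dtype=np.int64)
    while idx.shape[0] < batch_size:
        r = np.random.uniform(0, np.max(prob), size=batch_size)
        try_idx = np.random.choice(len(prob), batch_size)
        idx = np.concatenate([idx, try_idx[r < prob[try_idx]]])
    return idx[:batch_size].astype(np.int64)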
class ListIntegralDataset(_DictDatasetMixin, Dataset):
"""
A map-style dataset for a finite set of integral training examples.
"""
auto_collation = True
def __init__(
self,
list_invar: List[Dict[str, np.array]],
list_outvar: List[Dict[str, np.array]],
list_lambda_weighting: List[Dict[str, np.array]] = None,
):
if list_lambda_weighting is None:
list_lambda_weighting = []
for outvar in list_outvar:
list_lambda_weighting.append(
{key: np.ones_like(x) for key, x in outvar.items()}
)
invar = _stack_list_numpy_dict(list_invar)
outvar = _stack_list_numpy_dict(list_outvar)
lambda_weighting = _stack_list_numpy_dict(list_lambda_weighting)
super().__init__(invar=invar, outvar=outvar, lambda_weighting=lambda_weighting)
def __getitem__(self, idx):
invar = _DictDatasetMixin._idx_var(self.invar, idx)
outvar = _DictDatasetMixin._idx_var(self.outvar, idx)
lambda_weighting = _DictDatasetMixin._idx_var(self.lambda_weighting, idx)
return (invar, outvar, lambda_weighting)
def __len__(self):
return self.length
def save_dataset(self, filename):
for idx in range(self.length):
var_to_polyvtk(
filename + "_" + str(idx).zfill(5),
_DictDatasetMixin._idx_var(self.invar, idx),
)
class ContinuousIntegralIterableDataset(IterableDataset):
"""
An infinitely iterable dataset for a continuous set of integral training examples.
This will resample training examples (create new ones) every iteration.
"""
def __init__(
self,
invar_fn: Callable,
outvar_fn: Callable,
batch_size: int,
lambda_weighting_fn: Callable = None,
param_ranges_fn: Callable = None,
):
self.invar_fn = invar_fn
self.outvar_fn = outvar_fn
# fall back to unit weights before storing the (possibly defaulted) callable
if lambda_weighting_fn is None:
    lambda_weighting_fn = lambda _, outvar: {
        key: np.ones_like(x) for key, x in outvar.items()
    }
self.lambda_weighting_fn = lambda_weighting_fn
if param_ranges_fn is None:
param_ranges_fn = lambda: {} # Potentially unsafe?
self.param_ranges_fn = param_ranges_fn
self.batch_size = batch_size
# TODO: re-write iterable function so that for loop not needed - to improve performance
def iterable_function():
while True:
list_invar = []
list_outvar = []
list_lambda_weighting = []
for _ in range(self.batch_size):
param_range = self.param_ranges_fn()
list_invar.append(self.invar_fn(param_range))
if (
not param_range
): # TODO this can be removed after a np_lambdify rewrite
param_range = {"_": next(iter(list_invar[-1].values()))[0:1]}
list_outvar.append(self.outvar_fn(param_range))
list_lambda_weighting.append(
self.lambda_weighting_fn(param_range, list_outvar[-1])
)
invar = Dataset._to_tensor_dict(_stack_list_numpy_dict(list_invar))
outvar = Dataset._to_tensor_dict(_stack_list_numpy_dict(list_outvar))
lambda_weighting = Dataset._to_tensor_dict(
_stack_list_numpy_dict(list_lambda_weighting)
)
yield (invar, outvar, lambda_weighting)
self.iterable_function = iterable_function
def __iter__(self):
yield from self.iterable_function()
@property
def invar_keys(self):
param_range = self.param_ranges_fn()
invar = self.invar_fn(param_range)
return list(invar.keys())
@property
def outvar_keys(self):
param_range = self.param_ranges_fn()
invar = self.invar_fn(param_range)
outvar = self.outvar_fn(invar)
return list(outvar.keys())
def save_dataset(self, filename):
# Cannot save continuous data-set
pass
class DictVariationalDataset(Dataset):
"""
A map-style dataset for a finite set of variational training examples.
"""
auto_collation = True
def __init__(
self,
invar: Dict[str, np.array],
outvar_names: List[str], # Just names of output vars
):
self.invar = Dataset._to_tensor_dict(invar)
self.outvar_names = outvar_names
self.length = len(next(iter(invar.values())))
def __getitem__(self, idx):
invar = _DictDatasetMixin._idx_var(self.invar, idx)
return invar
def __len__(self):
return self.length
@property
def invar_keys(self):
return list(self.invar.keys())
@property
def outvar_keys(self):
return list(self.outvar_names)
def save_dataset(self, filename):
for i, invar in self.invar.items():
var_to_polyvtk(invar, filename + "_" + str(i))
def _stack_list_numpy_dict(list_var):
var = {}
for key in list_var[0].keys():
var[key] = np.stack([v[key] for v in list_var], axis=0)
return var
| modulus-sym-main | modulus/sym/dataset/continuous.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
from modulus.sym import quantity
from modulus.sym.node import Node
class NonDimensionalizer:
"""
Used for non-dimensionalization and normalization of physical quantities
Parameters
----------
length_scale : quantity
length scale. Defaults to quantity(1.0, "m").
time_scale : quantity
time scale. Defaults to quantity(1.0, "s").
mass_scale : quantity
mass scale. Defaults to quantity(1.0, "kg").
temperature_scale : quantity
temperature scale. Defaults to quantity(1.0, "K").
current_scale : quantity
current scale. Defaults to quantity(1.0, "A").
substance_scale : quantity
substance scale. Defaults to quantity(1.0, "mol").
luminosity_scale : quantity
luminosity scale. Defaults to quantity(1.0, "cd").
"""
def __init__(
self,
length_scale=quantity(1.0, "m"),
time_scale=quantity(1.0, "s"),
mass_scale=quantity(1.0, "kg"),
temperature_scale=quantity(1.0, "K"),
current_scale=quantity(1.0, "A"),
substance_scale=quantity(1.0, "mol"),
luminosity_scale=quantity(1.0, "cd"),
):
self._print_scale(length_scale, "length")
self._print_scale(time_scale, "time")
self._print_scale(mass_scale, "mass")
self._print_scale(temperature_scale, "temperature")
self._print_scale(current_scale, "current")
self._print_scale(substance_scale, "substance")
self._print_scale(luminosity_scale, "luminosity")
self.scale_dict = {
"[length]": length_scale.to_base_units(),
"[time]": time_scale.to_base_units(),
"[mass]": mass_scale.to_base_units(),
"[temperature]": temperature_scale.to_base_units(),
"[current]": current_scale.to_base_units(),
"[substance]": substance_scale.to_base_units(),
"[luminosity]": luminosity_scale.to_base_units(),
}
def ndim(self, qty, return_unit=False):
"""
Non-dimensionalize and normalize physical quantities
Parameters
----------
qty : quantity
Physical quantity
return_unit : bool
If True, returns the non-dimensionalized and normalized value in the form of a quantity with a "dimensionless" unit. If False, only returns the non-dimensionalized and normalized value
"""
qty.ito_base_units()
for key, value in dict(qty.dimensionality).items():
qty /= self.scale_dict[key] ** value
if dict(qty.dimensionality):
raise RuntimeError("Error in non-dimensionalization")
if return_unit:
return qty
else:
return qty.magnitude
def dim(self, invar, unit, return_unit=False):
"""
Scales back a non-dimensionalized quantity or value to a quantity with a desired unit
Parameters
----------
invar : Any(quantity, float)
Non-dimensionalized value or quantity
unit:
The target physical unit for the value or quantity
return_unit : bool
If True, returns the scaled value in the form of a quantity with a unit. If False, only returns the scaled value
"""
try:
    if dict(invar.dimensionality):
        raise RuntimeError("Error in dimensionalization")
except AttributeError:
    # plain floats/arrays carry no dimensionality attribute; skip the check
    pass
try:
qty = quantity(invar, "")
except:
qty = invar
dummy_qty = quantity(1, unit)
dummy_qty.ito_base_units()
for key, value in dict(dummy_qty.dimensionality).items():
qty *= self.scale_dict[key] ** value
qty.ito(unit)
if return_unit:
return qty
else:
return qty.magnitude
def _print_scale(self, scale, name):
"""
Print scales only if the default values are changed
"""
if scale.magnitude != 1.0:
print(f"{name} scale is {scale}")
class Scaler:
"""
generates a Modulus Node for scaling back non-dimensionalized and normalized quantities
Parameters
----------
invar : List[str]
List of non-dimensionalized variable names to be scaled back
outvar : List[str]
List of names for the scaled variables.
outvar_unit : List[str]
List of units for the scaled variables.
non_dimensionalizer : NonDimensionalizer
Modulus non-dimensionalizer object
"""
def __init__(self, invar, outvar, outvar_unit, non_dimensionalizer):
self.invar = invar
self.outvar = outvar
self.outvar_unit = outvar_unit
self.non_dimensionalizer = non_dimensionalizer
def make_node(self):
"""
generates a Modulus Node
"""
return [
Node(
inputs=self.invar,
outputs=self.outvar,
evaluate=_Scale(
self.invar, self.outvar, self.outvar_unit, self.non_dimensionalizer
),
)
]
class _Scale(torch.nn.Module):
"""
Scales back non-dimensionalized and normalized quantities
Parameters
----------
invar : List[str]
List of non-dimensionalized variable names to be scaled back
outvar : List[str]
List of names for the scaled variables.
outvar_unit : List[str]
List of units for the scaled variables.
non_dimensionalizer : NonDimensionalizer
Modulus non-dimensionalizer object
"""
def __init__(self, invar, outvar, outvar_unit, non_dimensionalizer):
super().__init__()
self.invar = invar
self.outvar = outvar
self.outvar_unit = outvar_unit
self.non_dimensionalizer = non_dimensionalizer
def forward(self, invar: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
outvar = {}
for i, key in enumerate(self.invar):
outvar[self.outvar[i]] = self.non_dimensionalizer.dim(
invar[key], self.outvar_unit[i]
)
return outvar
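# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# Scaler.make_node wraps _Scale in a Node so that a dimensional output (here the
# assumed name "u_dim", in m/s) can be recovered from a non-dimensional network
# output "u" inside a Modulus graph.
def _example_scaler_node():
    nd = NonDimensionalizer(
        length_scale=quantity(2.0, "m"), time_scale=quantity(4.0, "s")
    )
    scaler = Scaler(
        invar=["u"], outvar=["u_dim"], outvar_unit=["m/s"], non_dimensionalizer=nd
    )
    return scaler.make_node()  # list containing a single Node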
| modulus-sym-main | modulus/sym/eq/non_dim.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-sym-main | modulus/sym/eq/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" base class for PDEs
"""
from sympy import (
Symbol,
Function,
init_printing,
pprint,
latex,
preview,
Matrix,
Eq,
Basic,
)
from typing import Dict, Tuple, List, Union
from modulus.sym.node import Node
from modulus.sym.constants import diff_str
from modulus.sym.key import Key
class PDE(object):
"""base class for all partial differential equations"""
name = "PDE"
def __init__(self):
super().__init__()
self.equations = Variables()
def pprint(self, print_latex=False):
"""
Print differential equation.
Parameters
----------
print_latex : bool
If True print the equations in Latex. Else, just
print as text.
"""
init_printing(use_latex=True)
for key, value in self.equations.items():
print(str(key) + ": " + str(value))
if print_latex:
preview(
Matrix(
[
Eq(Function(name, real=True), eq)
for name, eq in self.equations.items()
]
),
mat_str="cases",
mat_delim="",
)
def subs(self, x, y):
for name, eq in self.equations.items():
self.equations[name] = eq.subs(x, y).doit()
def make_nodes(
self,
create_instances: int = 1,
freeze_terms: Dict[str, List[int]] = {},
detach_names: List[str] = [],
):
"""
Make a list of nodes from PDE.
Parameters
----------
create_instances : int
This will create various instances of the same equations
freeze_terms : Dict[str, List[int]]
This will freeze the terms in the appropriate equation
detach_names : List[str]
This will detach the inputs of the resulting node.
Returns
-------
nodes : List[Node]
Makes a separate node for every equation.
"""
nodes = []
if create_instances == 1:
if bool(freeze_terms):
print(
"Freezing of terms is not supported when create_instance = 1. No terms will be frozen!"
)
freeze_terms = {} # override with an empty dict
for name, eq in self.equations.items():
nodes.append(Node.from_sympy(eq, str(name), freeze_terms, detach_names))
else:
# look for empty lists in freeze_terms dict
for k in list(freeze_terms):
if not freeze_terms[k]:
freeze_terms.pop(k)
for i in range(create_instances):
for name, eq in self.equations.items():
if str(name) + "_" + str(i) in freeze_terms.keys():
nodes.append(
Node.from_sympy(
eq,
str(name) + "_" + str(i),
freeze_terms[str(name) + "_" + str(i)],
detach_names,
)
)
else:
# set the freeze terms to an empty list
print(
"No freeze terms found for instance: "
+ str(name)
+ "_"
+ str(i)
+ ", setting to empty"
)
nodes.append(
Node.from_sympy(
eq,
str(name) + "_" + str(i),
[],
detach_names,
)
)
return nodes
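# --- Illustrative sketch (hypothetical class, not part of the library) ---
# A PDE subclass only needs to populate self.equations with named sympy
# expressions; make_nodes() then turns each expression into a computational
# node. The 1-D advection equation u_t + c * u_x = 0 below is an assumed example.
class _ExampleAdvection1D(PDE):
    name = "ExampleAdvection1D"

    def __init__(self, c=1.0):
        x, t = Symbol("x"), Symbol("t")
        u = Function("u")(x, t)
        self.equations = {}
        self.equations["advection_u"] = u.diff(t) + c * u.diff(x)


# nodes = _ExampleAdvection1D(c=0.5).make_nodes()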
| modulus-sym-main | modulus/sym/eq/pde.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import torch
import numpy as np
import logging
from torch.autograd import Function
from modulus.sym.constants import diff
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.mfd import FirstDeriv, SecondDeriv, ThirdDeriv, ForthDeriv
from typing import Dict, List, Set, Optional, Union, Callable
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
# ==== Autodiff ====
@torch.jit.script
def gradient(y: torch.Tensor, x: List[torch.Tensor]) -> List[torch.Tensor]:
"""
TorchScript function to compute the gradient of a tensor with respect to multiple inputs
"""
grad_outputs: List[Optional[torch.Tensor]] = [torch.ones_like(y, device=y.device)]
grad = torch.autograd.grad(
[
y,
],
x,
grad_outputs=grad_outputs,
create_graph=True,
allow_unused=True,
)
if grad is None:
grad = [torch.zeros_like(xx) for xx in x]
assert grad is not None
grad = [g if g is not None else torch.zeros_like(x[i]) for i, g in enumerate(grad)]
return grad
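# --- Standalone illustration (hypothetical helper, not part of the library) ---
# gradient() returns one gradient tensor per input, computed with
# create_graph=True so the results can themselves be differentiated again.
# For y = x0**2 + 3*x1 the returned gradients are 2*x0 and a tensor of 3s.
def _example_gradient():
    x0 = torch.rand(5, 1, requires_grad=True)
    x1 = torch.rand(5, 1, requires_grad=True)
    y = x0**2 + 3 * x1
    dy_dx0, dy_dx1 = gradient(y, [x0, x1])
    return dy_dx0, dy_dx1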
class Derivative(torch.nn.Module):
"""
Module to compute derivatives using backward automatic differentiation
"""
def __init__(self, bwd_derivative_dict: Dict[Key, List[Key]]):
"""
Constructor of the Derivative class.
Parameters
----------
bwd_derivative_dict : Dict[Key, List[Key]]
    Dictionary mapping each variable to be differentiated to the list of
    variables it should be differentiated with respect to.
"""
super().__init__()
self.gradient_dict: Dict[str, Dict[str, int]] = {
str(k): {str(w): w.size for w in v} for k, v in bwd_derivative_dict.items()
}
self.gradient_names: Dict[str, List[str]] = {
k: [diff(k, der) for der in v.keys()] for k, v in self.gradient_dict.items()
}
self.nvtx_str: str = f"Auto-Diff Node: {list(self.gradient_dict.keys())}"
@staticmethod
def prepare_input(
input_variables: Dict[str, torch.Tensor], mask: List[str]
) -> List[torch.Tensor]:
return [input_variables[x] for x in mask]
@staticmethod
def dict_output(
output_tensors: List[torch.Tensor], sizes: List[str], var_name: str
) -> Dict[str, torch.Tensor]:
return {diff(var_name, name): output_tensors[i] for i, name in enumerate(sizes)}
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
output_var = {}
for var_name, grad_sizes in self.gradient_dict.items():
var = input_var[var_name]
grad_var = self.prepare_input(input_var, grad_sizes.keys())
grad = gradient(var, grad_var)
grad_dict = {
name: grad[i] for i, name in enumerate(self.gradient_names[var_name])
}
output_var.update(grad_dict)
return output_var
@classmethod
def make_node(cls, inputs: List[Key], derivatives: List[Key], name=None, jit=True):
derivatives = [d for d in derivatives if d not in inputs]
bwd_derivative_dict = _derivative_dict(inputs, derivatives, forward=False)
output_derivatives = []
for key, value in bwd_derivative_dict.items():
output_derivatives += [
Key(key.name, key.size, key.derivatives + [x]) for x in value
]
evaluate = cls(bwd_derivative_dict)
nvtx_str = evaluate.nvtx_str
if jit:
evaluate = torch.jit.script(evaluate)
derivative_node = Node(
inputs,
output_derivatives,
evaluate,
name=(nvtx_str if name is None else str(name)),
)
return derivative_node
def _derivative_dict(var, derivatives, forward=False):
needed = derivatives
while True: # break apart diff to see if first order needed
break_loop = True
for n in needed:
l_n = Key(n.name, n.size, n.derivatives[:-1])
if (len(n.derivatives) > 1) and l_n not in needed and l_n not in var:
needed.append(l_n)
break_loop = False
if break_loop:
break
current = var
diff_dict = {}
for c, n in itertools.product(current, needed):
c_under_n = Key(n.name, n.size, n.derivatives[0 : len(c.derivatives)])
if (c == c_under_n) and (len(n.derivatives) == len(c.derivatives) + 1):
if forward:
if n.derivatives[len(c.derivatives)] not in diff_dict:
diff_dict[n.derivatives[len(c.derivatives)]] = set()
diff_dict[n.derivatives[len(c.derivatives)]].add(c)
else:
if c not in diff_dict:
diff_dict[c] = set()
diff_dict[c].add(n.derivatives[len(c.derivatives)])
diff_dict = {key: list(value) for key, value in diff_dict.items()}
return diff_dict
# ==== Meshless finite derivs ====
class MeshlessFiniteDerivative(torch.nn.Module):
"""
Module to compute derivatives using meshless finite difference
Parameters
----------
model : torch.nn.Module
Forward torch module for calculating stencil values
derivatives : List[Key]
List of derivative keys to calculate
dx : Union[float, Callable]
Spatial discretization along all axes; can be a function of a `count` parameter
(the number of forward passes so far) for dynamically adjusting dx
order : int, optional
Order of derivative, by default 2
max_batch_size : Union[int, None], optional
Max batch size of stencil calculations, by default uses batch size of inputs
double_cast : bool, optional
Cast fields to double precision to calculate derivatives, by default True
jit : bool, optional
Use torch script for finite deriv calcs, by default True
"""
def __init__(
self,
model: torch.nn.Module,
derivatives: List[Key],
dx: Union[float, Callable],
order: int = 2,
max_batch_size: Union[int, None] = None,
double_cast: bool = True,
input_keys: Union[List[Key], None] = None,
):
super().__init__()
self.model = model
self._dx = dx
self.double_cast = double_cast
self.max_batch_size = max_batch_size
self.input_keys = input_keys
self.count = 0
self.derivatives = {1: [], 2: [], 3: [], 4: []}
for key in derivatives:
try:
self.derivatives[len(key.derivatives)].append(key)
except KeyError:
raise NotImplementedError(
f"{len(key.derivatives)}th derivatives not supported"
)
self.first_deriv = FirstDeriv(self.derivatives[1], self.dx, order=order)
self.second_deriv = SecondDeriv(self.derivatives[2], self.dx, order=order)
self.third_deriv = ThirdDeriv(self.derivatives[3], self.dx, order=order)
self.forth_deriv = ForthDeriv(self.derivatives[4], self.dx, order=order)
@torch.jit.ignore()
def forward(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
self.count += 1
dx = self.dx
self.first_deriv.dx = dx
self.second_deriv.dx = dx
self.third_deriv.dx = dx
self.forth_deriv.dx = dx
torch.cuda.nvtx.range_push(f"Calculating meshless finite derivatives")
# Assemble global stencil
global_stencil = []
for deriv in [
self.first_deriv,
self.second_deriv,
self.third_deriv,
self.forth_deriv,
]:
stencil_list = deriv.stencil
# Remove centered stencil points that are already in the input dictionary
# (filter into a new list instead of popping while iterating)
stencil_list = [
    point
    for point in stencil_list
    if not (point.split("::")[1] == "0" and point.split("::")[0] in inputs)
]
global_stencil.extend(stencil_list)
global_stencil = list(set(global_stencil))
# Number of stencil points to fit into a forward pass
input_batch_size = next(iter(inputs.values())).size(0)
if self.max_batch_size is None:
num_batch = 1
else:
num_batch = max([self.max_batch_size, input_batch_size]) // input_batch_size
# Stencil forward passes
index = 0
finite_diff_inputs = inputs.copy()
while index < len(global_stencil):
torch.cuda.nvtx.range_push(f"Running stencil forward pass")
# Batch up stencil inputs
stencil_batch = [global_stencil[index]]
index += 1
for j in range(1, min([len(global_stencil) - (index - 1), num_batch])):
stencil_batch.append(global_stencil[index])
index += 1
model_inputs = self._get_stencil_input(inputs, stencil_batch)
# Model forward
outputs = self.model(model_inputs)
# Disassemble batched outputs
for key, value in outputs.items():
outputs[key] = torch.split(value.view(-1, len(stencil_batch)), 1, dim=1)
for i, stencil_str in enumerate(stencil_batch):
for key, value in outputs.items():
finite_diff_inputs[f"{key}>>{stencil_str}"] = value[i]
torch.cuda.nvtx.range_pop()
# Calc finite diff grads
torch.cuda.nvtx.range_push(f"Calc finite difference")
if self.double_cast: # Cast tensors to doubles for finite diff calc
for key, value in finite_diff_inputs.items():
finite_diff_inputs[key] = value.double()
outputs_first = self.first_deriv(finite_diff_inputs)
outputs_second = self.second_deriv(finite_diff_inputs)
outputs_third = self.third_deriv(finite_diff_inputs)
outputs_forth = self.forth_deriv(finite_diff_inputs)
outputs = inputs
if self.double_cast:
dtype = torch.get_default_dtype()
for key, value in outputs_first.items():
outputs_first[key] = value.type(dtype)
for key, value in outputs_second.items():
outputs_second[key] = value.type(dtype)
for key, value in outputs_third.items():
outputs_third[key] = value.type(dtype)
for key, value in outputs_forth.items():
outputs_forth[key] = value.type(dtype)
outputs = {
**inputs,
**outputs_first,
**outputs_second,
**outputs_third,
**outputs_forth,
}
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_pop()
return outputs
@property
def dx(self):
if hasattr(self._dx, "__call__"):
return self._dx(self.count)
else:
return self._dx
def _get_stencil_input(
self, inputs: Dict[str, Tensor], stencil_strs: List[str]
) -> Dict[str, Tensor]:
"""Creates a copy of the inputs tensor and adjusts its values based on
the stencil str.
Parameters
----------
inputs : Dict[str, Tensor]
Input tensor dictionary
stencil_strs : List[str]
batch list of stencil string from derivative class
Returns
-------
Dict[str, Tensor]
Modified input tensor dictionary
Example
-------
A stencil string `x::1` will modify inputs['x'] = inputs['x'] + dx
A stencil string `y::-1,z::1` will modify inputs['y'] = inputs['y'] - dx, inputs['z'] = inputs['z'] + dx
"""
if self.input_keys is None:
outputs = inputs.copy()
else:
outputs = {str(key): inputs[str(key)].clone() for key in self.input_keys}
for key, value in outputs.items():
outputs[key] = value.repeat(1, len(stencil_strs))
for i, stencil_str in enumerate(stencil_strs):
# Loop through points
for point in stencil_str.split("&&"):
var_name = point.split("::")[0]
spacing = int(point.split("::")[1])
outputs[var_name][:, i] = outputs[var_name][:, i] + spacing * self.dx
for key, value in outputs.items():
outputs[key] = value.view(-1, 1)
return outputs
@classmethod
def make_node(
cls,
node_model: Union[Node, torch.nn.Module],
derivatives: List[Key],
dx: Union[float, Callable],
order: int = 2,
max_batch_size: Union[int, None] = None,
name: str = None,
double_cast: bool = True,
input_keys: Union[List[Key], List[str], None] = None,
):
"""Makes a meshless finite derivative node.
Parameters
----------
node_model : Union[Node, torch.nn.Module]
Node or torch.nn.Module for computing FD stencil values.
Part of the inputs to this model should consist of the independent
variables and output the functional value
derivatives : List[Key]
List of derivatives to be computed
dx : Union[float, Callable]
Spatial discretization for finite diff calcs, can be function
order : int, optional
Order of accuracy of finite diff calcs, by default 2
max_batch_size : Union[int, None], optional
Maximum batch size to be used with the stencil forward passes, by default None
name : str, optional
Name of node, by default None
double_cast : bool, optional
Cast tensors to double precision for derivatives, by default True
input_keys : Union[List[Key], List[str], None], optional
List of input keys to be used for input of forward model.
Should be used if node_model is not a :obj:`Node`, by default None
"""
# We have two sets of input keys:
# input_keys: which are the list of inputs to the model for stencil points
# mfd_input_keys: input keys for the MFD node
if input_keys is None:
input_keys = []
mfd_input_keys = []
else:
input_keys = [str(key) for key in input_keys]
mfd_input_keys = [str(key) for key in input_keys]
for derivative in derivatives:
mfd_input_keys.append(derivative.name)
for dstr in derivative.derivatives:
mfd_input_keys.append(dstr.name)
input_keys.append(dstr.name)
if isinstance(node_model, Node):
model = node_model.evaluate
input_keys = input_keys + [str(key) for key in node_model.inputs]
else:
model = node_model
# Remove duplicate keys
mfd_input_keys = Key.convert_list(list(set(mfd_input_keys)))
input_keys = Key.convert_list(list(set(input_keys)))
evaluate = cls(
model,
derivatives,
dx=dx,
order=order,
max_batch_size=max_batch_size,
double_cast=double_cast,
input_keys=input_keys,
)
derivative_node = Node(
mfd_input_keys,
derivatives,
evaluate,
name=(
"Meshless-Finite-Derivative Node" + "" if name is None else f": {name}"
),
)
return derivative_node
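# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# make_node wraps a forward model so the requested derivatives are computed with
# finite-difference stencils instead of autograd. The toy model and the shrinking
# dx schedule are assumptions for the example; Key(name, size, derivatives)
# follows the positional constructor used elsewhere in this module.
def _example_mfd_node():
    class _ToyModel(torch.nn.Module):
        def forward(self, invar):
            return {"u": invar["x"] ** 2 + invar["y"]}

    derivatives = [
        Key("u", 1, [Key("x")]),  # du/dx
        Key("u", 1, [Key("x"), Key("x")]),  # d2u/dx2
    ]
    dx = lambda count: max(1e-4, 1e-2 / (1 + count // 1000))  # refine dx over time
    return MeshlessFiniteDerivative.make_node(
        _ToyModel(), derivatives, dx=dx, input_keys=[Key("x"), Key("y")]
    )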
| modulus-sym-main | modulus/sym/eq/derivatives.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Maxwell's equation
"""
from sympy import Symbol, Function, Number
import numpy as np
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
# helper functions computing curl
def _curl(v):
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
dim = len(v)
if dim == 3:
vx, vy, vz = v
return [
vz.diff(y) - vy.diff(z),
vx.diff(z) - vz.diff(x),
vy.diff(x) - vx.diff(y),
]
elif dim == 2:
vx, vy = v
return [vy.diff(x) - vx.diff(y)]
elif dim == 1:
return [v[0].diff(y), -v[0].diff(x)]
else:
raise Exception("Input dimension for Curl operator must be 1, 2 or 3!")
# helper functions computing cross product
def _cross(a, b):
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
dim = len(a)
if dim == 3:
a1, a2, a3 = a
b1, b2, b3 = b
return [a2 * b3 - a3 * b2, a3 * b1 - a1 * b3, a1 * b2 - a2 * b1]
elif dim == 2:
a1, a2 = a
b1, b2 = b
return [a1 * b2 - a2 * b1]
else:
raise Exception("Input dimension for cross product must be 2 or 3!")
class MaxwellFreqReal(PDE):
"""
Frequency domain Maxwell's equation
Parameters
==========
ux : str
Ex
uy : str
Ey
uz : str
Ez
k : float, Sympy Symbol/Expr, str
Wave number. If `k` is a str then it is
converted to Sympy Function of form 'k(x,y,z,t)'.
If 'k' is a Sympy Symbol or Expression then this
is substituted into the equation.
mixed_form: bool
If True, use the mixed formulation of Maxwell's equations.
"""
name = "MaxwellFreqReal"
def __init__(self, ux="ux", uy="uy", uz="uz", k=1.0, mixed_form=False):
# set params
self.ux = ux
self.uy = uy
self.uz = uz
self.mixed_form = mixed_form
if self.mixed_form:
raise NotImplementedError(
"Maxwell's equation is not implemented in mixed form"
)
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# wave speed coefficient
if isinstance(k, str):
k = Function(k)(*input_variables)
elif isinstance(k, (float, int)):
k = Number(k)
# E field
assert isinstance(ux, str), "uz needs to be string"
ux = Function(ux)(*input_variables)
assert isinstance(uy, str), "uy needs to be string"
uy = Function(uy)(*input_variables)
assert isinstance(uz, str), "uz needs to be string"
uz = Function(uz)(*input_variables)
# compute del X (del X E)
c2ux, c2uy, c2uz = _curl(_curl([ux, uy, uz]))
# set equations
self.equations = {}
self.equations["Maxwell_Freq_real_x"] = c2ux - k**2 * ux
self.equations["Maxwell_Freq_real_y"] = c2uy - k**2 * uy
self.equations["Maxwell_Freq_real_z"] = c2uz - k**2 * uz
class SommerfeldBC(PDE):
"""
Frequency domain ABC, Sommerfeld radiation condition
Only for real part
Equation: 'n x _curl(E) = 0'
Parameters
==========
ux : str
Ex
uy : str
Ey
uz : str
Ez
"""
name = "SommerfeldBC"
def __init__(self, ux="ux", uy="uy", uz="uz"):
# set params
self.ux = ux
self.uy = uy
self.uz = uz
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# E field
assert isinstance(ux, str), "uz needs to be string"
ux = Function(ux)(*input_variables)
assert isinstance(uy, str), "uy needs to be string"
uy = Function(uy)(*input_variables)
assert isinstance(uz, str), "uz needs to be string"
uz = Function(uz)(*input_variables)
# compute cross product of curl for sommerfeld bc
n = [normal_x, normal_y, normal_z]
u = [ux, uy, uz]
bcs = _cross(n, _curl(u))
# set equations
self.equations = {}
self.equations["SommerfeldBC_real_x"] = bcs[0]
self.equations["SommerfeldBC_real_y"] = bcs[1]
self.equations["SommerfeldBC_real_z"] = bcs[2]
class PEC(PDE):
"""
Perfect Electric Conductor (PEC) boundary condition
Parameters
==========
ux : str
Ex
uy : str
Ey
uz : str
Ez
dim : int
Dimension of the equations (2, or 3). Default is 3.
"""
name = "PEC_3D"
def __init__(self, ux="ux", uy="uy", uz="uz", dim=3):
# set params
self.ux = ux
self.uy = uy
self.uz = uz
self.dim = dim
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# E field
assert isinstance(ux, str), "uz needs to be string"
ux = Function(ux)(*input_variables)
assert isinstance(uy, str), "uy needs to be string"
uy = Function(uy)(*input_variables)
if self.dim == 3:
assert isinstance(uz, str), "uz needs to be string"
uz = Function(uz)(*input_variables)
# compute cross of electric field
if self.dim == 2:
n = [normal_x, normal_y]
u = [ux, uy]
elif self.dim == 3:
n = [normal_x, normal_y, normal_z]
u = [ux, uy, uz]
else:
raise ValueError("'dim' needs to be 2 or 3")
bcs = _cross(n, u)
# set equations
self.equations = {}
self.equations["PEC_x"] = bcs[0]
if self.dim == 3:
self.equations["PEC_y"] = bcs[1]
self.equations["PEC_z"] = bcs[2]
| modulus-sym-main | modulus/sym/eq/pdes/electromagnetic.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diffusion equation
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class Diffusion(PDE):
"""
Diffusion equation
Parameters
==========
T : str
The dependent variable.
D : float, Sympy Symbol/Expr, str
Diffusivity. If `D` is a str then it is
converted to Sympy Function of form 'D(x,y,z,t)'.
If 'D' is a Sympy Symbol or Expression then this
is substituted into the equation.
Q : float, Sympy Symbol/Expr, str
The source term. If `Q` is a str then it is
converted to Sympy Function of form 'Q(x,y,z,t)'.
If 'Q' is a Sympy Symbol or Expression then this
is substituted into the equation. Default is 0.
dim : int
Dimension of the diffusion equation (1, 2, or 3).
Default is 3.
time : bool
If time-dependent equations or not. Default is True.
mixed_form: bool
If True, use the mixed formulation of the diffusion equations.
Examples
========
>>> diff = Diffusion(D=0.1, Q=1, dim=2)
>>> diff.pprint()
diffusion_T: T__t - 0.1*T__x__x - 0.1*T__y__y - 1
>>> diff = Diffusion(T='u', D='D', Q='Q', dim=3, time=False)
>>> diff.pprint()
diffusion_u: -D*u__x__x - D*u__y__y - D*u__z__z - Q - D__x*u__x - D__y*u__y - D__z*u__z
"""
name = "Diffusion"
def __init__(self, T="T", D="D", Q=0, dim=3, time=True, mixed_form=False):
# set params
self.T = T
self.dim = dim
self.time = time
self.mixed_form = mixed_form
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Temperature
assert type(T) == str, "T needs to be string"
T = Function(T)(*input_variables)
# Diffusivity
if type(D) is str:
D = Function(D)(*input_variables)
elif type(D) in [float, int]:
D = Number(D)
# Source
if type(Q) is str:
Q = Function(Q)(*input_variables)
elif type(Q) in [float, int]:
Q = Number(Q)
# set equations
self.equations = {}
if not self.mixed_form:
self.equations["diffusion_" + self.T] = (
T.diff(t)
- (D * T.diff(x)).diff(x)
- (D * T.diff(y)).diff(y)
- (D * T.diff(z)).diff(z)
- Q
)
elif self.mixed_form:
T_x = Function("T_x")(*input_variables)
T_y = Function("T_y")(*input_variables)
if self.dim == 3:
T_z = Function("T_z")(*input_variables)
else:
T_z = Number(0)
self.equations["diffusion_" + self.T] = (
T.diff(t)
- (D * T_x).diff(x)
- (D * T_y).diff(y)
- (D * T_z).diff(z)
- Q
)
self.equations["compatibility_T_x"] = T.diff(x) - T_x
self.equations["compatibility_T_y"] = T.diff(y) - T_y
self.equations["compatibility_T_z"] = T.diff(z) - T_z
self.equations["compatibility_T_xy"] = T_x.diff(y) - T_y.diff(x)
self.equations["compatibility_T_xz"] = T_x.diff(z) - T_z.diff(x)
self.equations["compatibility_T_yz"] = T_y.diff(z) - T_z.diff(y)
if self.dim == 2:
self.equations.pop("compatibility_T_z")
self.equations.pop("compatibility_T_xz")
self.equations.pop("compatibility_T_yz")
class DiffusionInterface(PDE):
"""
Matches the boundary conditions at an interface
Parameters
==========
T_1, T_2 : str
Dependent variables to match the boundary conditions at the interface.
D_1, D_2 : float
Diffusivity at the interface.
dim : int
Dimension of the equations (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
Example
========
>>> diff = DiffusionInterface('theta_s', 'theta_f', 0.1, 0.05, dim=2)
>>> diff.pprint()
diffusion_interface_dirichlet_theta_s_theta_f: -theta_f + theta_s
diffusion_interface_neumann_theta_s_theta_f: -0.05*normal_x*theta_f__x
+ 0.1*normal_x*theta_s__x - 0.05*normal_y*theta_f__y
+ 0.1*normal_y*theta_s__y
"""
name = "DiffusionInterface"
def __init__(self, T_1, T_2, D_1, D_2, dim=3, time=True):
# set params
self.T_1 = T_1
self.T_2 = T_2
self.D_1 = D_1
self.D_2 = D_2
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# variables to match the boundary conditions (example Temperature)
T_1 = Function(T_1)(*input_variables)
T_2 = Function(T_2)(*input_variables)
# set equations
self.equations = {}
self.equations["diffusion_interface_dirichlet_" + self.T_1 + "_" + self.T_2] = (
T_1 - T_2
)
flux_1 = self.D_1 * (
normal_x * T_1.diff(x) + normal_y * T_1.diff(y) + normal_z * T_1.diff(z)
)
flux_2 = self.D_2 * (
normal_x * T_2.diff(x) + normal_y * T_2.diff(y) + normal_z * T_2.diff(z)
)
self.equations["diffusion_interface_neumann_" + self.T_1 + "_" + self.T_2] = (
flux_1 - flux_2
)
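# --- Illustrative usage sketch (hypothetical helper, not part of the library) ---
# Diffusion and DiffusionInterface follow the usual PDE workflow: inspect the
# symbolic equations with pprint() or build computational nodes with make_nodes().
def _example_diffusion_nodes():
    heat = Diffusion(T="u", D=0.1, Q=0, dim=2, time=True)
    interface = DiffusionInterface("theta_s", "theta_f", 0.1, 0.05, dim=2)
    return heat.make_nodes() + interface.make_nodes()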
| modulus-sym-main | modulus/sym/eq/pdes/diffusion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Energy equation
Reference:
https://www.comsol.com/multiphysics/heat-transfer-conservation-of-energy
http://dl.icdst.org/pdfs/files1/2fe68e957cdf09a4862088ed279f00b0.pdf
http://farside.ph.utexas.edu/teaching/336L/Fluidhtml/node14.html#e4.67
"""
from sympy import Symbol, Function, Number
from sympy import *
from modulus.sym.eq.pde import PDE
from ..constants import diff
class EnergyFluid(PDE):  # TODO clean function similar to others
"""
Energy equation
Supports compressible flow.
For Ideal gases only (uses the assumption that beta*T = 1).
No heat/energy source added.
Parameters
==========
cp : str
The specific heat.
kappa : str
The conductivity.
rho : Sympy Symbol/Expr, str
The density. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation.
nu : Sympy Symbol/Expr, str
The kinematic viscosity. If `nu` is a str then it is
converted to Sympy Function of form 'nu(x,y,z,t)'.
If 'nu' is a Sympy Symbol or Expression then this
is substituted into the equation.
visc_heating : bool
If viscous heating is applied or not. Default is False.
dim : int
Dimension of the energy equation (2 or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is False.
mixed_form: bool
If True, use the mixed formulation of the energy equation.
Examples
========
>>> ene = EnergyFluid(nu=0.1, rho='rho', cp=2.0, kappa=5, dim=2, time=False, visc_heating=False)
>>> ene.pprint()
temperauture_fluid: 2.0*(u(x, y)*Derivative(T(x, y), x) + v(x, y)*Derivative(T(x, y), y))*rho(x, y) - u(x, y)*Derivative(p(x, y), x) - v(x, y)*Derivative(p(x, y), y) - 5*Derivative(T(x, y), (x, 2)) - 5*Derivative(T(x, y), (y, 2))
"""
def __init__(
self,
cp="cp",
kappa="kappa",
rho="rho",
nu="nu",
visc_heating=False,
dim=3,
time=False,
mixed_form=False,
):
# set params
self.dim = dim
self.time = time
self.nu = nu
self.rho = rho
self.visc_heating = visc_heating
self.mixed_form = mixed_form
# specific heat
self.cp = cp
# conductivity
self.kappa = kappa
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# velocity componets
u = Function("u")(x, y, z, t)
v = Function("v")(x, y, z, t)
w = Function("w")(x, y, z, t)
# Density
rho = Function("rho")(x, y, z, t)
# kinematic viscosity
nu = Function("nu")(x, y, z, y)
# pressure
p = Function("p")(x, y, z, t)
# Temperature
T = Function("T")(x, y, z, t)
# viscous heat dissipation
vel = [u, v, w]
coord = [x, y, z]
visc_h = 0 * x
if visc_heating:
for i, j in zip(range(0, 3), range(0, 3)):
visc_h = visc_h + (
vel[i].diff(coord[j]) * vel[i].diff(coord[j])
+ vel[i].diff(coord[j]) * vel[j].diff(coord[i])
- 2 / 3 * vel[i].diff(coord[i]) * vel[j].diff(coord[j])
)
visc_h = nu * rho * visc_h
# pressure work
p_work = (
0 * x
if type(self.rho) == float
else (p.diff(t) + u * (p.diff(x)) + v * (p.diff(y)) + w * (p.diff(z)))
)
# set equations
self.equations = {}
if not self.mixed_form:
self.equations["temperauture_fluid"] = (
rho
* cp
* (T.diff(t) + u * (T.diff(x)) + v * (T.diff(y)) + w * (T.diff(z)))
- kappa * (T.diff(x)).diff(x)
- kappa * (T.diff(y)).diff(y)
- kappa * (T.diff(z)).diff(z)
- p_work
- visc_h
)
elif self.mixed_form:
T_x = Function("T_x")(x, y, z, t)
T_y = Function("T_y")(x, y, z, t)
if self.dim == 3:
T_z = Function("T_z")(x, y, z, t)
else:
T_z = Number(0)
self.equations["temperauture_fluid"] = (
rho
* cp
* (T.diff(t) + u * (T.diff(x)) + v * (T.diff(y)) + w * (T.diff(z)))
- kappa * (T_x).diff(x)
- kappa * (T_y).diff(y)
- kappa * (T_z).diff(z)
- p_work
- visc_h
)
self.equations["compatibility_T_x"] = T.diff(x) - T_x
self.equations["compatibility_T_y"] = T.diff(y) - T_y
self.equations["compatibility_T_z"] = T.diff(z) - T_z
self.equations["compatibility_T_xy"] = T_x.diff(y) - T_y.diff(x)
self.equations["compatibility_T_xz"] = T_x.diff(z) - T_z.diff(x)
self.equations["compatibility_T_yz"] = T_y.diff(z) - T_z.diff(y)
if self.dim == 2:
self.equations.pop("compatibility_T_z")
self.equations.pop("compatibility_T_xz")
self.equations.pop("compatibility_T_yz")
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
self.subs(u, Function("u")(*input_variables))
self.subs(v, Function("v")(*input_variables))
self.subs(w, Function("w")(*input_variables))
self.subs(T, Function("T")(*input_variables))
self.subs(p, Function("p")(*input_variables))
self.subs(nu, Function("nu")(*input_variables))
self.subs(rho, Function("rho")(*input_variables))
if type(self.rho) == float:
self.subs(Function("rho")(*input_variables), self.rho)
if type(self.nu) == float:
self.subs(Function("nu")(*input_variables), self.nu)
if self.mixed_form:
self.subs(T_x, Function("T_x")(*input_variables))
self.subs(T_y, Function("T_y")(*input_variables))
if self.dim == 3:
self.subs(T_z, Function("T_z")(*input_variables))
| modulus-sym-main | modulus/sym/eq/pdes/energy_equation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class NavierStokes(PDE):
"""
Compressible Navier Stokes equations
Reference:
https://turbmodels.larc.nasa.gov/implementrans.html
Parameters
==========
nu : float, Sympy Symbol/Expr, str
The kinematic viscosity. If `nu` is a str then it is
converted to Sympy Function of form `nu(x,y,z,t)`.
If `nu` is a Sympy Symbol or Expression then this
is substituted into the equation. This allows for
variable viscosity.
rho : float, Sympy Symbol/Expr, str
The density of the fluid. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation to allow for
compressible Navier Stokes. Default is 1.
dim : int
Dimension of the Navier Stokes (2 or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
mixed_form: bool
If True, use the mixed formulation of the Navier-Stokes equations.
Examples
========
>>> ns = NavierStokes(nu=0.01, rho=1, dim=2)
>>> ns.pprint()
continuity: u__x + v__y
momentum_x: u*u__x + v*u__y + p__x + u__t - 0.01*u__x__x - 0.01*u__y__y
momentum_y: u*v__x + v*v__y + p__y + v__t - 0.01*v__x__x - 0.01*v__y__y
>>> ns = NavierStokes(nu='nu', rho=1, dim=2, time=False)
>>> ns.pprint()
continuity: u__x + v__y
momentum_x: -nu*u__x__x - nu*u__y__y + u*u__x + v*u__y - 2*nu__x*u__x - nu__y*u__y - nu__y*v__x + p__x
momentum_y: -nu*v__x__x - nu*v__y__y + u*v__x + v*v__y - nu__x*u__y - nu__x*v__x - 2*nu__y*v__y + p__y
"""
name = "NavierStokes"
def __init__(self, nu, rho=1, dim=3, time=True, mixed_form=False):
# set params
self.dim = dim
self.time = time
self.mixed_form = mixed_form
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
if self.dim == 3:
w = Function("w")(*input_variables)
else:
w = Number(0)
# pressure
p = Function("p")(*input_variables)
# kinematic viscosity
if isinstance(nu, str):
nu = Function(nu)(*input_variables)
elif isinstance(nu, (float, int)):
nu = Number(nu)
# density
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# dynamic viscosity
mu = rho * nu
# set equations
self.equations = {}
self.equations["continuity"] = (
rho.diff(t) + (rho * u).diff(x) + (rho * v).diff(y) + (rho * w).diff(z)
)
if not self.mixed_form:
# note: despite its name, "curl" holds the velocity divergence term; it drops to zero when the density is constant
curl = Number(0) if rho.diff(x) == 0 else u.diff(x) + v.diff(y) + w.diff(z)
self.equations["momentum_x"] = (
(rho * u).diff(t)
+ (
u * ((rho * u).diff(x))
+ v * ((rho * u).diff(y))
+ w * ((rho * u).diff(z))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u.diff(x)).diff(x)
- (mu * u.diff(y)).diff(y)
- (mu * u.diff(z)).diff(z)
- (mu * (curl).diff(x))
- mu.diff(x) * u.diff(x)
- mu.diff(y) * v.diff(x)
- mu.diff(z) * w.diff(x)
)
self.equations["momentum_y"] = (
(rho * v).diff(t)
+ (
u * ((rho * v).diff(x))
+ v * ((rho * v).diff(y))
+ w * ((rho * v).diff(z))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v.diff(x)).diff(x)
- (mu * v.diff(y)).diff(y)
- (mu * v.diff(z)).diff(z)
- (mu * (curl).diff(y))
- mu.diff(x) * u.diff(y)
- mu.diff(y) * v.diff(y)
- mu.diff(z) * w.diff(y)
)
self.equations["momentum_z"] = (
(rho * w).diff(t)
+ (
u * ((rho * w).diff(x))
+ v * ((rho * w).diff(y))
+ w * ((rho * w).diff(z))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w.diff(x)).diff(x)
- (mu * w.diff(y)).diff(y)
- (mu * w.diff(z)).diff(z)
- (mu * (curl).diff(z))
- mu.diff(x) * u.diff(z)
- mu.diff(y) * v.diff(z)
- mu.diff(z) * w.diff(z)
)
if self.dim == 2:
self.equations.pop("momentum_z")
elif self.mixed_form:
u_x = Function("u_x")(*input_variables)
u_y = Function("u_y")(*input_variables)
u_z = Function("u_z")(*input_variables)
v_x = Function("v_x")(*input_variables)
v_y = Function("v_y")(*input_variables)
v_z = Function("v_z")(*input_variables)
if self.dim == 3:
w_x = Function("w_x")(*input_variables)
w_y = Function("w_y")(*input_variables)
w_z = Function("w_z")(*input_variables)
else:
w_x = Number(0)
w_y = Number(0)
w_z = Number(0)
u_z = Number(0)
v_z = Number(0)
# note: despite its name, "curl" holds the velocity divergence term; it drops to zero when the density is constant
curl = Number(0) if rho.diff(x) == 0 else u_x + v_y + w_z
self.equations["momentum_x"] = (
(rho * u).diff(t)
+ (
u * ((rho * u.diff(x)))
+ v * ((rho * u.diff(y)))
+ w * ((rho * u.diff(z)))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u_x).diff(x)
- (mu * u_y).diff(y)
- (mu * u_z).diff(z)
- (mu * (curl).diff(x))
- mu.diff(x) * u.diff(x)
- mu.diff(y) * v.diff(x)
- mu.diff(z) * w.diff(x)
)
self.equations["momentum_y"] = (
(rho * v).diff(t)
+ (
u * ((rho * v.diff(x)))
+ v * ((rho * v.diff(y)))
+ w * ((rho * v.diff(z)))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v_x).diff(x)
- (mu * v_y).diff(y)
- (mu * v_z).diff(z)
- (mu * (curl).diff(y))
- mu.diff(x) * u.diff(y)
- mu.diff(y) * v.diff(y)
- mu.diff(z) * w.diff(y)
)
self.equations["momentum_z"] = (
(rho * w).diff(t)
+ (
u * ((rho * w.diff(x)))
+ v * ((rho * w.diff(y)))
+ w * ((rho * w.diff(z)))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w_x).diff(x)
- (mu * w_y).diff(y)
- (mu * w_z).diff(z)
- (mu * (curl).diff(z))
- mu.diff(x) * u.diff(z)
- mu.diff(y) * v.diff(z)
- mu.diff(z) * w.diff(z)
)
self.equations["compatibility_u_x"] = u.diff(x) - u_x
self.equations["compatibility_u_y"] = u.diff(y) - u_y
self.equations["compatibility_u_z"] = u.diff(z) - u_z
self.equations["compatibility_v_x"] = v.diff(x) - v_x
self.equations["compatibility_v_y"] = v.diff(y) - v_y
self.equations["compatibility_v_z"] = v.diff(z) - v_z
self.equations["compatibility_w_x"] = w.diff(x) - w_x
self.equations["compatibility_w_y"] = w.diff(y) - w_y
self.equations["compatibility_w_z"] = w.diff(z) - w_z
self.equations["compatibility_u_xy"] = u_x.diff(y) - u_y.diff(x)
self.equations["compatibility_u_xz"] = u_x.diff(z) - u_z.diff(x)
self.equations["compatibility_u_yz"] = u_y.diff(z) - u_z.diff(y)
self.equations["compatibility_v_xy"] = v_x.diff(y) - v_y.diff(x)
self.equations["compatibility_v_xz"] = v_x.diff(z) - v_z.diff(x)
self.equations["compatibility_v_yz"] = v_y.diff(z) - v_z.diff(y)
self.equations["compatibility_w_xy"] = w_x.diff(y) - w_y.diff(x)
self.equations["compatibility_w_xz"] = w_x.diff(z) - w_z.diff(x)
self.equations["compatibility_w_yz"] = w_y.diff(z) - w_z.diff(y)
if self.dim == 2:
self.equations.pop("momentum_z")
self.equations.pop("compatibility_u_z")
self.equations.pop("compatibility_v_z")
self.equations.pop("compatibility_w_x")
self.equations.pop("compatibility_w_y")
self.equations.pop("compatibility_w_z")
self.equations.pop("compatibility_u_xz")
self.equations.pop("compatibility_u_yz")
self.equations.pop("compatibility_v_xz")
self.equations.pop("compatibility_v_yz")
self.equations.pop("compatibility_w_xy")
self.equations.pop("compatibility_w_xz")
self.equations.pop("compatibility_w_yz")
class GradNormal(PDE):
"""
Implementation of the gradient boundary condition
Parameters
==========
T : str
The dependent variable.
dim : int
Dimension of the equations (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
Examples
========
>>> gn = GradNormal(T='T')
>>> gn.pprint()
normal_gradient_T: normal_x*T__x + normal_y*T__y + normal_z*T__z
"""
name = "GradNormal"
def __init__(self, T, dim=3, time=True):
self.T = T
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# variables to set the gradients (example Temperature)
T = Function(T)(*input_variables)
# set equations
self.equations = {}
self.equations["normal_gradient_" + self.T] = (
normal_x * T.diff(x) + normal_y * T.diff(y) + normal_z * T.diff(z)
)
class Curl(PDE):
"""
del cross vector operator
Parameters
==========
vector : tuple of 3 Sympy Exprs, floats, ints or strings
This will be the vector to take the curl of.
curl_name : tuple of 3 strings
These will be the output names of the curl operations.
Examples
========
>>> c = Curl((0,0,'phi'), ('u','v','w'))
>>> c.pprint()
u: phi__y
v: -phi__x
w: 0
"""
name = "Curl"
def __init__(self, vector, curl_name=["u", "v", "w"]):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# vector
v_0 = vector[0]
v_1 = vector[1]
v_2 = vector[2]
# make functions
if type(v_0) is str:
v_0 = Function(v_0)(*input_variables)
elif type(v_0) in [float, int]:
v_0 = Number(v_0)
if type(v_1) is str:
v_1 = Function(v_1)(*input_variables)
elif type(v_1) in [float, int]:
v_1 = Number(v_1)
if type(v_2) is str:
v_2 = Function(v_2)(*input_variables)
elif type(v_2) in [float, int]:
v_2 = Number(v_2)
# curl
curl_0 = v_2.diff(y) - v_1.diff(z)
curl_1 = v_0.diff(z) - v_2.diff(x)
curl_2 = v_1.diff(x) - v_0.diff(y)
# set equations
self.equations = {}
self.equations[curl_name[0]] = curl_0
self.equations[curl_name[1]] = curl_1
self.equations[curl_name[2]] = curl_2
class CompressibleIntegralContinuity(PDE):
"""
Compressible Integral Continuity
Parameters
==========
rho : float, Sympy Symbol/Expr, str
The density of the fluid. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation to allow for
compressibility. Default is 1.
vec : list of str
The names of the velocity components. The dimension of the equation
is inferred from the length of this list. Default is ["u", "v", "w"].
"""
name = "CompressibleIntegralContinuity"
def __init__(self, rho=1, vec=["u", "v", "w"]):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
self.dim = len(vec)
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
# normal
normal = [Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z")]
# density
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# make input variables
self.equations = {}
self.equations["integral_continuity"] = 0
for v, n in zip(vec, normal):
self.equations["integral_continuity"] += Symbol(v) * n * rho
class FluxContinuity(PDE):
"""
Flux Continuity for arbitrary variable. Includes advective and diffusive flux
Parameters
==========
T : str
The dependent variable.
rho : float, Sympy Symbol/Expr, str
The density of the fluid. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation to allow for
compressibility. Default is 1.
D : float, Sympy Symbol/Expr, str
The diffusivity. If `D` is a str then it is
converted to Sympy Function of form 'D(x,y,z)'.
If 'D' is a Sympy Symbol or Expression then this
is substituted into the equation. Default is "D".
vec : list of str
The names of the velocity components. The dimension of the equation
is inferred from the length of this list. Default is ["u", "v", "w"].
"""
name = "FluxContinuity"
def __init__(self, T="T", D="D", rho=1, vec=["u", "v", "w"]):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
self.dim = len(vec)
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
# normal
normal = [Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z")]
# density
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# diffusion coefficient
if isinstance(D, str):
D = Function(D)(*input_variables)
elif isinstance(D, (float, int)):
D = Number(D)
# variables to set the flux (example Temperature)
T = Function(T)(*input_variables)
gradient = [T.diff(x), T.diff(y), T.diff(z)]
# make input variables
self.equations = {}
self.equations[str(T) + "_flux"] = 0
for v, n, g in zip(vec, normal, gradient):
self.equations[str(T) + "_flux"] += (
Symbol(v) * n * rho * T - rho * D * n * g
)
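# Usage sketch (illustrative only; the parameter values and underscore-prefixed
# names below are arbitrary): build the integral continuity and flux continuity
# residuals for a 2D velocity field and inspect the symbolic equations.
_integral_continuity = CompressibleIntegralContinuity(rho=1, vec=["u", "v"])
print(_integral_continuity.equations)
_flux_continuity = FluxContinuity(T="T", D=0.1, rho=1, vec=["u", "v"])
print(_flux_continuity.equations)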
| modulus-sym-main | modulus/sym/eq/pdes/navier_stokes.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to linear elasticity
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class LinearElasticity(PDE):
"""
Linear elasticity equations.
Use either (E, nu) or (lambda_, mu) to define the material properties.
Parameters
==========
E : float, Sympy Symbol/Expr, str
The Young's modulus
nu : float, Sympy Symbol/Expr, str
The Poisson's ratio
lambda_: float, Sympy Symbol/Expr, str
Lamé's first parameter
mu: float, Sympy Symbol/Expr, str
Lamé's second parameter (shear modulus)
rho: float, Sympy Symbol/Expr, str
Mass density.
dim : int
Dimension of the linear elasticity (2 or 3). Default is 3.
Example
========
>>> elasticity_equations = LinearElasticity(E=10, nu=0.3, dim=2)
>>> elasticity_equations.pprint()
navier_x: -13.4615384615385*u__x__x - 3.84615384615385*u__y__y - 9.61538461538461*v__x__y
navier_y: -3.84615384615385*v__x__x - 13.4615384615385*v__y__y - 9.61538461538461*u__x__y
stress_disp_xx: -sigma_xx + 13.4615384615385*u__x + 5.76923076923077*v__y
stress_disp_yy: -sigma_yy + 5.76923076923077*u__x + 13.4615384615385*v__y
stress_disp_xy: -sigma_xy + 3.84615384615385*u__y + 3.84615384615385*v__x
equilibrium_x: -sigma_xx__x - sigma_xy__y
equilibrium_y: -sigma_xy__x - sigma_yy__y
traction_x: normal_x*sigma_xx + normal_y*sigma_xy
traction_y: normal_x*sigma_xy + normal_y*sigma_yy
"""
name = "LinearElasticity"
def __init__(
self, E=None, nu=None, lambda_=None, mu=None, rho=1, dim=3, time=False
):
# set params
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# displacement components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
sigma_xx = Function("sigma_xx")(*input_variables)
sigma_yy = Function("sigma_yy")(*input_variables)
sigma_xy = Function("sigma_xy")(*input_variables)
if self.dim == 3:
w = Function("w")(*input_variables)
sigma_zz = Function("sigma_zz")(*input_variables)
sigma_xz = Function("sigma_xz")(*input_variables)
sigma_yz = Function("sigma_yz")(*input_variables)
else:
w = Number(0)
sigma_zz = Number(0)
sigma_xz = Number(0)
sigma_yz = Number(0)
# material properties
if lambda_ is None:
if isinstance(nu, str):
nu = Function(nu)(*input_variables)
elif isinstance(nu, (float, int)):
nu = Number(nu)
if isinstance(E, str):
E = Function(E)(*input_variables)
elif isinstance(E, (float, int)):
E = Number(E)
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
else:
if isinstance(lambda_, str):
lambda_ = Function(lambda_)(*input_variables)
elif isinstance(lambda_, (float, int)):
lambda_ = Number(lambda_)
if isinstance(mu, str):
mu = Function(mu)(*input_variables)
elif isinstance(mu, (float, int)):
mu = Number(mu)
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# set equations
self.equations = {}
# Stress equations
self.equations["stress_disp_xx"] = (
lambda_ * (u.diff(x) + v.diff(y) + w.diff(z))
+ 2 * mu * u.diff(x)
- sigma_xx
)
self.equations["stress_disp_yy"] = (
lambda_ * (u.diff(x) + v.diff(y) + w.diff(z))
+ 2 * mu * v.diff(y)
- sigma_yy
)
self.equations["stress_disp_zz"] = (
lambda_ * (u.diff(x) + v.diff(y) + w.diff(z))
+ 2 * mu * w.diff(z)
- sigma_zz
)
self.equations["stress_disp_xy"] = mu * (u.diff(y) + v.diff(x)) - sigma_xy
self.equations["stress_disp_xz"] = mu * (u.diff(z) + w.diff(x)) - sigma_xz
self.equations["stress_disp_yz"] = mu * (v.diff(z) + w.diff(y)) - sigma_yz
# Equations of equilibrium
self.equations["equilibrium_x"] = rho * ((u.diff(t)).diff(t)) - (
sigma_xx.diff(x) + sigma_xy.diff(y) + sigma_xz.diff(z)
)
self.equations["equilibrium_y"] = rho * ((v.diff(t)).diff(t)) - (
sigma_xy.diff(x) + sigma_yy.diff(y) + sigma_yz.diff(z)
)
self.equations["equilibrium_z"] = rho * ((w.diff(t)).diff(t)) - (
sigma_xz.diff(x) + sigma_yz.diff(y) + sigma_zz.diff(z)
)
# Traction equations
self.equations["traction_x"] = (
normal_x * sigma_xx + normal_y * sigma_xy + normal_z * sigma_xz
)
self.equations["traction_y"] = (
normal_x * sigma_xy + normal_y * sigma_yy + normal_z * sigma_yz
)
self.equations["traction_z"] = (
normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz
)
# Navier equations
self.equations["navier_x"] = (
rho * ((u.diff(t)).diff(t))
- (lambda_ + mu) * (u.diff(x) + v.diff(y) + w.diff(z)).diff(x)
- mu * ((u.diff(x)).diff(x) + (u.diff(y)).diff(y) + (u.diff(z)).diff(z))
)
self.equations["navier_y"] = (
rho * ((v.diff(t)).diff(t))
- (lambda_ + mu) * (u.diff(x) + v.diff(y) + w.diff(z)).diff(y)
- mu * ((v.diff(x)).diff(x) + (v.diff(y)).diff(y) + (v.diff(z)).diff(z))
)
self.equations["navier_z"] = (
rho * ((w.diff(t)).diff(t))
- (lambda_ + mu) * (u.diff(x) + v.diff(y) + w.diff(z)).diff(z)
- mu * ((w.diff(x)).diff(x) + (w.diff(y)).diff(y) + (w.diff(z)).diff(z))
)
if self.dim == 2:
self.equations.pop("navier_z")
self.equations.pop("stress_disp_zz")
self.equations.pop("stress_disp_xz")
self.equations.pop("stress_disp_yz")
self.equations.pop("equilibrium_z")
self.equations.pop("traction_z")
class LinearElasticityPlaneStress(PDE):
"""
Linear elasticity plane stress equations.
Use either (E, nu) or (lambda_, mu) to define the material properties.
Parameters
==========
E : float, Sympy Symbol/Expr, str
The Young's modulus
nu : float, Sympy Symbol/Expr, str
The Poisson's ratio
lambda_: float, Sympy Symbol/Expr, str
Lamé's first parameter.
mu: float, Sympy Symbol/Expr, str
Lamé's second parameter (shear modulus)
rho: float, Sympy Symbol/Expr, str
Mass density.
Example
========
>>> plane_stress_equations = LinearElasticityPlaneStress(E=10, nu=0.3)
>>> plane_stress_equations.pprint()
stress_disp_xx: -sigma_xx + 10.989010989011*u__x + 3.2967032967033*v__y
stress_disp_yy: -sigma_yy + 3.2967032967033*u__x + 10.989010989011*v__y
stress_disp_xy: -sigma_xy + 3.84615384615385*u__y + 3.84615384615385*v__x
equilibrium_x: -sigma_xx__x - sigma_xy__y
equilibrium_y: -sigma_xy__x - sigma_yy__y
traction_x: normal_x*sigma_xx + normal_y*sigma_xy
traction_y: normal_x*sigma_xy + normal_y*sigma_yy
"""
name = "LinearElasticityPlaneStress"
def __init__(self, E=None, nu=None, lambda_=None, mu=None, rho=1, time=False):
# set params
self.time = time
# coordinates
x, y = Symbol("x"), Symbol("y")
normal_x, normal_y = Symbol("normal_x"), Symbol("normal_y")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "t": t}
if not self.time:
input_variables.pop("t")
# displacement components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
sigma_xx = Function("sigma_xx")(*input_variables)
sigma_yy = Function("sigma_yy")(*input_variables)
sigma_xy = Function("sigma_xy")(*input_variables)
# material properties
if lambda_ is None:
if isinstance(nu, str):
nu = Function(nu)(*input_variables)
elif isinstance(nu, (float, int)):
nu = Number(nu)
if isinstance(E, str):
E = Function(E)(*input_variables)
elif isinstance(E, (float, int)):
E = Number(E)
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
else:
if isinstance(lambda_, str):
lambda_ = Function(lambda_)(*input_variables)
elif isinstance(lambda_, (float, int)):
lambda_ = Number(lambda_)
if isinstance(mu, str):
mu = Function(mu)(*input_variables)
elif isinstance(mu, (float, int)):
mu = Number(mu)
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# set equations
self.equations = {}
# Stress equations
w_z = -lambda_ / (lambda_ + 2 * mu) * (u.diff(x) + v.diff(y))
self.equations["stress_disp_xx"] = (
lambda_ * (u.diff(x) + v.diff(y) + w_z) + 2 * mu * u.diff(x) - sigma_xx
)
self.equations["stress_disp_yy"] = (
lambda_ * (u.diff(x) + v.diff(y) + w_z) + 2 * mu * v.diff(y) - sigma_yy
)
self.equations["stress_disp_xy"] = mu * (u.diff(y) + v.diff(x)) - sigma_xy
# Equations of equilibrium
self.equations["equilibrium_x"] = rho * ((u.diff(t)).diff(t)) - (
sigma_xx.diff(x) + sigma_xy.diff(y)
)
self.equations["equilibrium_y"] = rho * ((v.diff(t)).diff(t)) - (
sigma_xy.diff(x) + sigma_yy.diff(y)
)
# Traction equations
self.equations["traction_x"] = normal_x * sigma_xx + normal_y * sigma_xy
self.equations["traction_y"] = normal_x * sigma_xy + normal_y * sigma_yy
| modulus-sym-main | modulus/sym/eq/pdes/linear_elasticity.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Advection diffusion equation
Reference:
https://en.wikipedia.org/wiki/Convection%E2%80%93diffusion_equation
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class AdvectionDiffusion(PDE):
"""
Advection diffusion equation
Parameters
==========
T : str
The dependent variable.
D : float, Sympy Symbol/Expr, str
Diffusivity. If `D` is a str then it is
converted to Sympy Function of form 'D(x,y,z,t)'.
If 'D' is a Sympy Symbol or Expression then this
is substituted into the equation.
Q : float, Sympy Symbol/Expr, str
The source term. If `Q` is a str then it is
converted to Sympy Function of form 'Q(x,y,z,t)'.
If 'Q' is a Sympy Symbol or Expression then this
is substituted into the equation. Default is 0.
rho : float, Sympy Symbol/Expr, str
The density. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation to allow for
compressible Navier Stokes.
dim : int
Dimension of the diffusion equation (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is False.
mixed_form: bool
If True, use the mixed formulation of the advection diffusion equation.
Examples
========
>>> ad = AdvectionDiffusion(D=0.1, rho=1.)
>>> ad.pprint()
advection_diffusion_T: u*T__x + v*T__y + w*T__z - 0.1*T__x__x - 0.1*T__y__y - 0.1*T__z__z
>>> ad = AdvectionDiffusion(D='D', rho=1, dim=2, time=True)
>>> ad.pprint()
advection_diffusion_T: -D*T__x__x - D*T__y__y + u*T__x + v*T__y - D__x*T__x - D__y*T__y + T__t
"""
name = "AdvectionDiffusion"
def __init__(
self, T="T", D="D", Q=0, rho="rho", dim=3, time=False, mixed_form=False
):
# set params
self.T = T
self.dim = dim
self.time = time
self.mixed_form = mixed_form
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
w = Function("w")(*input_variables)
# Temperature
assert type(T) == str, "T needs to be string"
T = Function(T)(*input_variables)
# Diffusivity
if type(D) is str:
D = Function(D)(*input_variables)
elif type(D) in [float, int]:
D = Number(D)
# Source
if type(Q) is str:
Q = Function(Q)(*input_variables)
elif type(Q) in [float, int]:
Q = Number(Q)
# Density
if type(rho) is str:
rho = Function(rho)(*input_variables)
elif type(rho) in [float, int]:
rho = Number(rho)
# set equations
self.equations = {}
advection = (
rho * u * (T.diff(x)) + rho * v * (T.diff(y)) + rho * w * (T.diff(z))
)
if not self.mixed_form:
diffusion = (
(rho * D * T.diff(x)).diff(x)
+ (rho * D * T.diff(y)).diff(y)
+ (rho * D * T.diff(z)).diff(z)
)
self.equations["advection_diffusion_" + self.T] = (
T.diff(t) + advection - diffusion - Q
)
elif self.mixed_form:
T_x = Function(self.T + "_x")(*input_variables)
T_y = Function(self.T + "_y")(*input_variables)
if self.dim == 3:
T_z = Function(self.T + "_z")(*input_variables)
else:
T_z = Number(0)
diffusion = (
(rho * D * T_x).diff(x)
+ (rho * D * T_y).diff(y)
+ (rho * D * T_z).diff(z)
)
self.equations["compatibility_" + self.T + "_x"] = T.diff(x) - T_x
self.equations["compatibility_" + self.T + "_y"] = T.diff(y) - T_y
self.equations["compatibility_" + self.T + "_z"] = T.diff(z) - T_z
self.equations["compatibility_" + self.T + "_xy"] = T_x.diff(y) - T_y.diff(
x
)
self.equations["compatibility_" + self.T + "_xz"] = T_x.diff(z) - T_z.diff(
x
)
self.equations["compatibility_" + self.T + "_yz"] = T_y.diff(z) - T_z.diff(
y
)
if self.dim == 2:
self.equations.pop("compatibility_" + self.T + "_z")
self.equations.pop("compatibility_" + self.T + "_xz")
self.equations.pop("compatibility_" + self.T + "_yz")
self.equations["advection_diffusion_" + self.T] = (
T.diff(t) + advection - diffusion - Q
)
| modulus-sym-main | modulus/sym/eq/pdes/advection_diffusion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wave equation
Reference: https://en.wikipedia.org/wiki/Wave_equation
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
class WaveEquation(PDE):
"""
Wave equation
Parameters
==========
u : str
The dependent variable.
c : float, Sympy Symbol/Expr, str
Wave speed coefficient. If `c` is a str then it is
converted to Sympy Function of form 'c(x,y,z,t)'.
If 'c' is a Sympy Symbol or Expression then this
is substituted into the equation.
dim : int
Dimension of the wave equation (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
mixed_form: bool
If True, use the mixed formulation of the wave equation.
Examples
========
>>> we = WaveEquation(c=0.8, dim=3)
>>> we.pprint()
wave_equation: u__t__t - 0.64*u__x__x - 0.64*u__y__y - 0.64*u__z__z
>>> we = WaveEquation(c='c', dim=2, time=False)
>>> we.pprint()
wave_equation: -c**2*u__x__x - c**2*u__y__y
"""
name = "WaveEquation"
def __init__(self, u="u", c="c", dim=3, time=True, mixed_form=False):
# set params
self.u = u
self.dim = dim
self.time = time
self.mixed_form = mixed_form
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Scalar function
assert type(u) == str, "u needs to be string"
u = Function(u)(*input_variables)
# wave speed coefficient
if type(c) is str:
c = Function(c)(*input_variables)
elif type(c) in [float, int]:
c = Number(c)
# set equations
self.equations = {}
if not self.mixed_form:
self.equations["wave_equation"] = (
u.diff(t, 2)
- c**2 * u.diff(x, 2)
- c**2 * u.diff(y, 2)
- c**2 * u.diff(z, 2)
)
elif self.mixed_form:
u_x = Function("u_x")(*input_variables)
u_y = Function("u_y")(*input_variables)
if self.dim == 3:
u_z = Function("u_z")(*input_variables)
else:
u_z = Number(0)
if self.time:
u_t = Function("u_t")(*input_variables)
else:
u_t = Number(0)
self.equations["wave_equation"] = (
u_t.diff(t)
- c**2 * u_x.diff(x)
- c**2 * u_y.diff(y)
- c**2 * u_z.diff(z)
)
self.equations["compatibility_u_x"] = u.diff(x) - u_x
self.equations["compatibility_u_y"] = u.diff(y) - u_y
self.equations["compatibility_u_z"] = u.diff(z) - u_z
self.equations["compatibility_u_xy"] = u_x.diff(y) - u_y.diff(x)
self.equations["compatibility_u_xz"] = u_x.diff(z) - u_z.diff(x)
self.equations["compatibility_u_yz"] = u_y.diff(z) - u_z.diff(y)
if self.dim == 2:
self.equations.pop("compatibility_u_z")
self.equations.pop("compatibility_u_xz")
self.equations.pop("compatibility_u_yz")
class HelmholtzEquation(PDE):
name = "HelmholtzEquation"
def __init__(self, u, k, dim=3, mixed_form=False):
"""
Helmholtz equation
Parameters
==========
u : str
The dependent variable.
k : float, Sympy Symbol/Expr, str
Wave number. If `k` is a str then it is
converted to Sympy Function of form 'k(x,y,z,t)'.
If 'k' is a Sympy Symbol or Expression then this
is substituted into the equation.
dim : int
Dimension of the Helmholtz equation (1, 2, or 3). Default is 3.
mixed_form: bool
If True, use the mixed formulation of the Helmholtz equation.
"""
# set params
self.u = u
self.dim = dim
self.mixed_form = mixed_form
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
# Scalar function
assert type(u) == str, "u needs to be string"
u = Function(u)(*input_variables)
# wave number
if type(k) is str:
k = Function(k)(*input_variables)
elif type(k) in [float, int]:
k = Number(k)
# set equations
self.equations = {}
if not self.mixed_form:
self.equations["helmholtz"] = -(
k**2 * u + u.diff(x, 2) + u.diff(y, 2) + u.diff(z, 2)
)
elif self.mixed_form:
u_x = Function("u_x")(*input_variables)
u_y = Function("u_y")(*input_variables)
if self.dim == 3:
u_z = Function("u_z")(*input_variables)
else:
u_z = Number(0)
self.equations["helmholtz"] = -(
k**2 * u + u_x.diff(x) + u_y.diff(y) + u_z.diff(z)
)
self.equations["compatibility_u_x"] = u.diff(x) - u_x
self.equations["compatibility_u_y"] = u.diff(y) - u_y
self.equations["compatibility_u_z"] = u.diff(z) - u_z
self.equations["compatibility_u_xy"] = u_x.diff(y) - u_y.diff(x)
self.equations["compatibility_u_xz"] = u_x.diff(z) - u_z.diff(x)
self.equations["compatibility_u_yz"] = u_y.diff(z) - u_z.diff(y)
if self.dim == 2:
self.equations.pop("compatibility_u_z")
self.equations.pop("compatibility_u_xz")
self.equations.pop("compatibility_u_yz")
| modulus-sym-main | modulus/sym/eq/pdes/wave_equation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
# from .advection_diffusion import AdvectionDiffusion, IntegralAdvection
# from .diffusion import Diffusion, DiffusionInterface, IntegralDiffusion
# from .energy_equation import EnergyFluid
# from .navier_stokes import NavierStokes, IntegralContinuity, GradNormal
# from .signed_distance_function import ScreenedPoissonDistance
# from .turbulence_k_epsilon import KEpsilon
# from .turbulence_zero_eq import ZeroEquation
# from .wave_equation import WaveEquation
# __all__ = [
# "AdvectionDiffusion",
# "IntegralAdvection",
# "Diffusion",
# "DiffusionInterface",
# "IntegralDiffusion",
# "EnergyFluid",
# "NavierStokes",
# "IntegralContinuity",
# "GradNormal",
# "ScreenedPoissonDistance",
# "KEpsilon",
# "ZeroEquation",
# "WaveEquation",
# ]
| modulus-sym-main | modulus/sym/eq/pdes/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Screened Poisson Distance
Equation taken from,
https://www.researchgate.net/publication/266149392_Dynamic_Distance-Based_Shape_Features_for_Gait_Recognition,
Equation 6 in paper.
"""
from sympy import Symbol, Function, sqrt
from modulus.sym.eq.pde import PDE
class ScreenedPoissonDistance(PDE):
"""
Screened Poisson Distance
Parameters
==========
distance : str
A user-defined variable for distance.
Default is "normal_distance".
tau : float
A small, positive parameter. Default is 0.1.
dim : int
Dimension of the Screened Poisson Distance (1, 2, or 3).
Default is 3.
Example
========
>>> s = ScreenedPoissonDistance(tau=0.1, dim=2)
>>> s.pprint()
screened_poisson_normal_distance: -normal_distance__x**2
+ 0.316227766016838*normal_distance__x__x - normal_distance__y**2
+ 0.316227766016838*normal_distance__y__y + 1
"""
name = "ScreenedPoissonDistance"
def __init__(self, distance="normal_distance", tau=0.1, dim=3):
# set params
self.distance = distance
self.dim = dim
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
# distance u
assert type(distance) == str, "distance needs to be string"
distance = Function(distance)(*input_variables)
# set equations
self.equations = {}
sdf_grad = (
1 - distance.diff(x) ** 2 - distance.diff(y) ** 2 - distance.diff(z) ** 2
)
poisson = sqrt(tau) * (
distance.diff(x, 2) + distance.diff(y, 2) + distance.diff(z, 2)
)
self.equations["screened_poisson_" + self.distance] = sdf_grad + poisson
| modulus-sym-main | modulus/sym/eq/pdes/signed_distance_function.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic equations
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class NormalDotVec(PDE):
"""
Normal dot velocity
Parameters
==========
vec : list of str
The names of the velocity components. Default is ["u", "v", "w"].
"""
name = "NormalDotVec"
def __init__(self, vec=["u", "v", "w"]):
# normal
normal = [Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z")]
# make input variables
self.equations = {}
self.equations["normal_dot_vel"] = 0
for v, n in zip(vec, normal):
self.equations["normal_dot_vel"] += Symbol(v) * n
class GradNormal(PDE):
"""
Implementation of the gradient boundary condition
Parameters
==========
T : str
The dependent variable.
dim : int
Dimension of the equations (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
Examples
========
>>> gn = GradNormal(T='T')
>>> gn.pprint()
normal_gradient_T: normal_x*T__x + normal_y*T__y + normal_z*T__z
"""
name = "GradNormal"
def __init__(self, T, dim=3, time=True):
self.T = T
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# variables to set the gradients (example Temperature)
T = Function(T)(*input_variables)
# set equations
self.equations = {}
self.equations["normal_gradient_" + self.T] = (
normal_x * T.diff(x) + normal_y * T.diff(y) + normal_z * T.diff(z)
)
class Curl(PDE):
"""
del cross vector operator
Parameters
==========
vector : tuple of 3 Sympy Exprs, floats, ints or strings
This will be the vector to take the curl of.
curl_name : tuple of 3 strings
These will be the output names of the curl operations.
Examples
========
>>> c = Curl((0,0,'phi'), ('u','v','w'))
>>> c.pprint()
u: phi__y
v: -phi__x
w: 0
"""
name = "Curl"
def __init__(self, vector, curl_name=["u", "v", "w"]):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# vector
v_0 = vector[0]
v_1 = vector[1]
v_2 = vector[2]
# make functions
if type(v_0) is str:
v_0 = Function(v_0)(*input_variables)
elif type(v_0) in [float, int]:
v_0 = Number(v_0)
if type(v_1) is str:
v_1 = Function(v_1)(*input_variables)
elif type(v_1) in [float, int]:
v_1 = Number(v_1)
if type(v_2) is str:
v_2 = Function(v_2)(*input_variables)
elif type(v_2) in [float, int]:
v_2 = Number(v_2)
# curl
curl_0 = v_2.diff(y) - v_1.diff(z)
curl_1 = v_0.diff(z) - v_2.diff(x)
curl_2 = v_1.diff(x) - v_0.diff(y)
# set equations
self.equations = {}
self.equations[curl_name[0]] = curl_0
self.equations[curl_name[1]] = curl_1
self.equations[curl_name[2]] = curl_2
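# Usage sketch (illustrative only; the variable names are arbitrary):
# NormalDotVec forms the dot product of the boundary normal with the listed
# velocity components, e.g. normal_x*u + normal_y*v for a 2D velocity list.
_ndv = NormalDotVec(vec=["u", "v"])
print(_ndv.equations["normal_dot_vel"])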
| modulus-sym-main | modulus/sym/eq/pdes/basic.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Zero Equation Turbulence model
References:
https://www.eureka.im/954.html
https://knowledge.autodesk.com/support/cfd/learn-explore/caas/CloudHelp/cloudhelp/2019/ENU/SimCFD-Learning/files/GUID-BBA4E008-8346-465B-9FD3-D193CF108AF0-htm.html
"""
from sympy import Symbol, Function, sqrt, Number, Min
from modulus.sym.eq.pde import PDE
class ZeroEquation(PDE):
"""
Zero Equation Turbulence model
Parameters
==========
nu : float
The kinematic viscosity of the fluid.
max_distance : float
The maximum wall distance in the flow field.
rho : float, Sympy Symbol/Expr, str
The density. If `rho` is a str then it is
converted to Sympy Function of form 'rho(x,y,z,t)'.
If 'rho' is a Sympy Symbol or Expression then this
is substituted into the equation. Default is 1.
dim : int
Dimension of the Zero Equation Turbulence model (2 or 3).
Default is 3.
time : bool
If time-dependent equations or not. Default is True.
Example
========
>>> zeroEq = ZeroEquation(nu=0.1, max_distance=2.0, dim=2)
>>> zeroEq.pprint()
nu: sqrt((u__y + v__x)**2 + 2*u__x**2 + 2*v__y**2)
*Min(0.18, 0.419*sdf)**2 + 0.1
"""
name = "ZeroEquation"
def __init__(
self, nu, max_distance, rho=1, dim=3, time=True
): # TODO add density into model
# set params
self.dim = dim
self.time = time
# model coefficients
self.max_distance = max_distance
self.karman_constant = 0.419
self.max_distance_ratio = 0.09
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
if self.dim == 3:
w = Function("w")(*input_variables)
else:
w = Number(0)
# density
if type(rho) is str:
rho = Function(rho)(*input_variables)
elif type(rho) in [float, int]:
rho = Number(rho)
# wall distance
normal_distance = Function("sdf")(*input_variables)
# mixing length
mixing_length = Min(
self.karman_constant * normal_distance,
self.max_distance_ratio * self.max_distance,
)
G = (
2 * u.diff(x) ** 2
+ 2 * v.diff(y) ** 2
+ 2 * w.diff(z) ** 2
+ (u.diff(y) + v.diff(x)) ** 2
+ (u.diff(z) + w.diff(x)) ** 2
+ (v.diff(z) + w.diff(y)) ** 2
)
# set equations
self.equations = {}
self.equations["nu"] = nu + rho * mixing_length**2 * sqrt(G)
| modulus-sym-main | modulus/sym/eq/pdes/turbulence_zero_eq.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pathlib
from torch.autograd import Function
from typing import Dict, List, Set, Optional, Union, Callable
Tensor = torch.Tensor
# Finite difference coefficients from:
# https://en.wikipedia.org/wiki/Finite_difference_coefficient
class FirstDerivO2_f(Function):
# [0.5, -0.5]
@staticmethod
def forward(ctx, tensor0, tensor1, dx):
ctx.c0 = 0.5 / (dx)
ctx.c1 = -0.5 / (dx)
return ctx.c0 * tensor0 + ctx.c1 * tensor1
@staticmethod
def backward(ctx, grad_output):
return ctx.c0 * grad_output, ctx.c1 * grad_output, None
class FirstDerivO4_f(Function):
# [-1.0 / 12.0, 8.0 / 12.0, -8.0 / 12.0, 1.0 / 12.0]
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, tensor3, dx):
ctx.c0 = -1.0 / (dx * 12.0)
ctx.c1 = 8.0 / (dx * 12.0)
ctx.c2 = -8.0 / (dx * 12.0)
ctx.c3 = 1.0 / (dx * 12.0)
return ctx.c0 * tensor0 + ctx.c1 * tensor1 + ctx.c2 * tensor2 + ctx.c3 * tensor3
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c2 * grad_output,
ctx.c3 * grad_output,
None,
)
class SecondDerivO2_f(Function):
# [1.0, -2.0, 1.0]
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, dx):
ctx.c0 = 1.0 / (dx**2)
ctx.c1 = -2.0 / (dx**2)
return ctx.c0 * tensor0 + ctx.c1 * tensor1 + ctx.c0 * tensor2
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c0 * grad_output,
None,
)
class SecondDerivO4_f(Function):
# [-1/12, 4/3, -5/2, 4/3, -1/12]
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, tensor3, tensor4, dx):
ctx.c0 = -1.0 / (12.0 * dx**2)
ctx.c1 = 4.0 / (3.0 * dx**2)
ctx.c2 = -5.0 / (2.0 * dx**2)
return (
ctx.c0 * tensor0
+ ctx.c1 * tensor1
+ ctx.c2 * tensor2
+ ctx.c1 * tensor3
+ ctx.c0 * tensor4
)
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c2 * grad_output,
ctx.c1 * grad_output,
ctx.c0 * grad_output,
None,
)
class MixedSecondDerivO2_f(Function):
# Ref: https://onlinelibrary.wiley.com/doi/pdf/10.1002/9781119083405.app1
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, tensor3, dx):
ctx.c0 = 0.25 / (dx**2)
ctx.c1 = -0.25 / (dx**2)
return ctx.c0 * tensor0 + ctx.c1 * tensor1 + ctx.c1 * tensor2 + ctx.c0 * tensor3
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c1 * grad_output,
ctx.c0 * grad_output,
None,
)
class ThirdDerivO2_f(Function):
# [1/2, -1.0, 1.0, -1/2]
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, tensor3, dx):
ctx.c0 = 0.5 / (dx**3)
ctx.c1 = -1.0 / (dx**3)
ctx.c2 = 1.0 / (dx**3)
ctx.c3 = -0.5 / (dx**3)
return ctx.c0 * tensor0 + ctx.c1 * tensor1 + ctx.c2 * tensor2 + ctx.c3 * tensor3
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c2 * grad_output,
ctx.c3 * grad_output,
None,
)
class ForthDerivO2_f(Function):
# [1.0, -4.0, 6.0, -4.0, 1.0]
@staticmethod
def forward(ctx, tensor0, tensor1, tensor2, tensor3, tensor4, dx):
ctx.c0 = 1.0 / (dx**4)
ctx.c1 = -4.0 / (dx**4)
ctx.c2 = 6.0 / (dx**4)
ctx.c3 = -4.0 / (dx**4)
ctx.c4 = 1.0 / (dx**4)
return (
ctx.c0 * tensor0
+ ctx.c1 * tensor1
+ ctx.c2 * tensor2
+ ctx.c3 * tensor3
+ ctx.c4 * tensor4
)
@staticmethod
def backward(ctx, grad_output):
return (
ctx.c0 * grad_output,
ctx.c1 * grad_output,
ctx.c2 * grad_output,
ctx.c3 * grad_output,
ctx.c4 * grad_output,
None,
)
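# Usage sketch (illustrative only; the dummy tensors below stand in for field
# values sampled at the shifted stencil points u(x + dx) and u(x - dx)):
# the autograd Functions above are applied directly to pre-sampled neighbours.
_dx = 0.1
_u_plus = torch.ones(4, 1)
_u_minus = torch.zeros(4, 1)
# central difference: (_u_plus - _u_minus) / (2 * _dx)
print(FirstDerivO2_f.apply(_u_plus, _u_minus, _dx))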
| modulus-sym-main | modulus/sym/eq/mfd/functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from .functions import *
from modulus.sym.key import Key
from typing import Dict, List, Set, Optional, Union, Callable
Tensor = torch.Tensor
class FirstDerivO2(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 1
), f"Key must have one derivative for first derivative calc"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = FirstDerivO2_f.apply(
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
dx,
)
return outputs
class FirstDerivO4(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 1
), f"Key must have one derivative for first derivative calc"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = FirstDerivO4_f.apply(
inputs[f"{self.var}>>{self.indep_var}::2"],
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
inputs[f"{self.var}>>{self.indep_var}::-2"],
dx,
)
return outputs
class SecondDerivO2(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 2
), f"Key must have two derivatives for second derivative calc"
assert (
derivative_key.derivatives[0] == derivative_key.derivatives[1]
), f"Derivatives keys should be the same"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = SecondDerivO2_f.apply(
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
dx,
)
return outputs
class SecondDerivO4(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 2
), f"Key must have two derivatives for second derivative calc"
assert (
derivative_key.derivatives[0] == derivative_key.derivatives[1]
), f"Derivatives keys should be the same"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = SecondDerivO4_f.apply(
inputs[f"{self.var}>>{self.indep_var}::2"],
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
inputs[f"{self.var}>>{self.indep_var}::-2"],
dx,
)
return outputs
class MixedSecondDerivO2(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 2
), f"Key must have two derivatives for second derivative calc"
self.var = derivative_key.name
self.indep_vars = [
str(derivative_key.derivatives[0]),
str(derivative_key.derivatives[1]),
]
self.indep_vars.sort()
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = MixedSecondDerivO2_f.apply(
inputs[f"{self.var}>>{self.indep_vars[0]}::1&&{self.indep_vars[1]}::1"],
inputs[f"{self.var}>>{self.indep_vars[0]}::-1&&{self.indep_vars[1]}::1"],
inputs[f"{self.var}>>{self.indep_vars[0]}::1&&{self.indep_vars[1]}::-1"],
inputs[f"{self.var}>>{self.indep_vars[0]}::-1&&{self.indep_vars[1]}::-1"],
dx,
)
return outputs
class ThirdDerivO2(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 3
), f"Key must have three derivatives for third derivative calc"
assert (
derivative_key.derivatives[0]
== derivative_key.derivatives[1]
== derivative_key.derivatives[2]
), f"Derivatives keys should be the same"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = ThirdDerivO2_f.apply(
inputs[f"{self.var}>>{self.indep_var}::2"],
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
inputs[f"{self.var}>>{self.indep_var}::-2"],
dx,
)
return outputs
class ForthDerivO2(torch.nn.Module):
def __init__(self, derivative_key: Key) -> None:
super().__init__()
assert (
len(derivative_key.derivatives) == 4
), f"Key must have three derivatives for forth derivative calc"
assert (
derivative_key.derivatives[0]
== derivative_key.derivatives[1]
== derivative_key.derivatives[2]
== derivative_key.derivatives[3]
), f"Derivatives keys should be the same"
self.var = derivative_key.name
self.indep_var = str(derivative_key.derivatives[0])
self.out_name = str(derivative_key)
self.register_buffer(
"coeff",
torch.Tensor([1.0, -4.0, 6.0, -4.0, 1.0]).double().unsqueeze(-1),
persistent=False,
)
def forward(self, inputs: Dict[str, Tensor], dx: float) -> Dict[str, Tensor]:
outputs = {}
outputs[self.out_name] = ForthDerivO2_f.apply(
inputs[f"{self.var}>>{self.indep_var}::2"],
inputs[f"{self.var}>>{self.indep_var}::1"],
inputs[f"{self.var}"],
inputs[f"{self.var}>>{self.indep_var}::-1"],
inputs[f"{self.var}>>{self.indep_var}::-2"],
dx,
)
return outputs
class DerivBase(torch.nn.Module):
def __init__(
self, derivative_keys: List[Key], dx: float, order: int = 2, jit: bool = True
) -> None:
super().__init__()
self.derivative_keys = derivative_keys
self.dx = dx
self.order = order
# Create stencil set of points we need
eval_list = []
self._stencil = set()
@property
def stencil(self) -> List[str]:
"""Returns list of stencil strings for this derivative
Returns
-------
List[str]
List of stencil strings
Example
-------
Central 2nd derivative will return: `['x::1','x::0','x::-1']`
"""
return list(self._stencil)
def forward(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""Forward pass that calculates the finite difference gradient
Parameters
----------
inputs : Dict[str, Tensor]
Input tensor dictionary, should include points in FD stencil
Returns
-------
Dict[str, Tensor]
Output gradients
"""
outputs = {}
for module in self._eval:
outputs.update(module(inputs, self.dx))
return outputs
class FirstDeriv(DerivBase):
def __init__(
self, derivative_keys: List[Key], dx: float, order: int = 2, jit: bool = True
) -> None:
super().__init__(derivative_keys, dx, order, jit)
assert (
len(derivative_keys) == 0 or order == 2 or order == 4
), "Second and forth order first derivatives supported"
for key in derivative_keys:
assert (
len(key.derivatives) == 1
), f"Key with {len(key.derivatives)} derivs supplied to first order deriv"
# Create stencil set of points we need
eval_list = []
self._stencil = set()
for key in self.derivative_keys:
indep_vars = key.derivatives
if order == 2:
self._stencil = self._stencil.union(
{f"{indep_vars[0]}::-1", f"{indep_vars[0]}::1"}
)
eval_list.append(FirstDerivO2(key))
elif order == 4:
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-2",
f"{indep_vars[0]}::-1",
f"{indep_vars[0]}::1",
f"{indep_vars[0]}::2",
}
)
eval_list.append(FirstDerivO4(key))
self._eval = torch.nn.ModuleList(eval_list)
class SecondDeriv(DerivBase):
def __init__(
self, derivative_keys: List[Key], dx: float, order: int = 2, jit: bool = True
) -> None:
super().__init__(derivative_keys, dx, order, jit)
assert (
len(derivative_keys) == 0 or order == 2 or order == 4
), "Second and forth order second derivatives supported"
for key in derivative_keys:
assert (
len(key.derivatives) == 2
), f"Key with {len(key.derivatives)} deriv keys supplied to second deriv"
# Create stencil set of points we need
eval_list = []
self._stencil = set()
for key in self.derivative_keys:
indep_vars = key.derivatives
if indep_vars[0] == indep_vars[1]:
if order == 2:
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-1",
f"{indep_vars[0]}::0",
f"{indep_vars[0]}::1",
}
)
eval_list.append(SecondDerivO2(key))
elif order == 4:
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-2",
f"{indep_vars[0]}::-1",
f"{indep_vars[0]}::0",
f"{indep_vars[0]}::1",
f"{indep_vars[0]}::2",
}
)
eval_list.append(SecondDerivO4(key))
# Mixed derivative
else:
if order == 2:
indep_vars = [str(var) for var in indep_vars]
indep_vars.sort()  # Avoid redundant points like (z::-1&&y::1 and y::1&&z::-1)
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-1&&{indep_vars[1]}::-1",
f"{indep_vars[0]}::1&&{indep_vars[1]}::-1",
f"{indep_vars[0]}::-1&&{indep_vars[1]}::1",
f"{indep_vars[0]}::1&&{indep_vars[1]}::1",
}
)
eval_list.append(MixedSecondDerivO2(key))
elif order == 4:
raise NotImplementedError(
"Fourth order mixed second derivatives not supported"
)
self._eval = torch.nn.ModuleList(eval_list)
class ThirdDeriv(DerivBase):
def __init__(
self, derivative_keys: List[Key], dx: float, order: int = 2, jit: bool = True
) -> None:
super().__init__(derivative_keys, dx, order, jit)
assert (
len(derivative_keys) == 0 or order == 2
), "Second order third derivatives supported"
for key in derivative_keys:
assert (
len(key.derivatives) == 3
), f"Key with {len(key.derivatives)} deriv keys supplied to third deriv"
assert (
key.derivatives[0] == key.derivatives[1] == key.derivatives[2]
), f"Mixed third derivatives not supported"
# Create stencil set of points we need
eval_list = []
self._stencil = set()
for key in self.derivative_keys:
indep_vars = key.derivatives
if order == 2:
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-2",
f"{indep_vars[0]}::-1",
f"{indep_vars[0]}::1",
f"{indep_vars[0]}::2",
}
)
eval_list.append(ThirdDerivO2(key))
self._eval = torch.nn.ModuleList(eval_list)
class ForthDeriv(DerivBase):
def __init__(
self, derivative_keys: List[Key], dx: float, order: int = 2, jit: bool = True
) -> None:
super().__init__(derivative_keys, dx, order, jit)
assert (
len(derivative_keys) == 0 or order == 2
), "Second order forth derivatives supported"
for key in derivative_keys:
assert (
len(key.derivatives) == 4
), f"Key with {len(key.derivatives)} deriv keys supplied to forth deriv"
assert (
key.derivatives[0]
== key.derivatives[1]
== key.derivatives[2]
== key.derivatives[3]
), f"Mixed forth derivatives not supported"
# Create stencil set of points we need
eval_list = []
self._stencil = set()
for key in self.derivative_keys:
indep_vars = key.derivatives
if order == 2:
self._stencil = self._stencil.union(
{
f"{indep_vars[0]}::-2",
f"{indep_vars[0]}::-1",
f"{indep_vars[0]}::0",
f"{indep_vars[0]}::1",
f"{indep_vars[0]}::2",
}
)
eval_list.append(ForthDerivO2(key))
self._eval = torch.nn.ModuleList(eval_list)
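# Usage sketch (illustrative only, not part of this module's public API):
# build a second-order accurate first derivative of "u" with respect to "x"
# and evaluate it on dummy stencil tensors. The stencil key format
# ("u>>x::1") mirrors the forward passes above; `Key` is assumed to be the
# class already imported at the top of this module for the type hints.
if __name__ == "__main__":
    u_x = FirstDeriv([Key("u", derivatives=[Key("x")])], dx=0.1, order=2)
    print(u_x.stencil)  # e.g. ['x::1', 'x::-1'] (set order is arbitrary)
    stencil_inputs = {
        "u>>x::-1": torch.zeros(8, 1),
        "u>>x::1": torch.ones(8, 1),
    }
    grads = u_x(stencil_inputs)
    print(list(grads.keys()))  # name(s) of the computed derivative tensor(s)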
| modulus-sym-main | modulus/sym/eq/mfd/finite_derivatives.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .finite_derivatives import FirstDeriv, SecondDeriv, ThirdDeriv, ForthDeriv
| modulus-sym-main | modulus/sym/eq/mfd/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-sym-main | modulus/sym/utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Any, Callable, Optional
import torch
from torch.profiler import record_function, ProfilerActivity
def timeit(
func: Callable,
*args,
steps: int = 100,
warmup: int = 10,
run_profile: bool = False,
verbose: bool = True,
label: Optional[str] = None,
label_padding: int = 35,
cpu_timing: bool = False,
):
"""
Returns time/step in ms.
If run_profile is True, then return (time/step in ms, a captured cuda events table)
"""
if label is None:
assert func.__name__, "please provide a label for this benchmark"
label = func.__name__
# warmup
torch.cuda.nvtx.range_push(f"{label}_warmup")
for _ in range(warmup):
func(*args)
torch.cuda.nvtx.range_pop() # pop label_warmup
# start timer
if cpu_timing:
torch.cuda.synchronize()
start = time.time()
else:
start_event = torch.cuda.Event(enable_timing=True)
start_event.record()
torch.cuda.nvtx.range_push(f"{label}")
if run_profile:
if verbose:
print("\n" + "=" * 70 + " " + label + " " + "=" * 70)
with torch.profiler.profile(activities=[ProfilerActivity.CUDA]) as prof:
with record_function("run_total"):
for i in range(steps):
torch.cuda.nvtx.range_push(f"{i}th_iteration")
func(*args)
torch.cuda.nvtx.range_pop()
events = prof.key_averages()
if verbose:
print(
events.table(
sort_by="self_cuda_time_total",
max_src_column_width=200,
row_limit=15,
)
)
else:
events = None
for i in range(steps):
torch.cuda.nvtx.range_push(f"{i}th_iteration")
func(*args)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_pop() # pop label
# stop timer
if cpu_timing:
torch.cuda.synchronize()
time_ms = ((time.time() - start) / steps) * 1000
else:
end_event = torch.cuda.Event(enable_timing=True)
end_event.record()
end_event.synchronize()
time_ms = start_event.elapsed_time(end_event) / steps
if verbose:
print(f"{label.ljust(label_padding)}: {time_ms:.3f} ms/step")
if run_profile:
return time_ms, events
else:
return time_ms
def profile(
func: Callable,
*args,
**kwargs,
):
"""
Simply a convenient wrapper of the timeit function with run_profile=True.
Returns: (time/step in ms, a captured cuda events table)
"""
return timeit(func, *args, run_profile=True, **kwargs)
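# Usage sketch (illustrative only, not part of this module's public API):
# time a small CUDA matmul with `timeit`. Assumes a CUDA-capable device,
# since the timing path relies on CUDA events and NVTX ranges.
if __name__ == "__main__":
    a = torch.randn(1024, 1024, device="cuda")
    b = torch.randn(1024, 1024, device="cuda")

    def matmul():
        return a @ b

    # Prints and returns the average time per step in milliseconds
    timeit(matmul, steps=100, warmup=10, label="matmul_1024x1024")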
| modulus-sym-main | modulus/sym/utils/benchmark/benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .benchmark import timeit, profile
| modulus-sym-main | modulus/sym/utils/benchmark/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""simple Sympy helper functions
"""
from symengine import sqrt
def line(x, point_x_1, point_y_1, point_x_2, point_y_2):
"""
line function from point intercepts
Parameters
----------
x : Sympy Symbol/Exp
the `x` in equation `y=a*x+b`
point_x_1 : Sympy Symbol/Exp, float, int
first intercept x position
point_y_1 : Sympy Symbol/Exp, float, int
first intercept y position
point_x_2 : Sympy Symbol/Exp, float, int
second intercept x position
point_y_2 : Sympy Symbol/Exp, float, int
second intercept y position
Returns
-------
y : Sympy Expr
`y=slope*x+intercept`
"""
slope = (point_y_1 - point_y_2) / (point_x_1 - point_x_2)
intercept = point_y_1 - slope * point_x_1
return slope * x + intercept
def parabola(x, inter_1, inter_2, height):
"""
parabola from point intercepts
Parameters
----------
x : Sympy Symbol/Exp
the `x` in equation `y=a*x*2+b*x+c`
inter_1 : Sympy Symbol/Exp, float, int
first intercept such that `y=0` when `x=inter_1`
inter_2 : Sympy Symbol/Exp, float, int
        second intercept such that `y=0` when `x=inter_2`
height : Sympy Symbol/Exp, float, int
max height of parabola
Returns
-------
y : Sympy Expr
        `y=factor*(x-inter_1)*(x-inter_2)`
"""
factor = (4 * height) / (-(inter_1**2) - inter_2**2 + 2 * inter_1 * inter_2)
return factor * (x - inter_1) * (x - inter_2)
def parabola2D(x, y, inter_1_x, inter_2_x, inter_1_y, inter_2_y, height):
"""
    2D parabola (product of two 1D parabolas) from point intercepts
Parameters
----------
x : Sympy Symbol/Exp
the `x` in equation `z=parabola(x)*parabola(y)`
y : Sympy Symbol/Exp
the `y` in equation `z=a*x**2+b*y**2+c*xy+d*y+e*x+f`
inter_1_x : Sympy Symbol/Exp, float, int
first intercept such that `z=0` when `x=inter_1_x`
inter_2_x : Sympy Symbol/Exp, float, int
second intercept such that `z=0` when `x=inter_2_x`
inter_1_y : Sympy Symbol/Exp, float, int
first intercept such that `z=0` when `y=inter_1_y`
inter_2_y : Sympy Symbol/Exp, float, int
second intercept such that `z=0` when `y=inter_2_y`
height : Sympy Symbol/Exp, float, int
max height of parabola
Returns
-------
    z : Sympy Expr
        `z=parabola(x)*parabola(y)`
"""
parabola_x = parabola(x, inter_1_x, inter_2_x, sqrt(height))
parabola_y = parabola(y, inter_1_y, inter_2_y, sqrt(height))
return parabola_x * parabola_y
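# Usage sketch (illustrative only, not part of this module's public API):
# evaluate the helpers on a SymPy symbol; the `sympy.Symbol` import below is
# used only for this illustration.
if __name__ == "__main__":
    from sympy import Symbol

    x = Symbol("x")
    # Line through the points (0, 1) and (2, 5)
    print(line(x, 0, 1, 2, 5))  # -> 2.0*x + 1.0
    # Parabola with zeros at x=0 and x=2 and a maximum height of 3 (at x=1)
    print(parabola(x, 0, 2, 3))  # -> -3.0*x*(x - 2)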
| modulus-sym-main | modulus/sym/utils/sympy/functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .numpy_printer import np_lambdify
from .torch_printer import torch_lambdify, SympyToTorch
| modulus-sym-main | modulus/sym/utils/sympy/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for converting sympy equations to pytorch
"""
from sympy import lambdify, Symbol, Derivative, Function, Basic, Add, Max, Min
from sympy.printing.str import StrPrinter
import torch
import numpy as np
import functools
from typing import List, Dict
from modulus.sym.constants import diff_str, tf_dt
def torch_lambdify(f, r, separable=False):
"""
generates a PyTorch function from a sympy equation
Parameters
----------
f : Sympy Exp, float, int, bool
the equation to convert to torch.
If float, int, or bool this gets converted
to a constant function of value `f`.
r : list, dict
A list of the arguments for `f`. If dict then
        the keys of the dict are used.
    separable : bool
        If True, the generated function takes each argument separately;
        otherwise all arguments are packed into a single list argument.
Returns
-------
torch_f : PyTorch function
"""
try:
f = float(f)
except:
pass
if isinstance(f, (float, int, bool)): # constant function
def loop_lambda(constant):
return lambda **x: torch.zeros_like(next(iter(x.items()))[1]) + constant
lambdify_f = loop_lambda(f)
else:
vars = [k for k in r] if separable else [[k for k in r]]
try: # NOTE this fixes a very odd bug in SymPy TODO add issue to SymPy
lambdify_f = lambdify(vars, f, [TORCH_SYMPY_PRINTER])
except:
lambdify_f = lambdify(vars, f, [TORCH_SYMPY_PRINTER])
return lambdify_f
def _where_torch(conditions, x, y):
if isinstance(x, (int, float)):
        x = float(x) * torch.ones(*conditions.shape)
    if isinstance(y, (int, float)):
        y = float(y) * torch.ones(*conditions.shape)
return torch.where(conditions, x, y)
def _heaviside_torch(x):
return torch.maximum(torch.sign(x), torch.zeros(1, device=x.device))
def _sqrt_torch(x):
return torch.sqrt((x - 1e-6) * _heaviside_torch(x - 1e-6) + 1e-6)
# TODO: Add jit version here
def _or_torch(*x):
return_value = x[0]
for value in x:
return_value = torch.logical_or(return_value, value)
return return_value
# TODO: Add jit version here
def _and_torch(*x):
return_value = x[0]
for value in x:
return_value = torch.logical_and(return_value, value)
return return_value
@torch.jit.script
def _min_jit(x: List[torch.Tensor]):
assert len(x) > 0
min_tensor = x[0]
for i in range(1, len(x)):
min_tensor = torch.minimum(min_tensor, x[i])
return min_tensor
def _min_torch(*x):
# get tensor shape
for value in x:
if not isinstance(value, (int, float)):
tensor_shape = list(map(int, value.shape))
device = value.device
# convert all floats and ints to tensor
x_only_tensors = []
for value in x:
if isinstance(value, (int, float)):
value = torch.zeros(tensor_shape, device=device) + value
x_only_tensors.append(value)
# reduce min
min_tensor, _ = torch.min(torch.stack(x_only_tensors, -1), -1)
return min_tensor
# jit option
# return _min_jit(x_only_tensors)
# TODO: benchmark this other option that avoids stacking and extra memory movement
# Update: cannot jit this because TorchScript doesn't support functools.reduce
# return functools.reduce(torch.minimum, x)
@torch.jit.script
def _max_jit(x: List[torch.Tensor]):
assert len(x) > 0
max_tensor = x[0]
for i in range(1, len(x)):
max_tensor = torch.maximum(max_tensor, x[i])
return max_tensor
def _max_torch(*x):
# get tensor shape
for value in x:
if not isinstance(value, (int, float)):
tensor_shape = list(map(int, value.shape))
device = value.device
# convert all floats and ints to tensor
x_only_tensors = []
for value in x:
if isinstance(value, (int, float)):
value = (torch.zeros(tensor_shape) + value).to(device)
x_only_tensors.append(value)
# reduce max
max_tensor, _ = torch.max(torch.stack(x_only_tensors, -1), -1)
return max_tensor
# jit option
# return _max_jit(x_only_tensors)
def _dirac_delta_torch(x):
return torch.eq(x, 0.0).to(tf_dt)
TORCH_SYMPY_PRINTER = {
"abs": torch.abs,
"Abs": torch.abs,
"sign": torch.sign,
"ceiling": torch.ceil,
"floor": torch.floor,
"log": torch.log,
"exp": torch.exp,
"sqrt": _sqrt_torch,
"cos": torch.cos,
"acos": torch.acos,
"sin": torch.sin,
"asin": torch.asin,
"tan": torch.tan,
"atan": torch.atan,
"atan2": torch.atan2,
"cosh": torch.cosh,
"acosh": torch.acosh,
"sinh": torch.sinh,
"asinh": torch.asinh,
"tanh": torch.tanh,
"atanh": torch.atanh,
"erf": torch.erf,
"loggamma": torch.lgamma,
"Min": _min_torch,
"Max": _max_torch,
"Heaviside": _heaviside_torch,
"DiracDelta": _dirac_delta_torch,
"logical_or": _or_torch,
"logical_and": _and_torch,
"where": _where_torch,
"pi": np.pi,
"conjugate": torch.conj,
}
class CustomDerivativePrinter(StrPrinter):
def _print_Function(self, expr):
"""
        Custom printing of the SymPy Function class.
        Instead of:
            x(t)
        We will print:
            x
"""
return expr.func.__name__
def _print_Derivative(self, expr):
"""
Custom printing of the SymPy Derivative class.
Instead of:
D(x(t), t)
We will print:
x__t
"""
prefix = str(expr.args[0].func)
for expr in expr.args[1:]:
prefix += expr[1] * (diff_str + str(expr[0]))
return prefix
def _subs_derivatives(expr):
while True:
try:
deriv = expr.atoms(Derivative).pop()
new_fn_name = str(deriv)
expr = expr.subs(deriv, Function(new_fn_name)(*deriv.free_symbols))
except:
break
while True:
try:
fn = {
fn for fn in expr.atoms(Function) if fn.class_key()[1] == 0
}.pop() # check if standard Sympy Eq (TODO better check)
new_symbol_name = str(fn)
expr = expr.subs(fn, Symbol(new_symbol_name))
except:
break
return expr
# Override the __str__ method of Basic to use the CustomDerivativePrinter
Basic.__str__ = lambda self: CustomDerivativePrinter().doprint(self)
# Class to compile and evaluate a sympy expression in PyTorch
# Cannot currently script this module because self.torch_expr is unknown
class SympyToTorch(torch.nn.Module):
def __init__(
self,
sympy_expr,
name: str,
freeze_terms: List[int] = [],
detach_names: List[str] = [],
):
super().__init__()
# Sort keys to guarantee ordering
self.keys = sorted([k.name for k in sympy_expr.free_symbols])
self.freeze_terms = freeze_terms
if not self.freeze_terms:
self.torch_expr = torch_lambdify(sympy_expr, self.keys)
else:
assert all(
x < len(Add.make_args(sympy_expr)) for x in freeze_terms
), "The freeze term index cannot be larger than the total terms in the expression"
self.torch_expr = []
for i in range(len(Add.make_args(sympy_expr))):
self.torch_expr.append(
torch_lambdify(Add.make_args(sympy_expr)[i], self.keys)
)
self.freeze_list = list(self.torch_expr[i] for i in freeze_terms)
self.name = name
self.detach_names = detach_names
def forward(self, var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
args = [
var[k].detach() if k in self.detach_names else var[k] for k in self.keys
]
if not self.freeze_terms:
output = self.torch_expr(args)
else:
output = torch.zeros_like(var[self.keys[0]])
for i, expr in enumerate(self.torch_expr):
if expr in self.freeze_list:
output += expr(args).detach()
else:
output += expr(args)
return {self.name: output}
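# Usage sketch (illustrative only, not part of this module's public API):
# compile a small SymPy expression into a torch module and evaluate it on
# CPU tensors.
if __name__ == "__main__":
    x, y = Symbol("x"), Symbol("y")
    node = SympyToTorch(x**2 + y, name="z")
    out = node({"x": torch.tensor([2.0]), "y": torch.tensor([3.0])})
    print(out["z"])  # tensor([7.])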
| modulus-sym-main | modulus/sym/utils/sympy/torch_printer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper functions for converting sympy equations to numpy
"""
import types
import inspect
import numpy as np
import symengine as se
import sympy as sp
NP_LAMBDA_STORE = {}
def np_lambdify(f, r):
"""
generates a numpy function from a sympy equation
Parameters
----------
f : Sympy Exp, float, int, bool or list of the previous
the equation to convert to a numpy function.
If float, int, or bool this gets converted
to a constant function of value `f`. If f is a list
        then the output for each element in the list is
concatenated on axis -1.
r : list, dict
A list of the arguments for `f`. If dict then
the keys of the dict are used.
Returns
-------
np_f : numpy function
"""
# possibly lambdify list of f
if not isinstance(f, list):
f = [f]
# convert r to a list if dictionary
# break up any tuples to elements in list
if isinstance(r, dict):
r = list(r.keys())
no_tuple_r = []
for key in r:
if isinstance(key, tuple):
for k in key:
no_tuple_r.append(k)
else:
no_tuple_r.append(key)
    # lambdify all functions in list
lambdify_f = []
for f_i in f:
# check if already a numpy function
if isinstance(f_i, types.FunctionType):
# add r inputs to function
            args = inspect.getfullargspec(f_i).args
def lambdify_f_i(**x):
return f_i(**{key: x[key] for key in args})
else:
# check if already lambdified equation
if (f_i, tuple(no_tuple_r)) in NP_LAMBDA_STORE.keys():
lambdify_f_i = NP_LAMBDA_STORE[(f_i, tuple(no_tuple_r))]
else: # if not lambdify it
try:
if not isinstance(f_i, bool):
f_i = float(f_i)
except:
pass
if isinstance(f_i, (float, int)): # constant function
def loop_lambda(constant):
return (
lambda **x: np.zeros_like(next(iter(x.items()))[1])
+ constant
)
lambdify_f_i = loop_lambda(f_i)
elif type(f_i) in [
type((se.Symbol("x") > 0).subs(se.Symbol("x"), 1)),
type((se.Symbol("x") > 0).subs(se.Symbol("x"), -1)),
bool,
                ]:  # TODO hacky sympy boolean check
def loop_lambda(constant):
if constant:
return lambda **x: np.ones_like(
next(iter(x.items()))[1], dtype=bool
)
else:
return lambda **x: np.zeros_like(
next(iter(x.items()))[1], dtype=bool
)
lambdify_f_i = loop_lambda(f_i)
else:
try: # first try to compile with Symengine
kk = []
for k in no_tuple_r:
if isinstance(k, str):
kk.append(se.Symbol(k))
else:
kk.append(k)
kk = [se.Symbol(name) for name in sorted([x.name for x in kk])]
se_lambdify_f_i = se.lambdify(kk, [f_i], backend="llvm")
def lambdify_f_i(**x):
if len(x) == 1:
v = list(x.values())[0]
else:
v = np.stack(
[v for v in dict(sorted(x.items())).values()],
axis=-1,
)
out = se_lambdify_f_i(v)
if isinstance(out, list):
out = np.concatenate(out, axis=-1)
return out
except: # fall back on older SymPy compile
sp_lambdify_f_i = sp.lambdify(
[k for k in no_tuple_r], f_i, [NP_SYMPY_PRINTER, "numpy"]
)
def lambdify_f_i(**x):
v = sp_lambdify_f_i(**x)
if isinstance(v, list):
v = np.concatenate(v, axis=-1)
return v
# add new lambdified function to dictionary
NP_LAMBDA_STORE[(f_i, tuple(no_tuple_r))] = lambdify_f_i
# add new list of lambda functions
lambdify_f.append(lambdify_f_i)
# construct master lambda function for all
def loop_grouped_lambda(lambdify_f):
def grouped_lambda(**invar):
output = []
for lambdify_f_i in lambdify_f:
output.append(lambdify_f_i(**invar))
return np.concatenate(output, axis=-1)
return grouped_lambda
return loop_grouped_lambda(lambdify_f)
def _xor_np(x):
return np.logical_xor(x)
def _min_np(x):
return_value = x[0]
for value in x:
return_value = np.minimum(return_value, value)
return return_value
def _max_np(x):
return_value = x[0]
for value in x:
return_value = np.maximum(return_value, value)
return return_value
def _heaviside_np(x):
return np.heaviside(x, 0)
def _equal_np(x, y):
return np.isclose(x, y)
NP_SYMPY_PRINTER = {
"amin": _min_np,
"amax": _max_np,
"Heaviside": _heaviside_np,
"equal": _equal_np,
"Xor": _xor_np,
}
SYMENGINE_BLACKLIST = [sp.Heaviside, sp.DiracDelta]
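# Usage sketch (illustrative only, not part of this module's public API):
# compile a single-variable SymPy expression and evaluate it on an [N, 1]
# numpy array, the shape convention used throughout Modulus Sym.
if __name__ == "__main__":
    x = sp.Symbol("x")
    f = np_lambdify(x**2, ["x"])
    print(f(x=np.array([[1.0], [2.0], [3.0]])))  # -> [[1.], [4.], [9.]]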
| modulus-sym-main | modulus/sym/utils/sympy/numpy_printer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from typing import Any, Dict
class StopCriterion:
"""
Stop criterion for training
Parameters
----------
metric : str
Metric to be monitored during the training
min_delta : float
minimum required change in the metric to qualify as a training improvement
patience : float
Number of training steps to wait for a training improvement to happen
mode: str
Choose 'min' if the metric is to be minimized, or 'max' if the metric is to be maximized
freq: int
Frequency of evaluating the stop criterion
strict: bool
If True, raises an error in case the metric is not valid.
monitor_freq: Any
Frequency of evaluating the monitor domain
validation_freq: Any
Frequency of evaluating the validation domain
"""
def __init__(
self,
metric: str,
min_delta: float = 0.0,
patience: float = 0,
mode: str = "min",
freq: int = 1000,
strict: bool = True,
monitor_freq: Any = None,
validation_freq: Any = None,
):
self.metric = metric
self.min_delta = min_delta
self.patience = patience
self.mode = mode
self.freq = freq
self.strict = strict
self.monitor_freq = monitor_freq
self.validation_freq = validation_freq
self.best_score = None
self.counter = 0
self.check_freqs = True
if self.freq > self.patience:
raise RuntimeError(
"Stop criterion patience should be greater than or equal to the freq for stopping criterion"
)
self.mode_dict = {"min": np.less, "max": np.greater}
if self.mode not in self.mode_dict.keys():
raise RuntimeError("Stop criterion mode can be either min or max")
self.mode_op = self.mode_dict[self.mode]
self.min_delta *= 1 if self.mode == "max" else -1
def evaluate(self, metric_dict: Dict[str, float]) -> bool:
"""
Evaluate the stop criterion
Parameters
----------
metric_dict : str
Dictionary of available metrics to compute
"""
if self.check_freqs:
self._check_frequencies(metric_dict)
score = self._get_score(metric_dict, self.target_key)
if self.best_score is None:
self.best_score = score
elif self.mode_op(self.best_score + self.min_delta, score):
if self.mode_op(score, self.best_score):
self.best_score = score
self.counter += self.freq
if self.counter >= self.patience:
return True
else:
self.best_score = score
self.counter = 0
return False
def _check_frequencies(self, metric_dict):
found_metric = False
for key in metric_dict.keys():
for k in metric_dict[key].keys():
if self.metric == k:
self.target_key = key
found_metric = True
break
if not found_metric and self.strict:
raise RuntimeError(
"[modulus.sym.stop_criterion] the specified metric for stopping criterion is not valid"
)
if self.target_key == "monitor" and (
self.freq % self.monitor_freq != 0 or self.freq == 0
):
raise RuntimeError(
"Stop criterion frequency should be a multiple of the monitor frequency"
)
elif self.target_key == "validation" and (
self.freq % self.validation_freq != 0 or self.freq == 0
):
raise RuntimeError(
"Stop criterion frequency should be a multiple of the validation frequency"
)
self.check_freqs = False
def _get_score(self, metric_dict, target_key):
return metric_dict[target_key][self.metric]
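# Usage sketch (illustrative only, not part of this module's public API):
# monitor a hypothetical "val_loss" metric reported under the "validation"
# group; the metric name and frequencies below are illustrative only.
if __name__ == "__main__":
    criterion = StopCriterion(
        metric="val_loss",
        min_delta=1e-4,
        patience=5000,
        mode="min",
        freq=1000,
        validation_freq=1000,
    )
    metric_dict = {"validation": {"val_loss": 0.25}}
    # First evaluation only records the best score, so this prints False
    print(criterion.evaluate(metric_dict))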
| modulus-sym-main | modulus/sym/utils/training/stop_criterion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import sympy as sp
import scipy
from scipy import interpolate
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
# functions to plot a variable
def plot_field(
var,
save_name,
coordinates=["x", "y"],
bounds_var=None,
criteria=None,
plot_title="",
resolution=128,
figsize=(8, 8),
):
plt.figure(figsize=figsize)
nr_plots = len(list(var.keys())) - 2 # not plotting x or y
plot_index = 1
for key, value in var.items():
if key in coordinates:
continue
X, Y = _make_mesh(var, coordinates, bounds_var, resolution)
pos = np.concatenate([var[coordinates[0]], var[coordinates[1]]], axis=1)
value_star = interpolate.griddata(pos, value.flatten(), (X, Y), method="linear")
if criteria is not None:
np_criteria = _compile_criteria(coordinates, criteria)
nan_mask = np.where(np_criteria(X, Y), 0.0, np.nan)
value_star += nan_mask
plt.subplot(nr_plots, 1, plot_index)
plt.title(plot_title + ": " + key)
plt.imshow(
np.flip(value_star, axis=0),
cmap="jet",
extent=[np.min(X), np.max(X), np.min(Y), np.max(Y)],
)
plt.xlabel(coordinates[0])
plt.ylabel(coordinates[1])
plt.colorbar()
plot_index += 1
plt.savefig(save_name + ".png")
plt.close()
# functions to plot true and pred variables with diff plot
def plot_field_compare(
true_var,
pred_var,
save_name,
coordinates=["x", "y"],
bounds_var=None,
criteria=None,
resolution=128,
figsize=(12, 10),
same_colorbar=False,
):
plt.figure(figsize=figsize)
nr_plots = len(list(true_var.keys())) - 2 # not plotting x or y
plot_index = 1
for key in true_var.keys():
if key in coordinates:
continue
X, Y = _make_mesh(true_var, coordinates, bounds_var, resolution)
pos = np.concatenate(
[true_var[coordinates[0]], true_var[coordinates[1]]], axis=1
)
true_field_star = interpolate.griddata(
pos, true_var[key].flatten(), (X, Y), method="linear"
)
pred_field_star = interpolate.griddata(
pos, pred_var[key].flatten(), (X, Y), method="linear"
)
if criteria is not None:
np_criteria = _compile_criteria(coordinates, criteria)
nan_mask = np.where(np_criteria(X, Y), 0.0, np.nan)
true_field_star += nan_mask
pred_field_star += nan_mask
if same_colorbar:
vmax = np.max(true_var[key])
vmin = np.min(true_var[key])
else:
vmax = None
vmin = None
# pred plot
plt.subplot(nr_plots, 3, plot_index)
plt.title("Predicted: " + key)
plt.imshow(pred_field_star, cmap="jet", vmax=vmax, vmin=vmin)
plt.colorbar()
plot_index += 1
# true plot
plt.subplot(nr_plots, 3, plot_index)
plt.title("True: " + key)
plt.imshow(true_field_star, cmap="jet", vmax=vmax, vmin=vmin)
plt.colorbar()
plot_index += 1
# diff plot
plt.subplot(nr_plots, 3, plot_index)
plt.title("Difference: " + key)
plt.imshow((true_field_star - pred_field_star), cmap="jet")
plt.colorbar()
plot_index += 1
plt.savefig(save_name + ".png")
plt.close()
def _make_mesh(var, coordinates, bounds_var, resolution):
if bounds_var is not None:
x_min = bounds_var[coordinates[0] + "_min"]
x_max = bounds_var[coordinates[0] + "_max"]
y_min = bounds_var[coordinates[1] + "_min"]
y_max = bounds_var[coordinates[1] + "_max"]
else:
x_min = np.min(var[coordinates[0]])
x_max = np.max(var[coordinates[0]])
y_min = np.min(var[coordinates[1]])
y_max = np.max(var[coordinates[1]])
x_len = x_max - x_min
y_len = y_max - y_min
len_min = max(x_len, y_len)
nn_x = int((x_len / len_min) * resolution)
nn_y = int((y_len / len_min) * resolution)
if bounds_var is not None:
x = np.linspace(
bounds_var[coordinates[0] + "_min"],
bounds_var[coordinates[0] + "_max"],
nn_x,
)
y = np.linspace(
bounds_var[coordinates[1] + "_min"],
bounds_var[coordinates[1] + "_max"],
nn_y,
)
else:
x = np.linspace(np.min(var[coordinates[0]]), np.max(var[coordinates[0]]), nn_x)
y = np.linspace(np.min(var[coordinates[1]]), np.max(var[coordinates[1]]), nn_y)
return np.meshgrid(x, y)
def _compile_criteria(coordinates, criteria):
np_criteria = sp.lambdify([sp.Symbol(k) for k in coordinates], criteria, "numpy")
return np_criteria
# functions to plot a variable
def _var_to_mesh(
var, key, coordinates=["x", "y"], bounds_var=None, criteria=None, resolution=128
):
X, Y = _make_mesh(var, coordinates, bounds_var, resolution)
pos = np.concatenate([var[coordinates[0]], var[coordinates[1]]], axis=1)
value_star = interpolate.griddata(pos, var[key].flatten(), (X, Y), method="linear")
if criteria is not None:
np_criteria = _compile_criteria(coordinates, criteria)
nan_mask = np.where(np_criteria(X, Y), 0.0, np.nan)
value_star += nan_mask
# value_star = value_star * nan_mask
return value_star
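# Usage sketch (illustrative only, not part of this module's public API):
# interpolate a scattered 2D field onto a regular grid and write
# "u_example.png" to the current working directory.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x = rng.uniform(0.0, 1.0, (1000, 1))
    y = rng.uniform(0.0, 1.0, (1000, 1))
    var = {"x": x, "y": y, "u": np.sin(2 * np.pi * x) * np.cos(2 * np.pi * y)}
    plot_field(var, "u_example", coordinates=["x", "y"], plot_title="example")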
| modulus-sym-main | modulus/sym/utils/io/field.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
def plot_time_series(var, base_name, time_axis="step"):
for plot_var in var.keys():
if plot_var != time_axis:
plt.plot(var[time_axis][:, 0], var[plot_var][:, 0], label=plot_var)
plt.legend()
plt.xlabel(time_axis)
plt.savefig(base_name + ".png")
plt.close()
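# Usage sketch (illustrative only, not part of this module's public API):
# plot a toy loss history and write "loss_history.png" to the current
# working directory; numpy is imported locally since this module does not
# otherwise need it.
if __name__ == "__main__":
    import numpy as np

    steps = np.arange(10)[:, None]
    var = {"step": steps, "loss": np.exp(-0.3 * steps)}
    plot_time_series(var, "loss_history", time_axis="step")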
| modulus-sym-main | modulus/sym/utils/io/time_series.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .vtk import (
VTKUniformGrid,
VTKRectilinearGrid,
VTKStructuredGrid,
VTKUnstructuredGrid,
VTKPolyData,
VTKFromFile,
var_to_polyvtk,
grid_to_vtk,
)
from .plotter import (
ValidatorPlotter,
InferencerPlotter,
GridValidatorPlotter,
DeepONetValidatorPlotter,
)
from .csv_rw import csv_to_dict, dict_to_csv
| modulus-sym-main | modulus/sym/utils/io/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines helper Plotter class for adding plots to tensorboard summaries
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
from typing import Dict
class _Plotter:
def __call__(self, *args):
raise NotImplementedError
def _add_figures(self, group, name, results_dir, writer, step, *args):
"Try to make plots and write them to tensorboard summary"
# catch exceptions on (possibly user-defined) __call__
try:
fs = self(*args)
except Exception as e:
print(f"error: {self}.__call__ raised an exception:", str(e))
else:
for f, tag in fs:
f.savefig(
results_dir + name + "_" + tag + ".png",
bbox_inches="tight",
pad_inches=0.1,
)
writer.add_figure(group + "/" + name + "/" + tag, f, step, close=True)
plt.close("all")
def _interpolate_2D(self, size, invar, *outvars):
"Interpolate 2D outvar solutions onto a regular mesh"
assert len(invar) == 2
# define regular mesh to interpolate onto
xs = [invar[k][:, 0] for k in invar]
extent = (xs[0].min(), xs[0].max(), xs[1].min(), xs[1].max())
xyi = np.meshgrid(
np.linspace(extent[0], extent[1], size),
np.linspace(extent[2], extent[3], size),
indexing="ij",
)
# interpolate outvars onto mesh
outvars_interp = []
for outvar in outvars:
outvar_interp = {}
for k in outvar:
outvar_interp[k] = scipy.interpolate.griddata(
(xs[0], xs[1]), outvar[k][:, 0], tuple(xyi)
)
outvars_interp.append(outvar_interp)
return [extent] + outvars_interp
class ValidatorPlotter(_Plotter):
"Default plotter class for validator"
def __call__(self, invar, true_outvar, pred_outvar):
"Default function for plotting validator data"
ndim = len(invar)
if ndim > 2:
print("Default plotter can only handle <=2 input dimensions, passing")
return []
# interpolate 2D data onto grid
if ndim == 2:
extent, true_outvar, pred_outvar = self._interpolate_2D(
100, invar, true_outvar, pred_outvar
)
# make plots
dims = list(invar.keys())
fs = []
for k in pred_outvar:
f = plt.figure(figsize=(3 * 5, 4), dpi=100)
for i, (o, tag) in enumerate(
zip(
[true_outvar[k], pred_outvar[k], true_outvar[k] - pred_outvar[k]],
["true", "pred", "diff"],
)
):
plt.subplot(1, 3, 1 + i)
if ndim == 1:
plt.plot(invar[dims[0]][:, 0], o[:, 0])
plt.xlabel(dims[0])
elif ndim == 2:
plt.imshow(o.T, origin="lower", extent=extent)
plt.xlabel(dims[0])
plt.ylabel(dims[1])
plt.colorbar()
plt.title(f"{k}_{tag}")
plt.tight_layout()
fs.append((f, k))
return fs
class InferencerPlotter(_Plotter):
"Default plotter class for inferencer"
def __call__(self, invar, outvar):
"Default function for plotting inferencer data"
ndim = len(invar)
if ndim > 2:
print("Default plotter can only handle <=2 input dimensions, passing")
return []
# interpolate 2D data onto grid
if ndim == 2:
extent, outvar = self._interpolate_2D(100, invar, outvar)
# make plots
dims = list(invar.keys())
fs = []
for k in outvar:
f = plt.figure(figsize=(5, 4), dpi=100)
if ndim == 1:
                plt.plot(invar[dims[0]][:, 0], outvar[k][:, 0])
plt.xlabel(dims[0])
elif ndim == 2:
plt.imshow(outvar[k].T, origin="lower", extent=extent)
plt.xlabel(dims[0])
plt.ylabel(dims[1])
plt.colorbar()
plt.title(k)
plt.tight_layout()
fs.append((f, k))
return fs
class GridValidatorPlotter(_Plotter):
"""Grid validation plotter for structured data"""
def __init__(self, n_examples: int = 1):
self.n_examples = n_examples
def __call__(
self,
invar: Dict[str, np.array],
true_outvar: Dict[str, np.array],
pred_outvar: Dict[str, np.array],
):
ndim = next(iter(invar.values())).ndim - 2
if ndim > 3:
print("Default plotter can only handle <=3 input dimensions, passing")
return []
# get difference
diff_outvar = {}
for k, v in true_outvar.items():
diff_outvar[k] = true_outvar[k] - pred_outvar[k]
fs = []
for ie in range(self.n_examples):
f = self._make_plot(ndim, ie, invar, true_outvar, pred_outvar, diff_outvar)
fs.append((f, f"prediction_{ie}"))
return fs
def _make_plot(self, ndim, ie, invar, true_outvar, pred_outvar, diff_outvar):
# make plot
nrows = max(len(invar), len(true_outvar))
f = plt.figure(figsize=(4 * 5, nrows * 4), dpi=100)
for ic, (d, tag) in enumerate(
zip(
[invar, true_outvar, pred_outvar, diff_outvar],
["in", "true", "pred", "diff"],
)
):
for ir, k in enumerate(d):
plt.subplot2grid((nrows, 4), (ir, ic))
if ndim == 1:
plt.plot(d[k][ie, 0, :])
elif ndim == 2:
plt.imshow(d[k][ie, 0, :, :].T, origin="lower")
else:
z = d[k].shape[-1] // 2 # Z slice
plt.imshow(d[k][ie, 0, :, :, z].T, origin="lower")
plt.title(f"{k}_{tag}")
plt.colorbar()
plt.tight_layout()
return f
class DeepONetValidatorPlotter(_Plotter):
"""DeepONet validation plotter for structured data"""
def __init__(self, n_examples: int = 1):
self.n_examples = n_examples
def __call__(
self,
invar: Dict[str, np.array],
true_outvar: Dict[str, np.array],
pred_outvar: Dict[str, np.array],
):
ndim = next(iter(invar.values())).shape[-1]
        if ndim > 2:
            print("Default plotter can only handle <=2 input dimensions, passing")
return []
# get difference
diff_outvar = {}
for k, v in true_outvar.items():
diff_outvar[k] = true_outvar[k] - pred_outvar[k]
fs = []
for ie in range(self.n_examples):
f = self._make_plot(ndim, ie, invar, true_outvar, pred_outvar, diff_outvar)
fs.append((f, f"prediction_{ie}"))
return fs
def _make_plot(self, ndim, ie, invar, true_outvar, pred_outvar, diff_outvar):
# make plot
# invar: input of trunk net. Dim: N*P*ndim
# outvar: output of DeepONet. Dim: N*P
nrows = max(len(invar), len(true_outvar))
f = plt.figure(figsize=(4 * 5, nrows * 4), dpi=100)
invar_data = next(iter(invar.values()))
for ic, (d, tag) in enumerate(
zip(
[true_outvar, pred_outvar, diff_outvar],
["true", "pred", "diff"],
)
):
for ir, k in enumerate(d):
plt.subplot2grid((nrows, 4), (ir, ic))
if ndim == 1:
plt.plot(invar_data[ie, :].flatten(), d[k][ie, :])
elif ndim == 2:
plt.scatter(
x=invar_data[ie, :, 0],
y=invar_data[ie, :, 1],
c=d[k][ie, :],
s=0.5,
origin="lower",
cmap="jet",
)
plt.colorbar()
plt.title(f"{k}_{tag}")
plt.tight_layout()
return f
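# Usage sketch (illustrative only, not part of this module's public API):
# run the default validator plotter on a toy 1D dataset and report the
# figure tags it produced.
if __name__ == "__main__":
    x = np.linspace(0, 1, 64)[:, None]
    invar = {"x": x}
    true_outvar = {"u": np.sin(2 * np.pi * x)}
    pred_outvar = {"u": np.sin(2 * np.pi * x) + 0.05}
    figures = ValidatorPlotter()(invar, true_outvar, pred_outvar)
    print([tag for _, tag in figures])  # -> ['u']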
| modulus-sym-main | modulus/sym/utils/io/plotter.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
simple helper functions for reading and
saving CSV files
"""
import csv
import numpy as np
def csv_to_dict(filename, mapping=None, delimiter=","):
"""
reads a csv file to a dictionary of columns
Parameters
----------
filename : str
The file name to load from
mapping : None, dict
If None load entire csv file and store
every column as a key in the dict. If
`mapping` is not none use this to map
keys from CSV to keys in dict.
delimiter: str
The string used for separating values.
Returns
-------
data : dict of numpy arrays
numpy arrays have shape [N, 1].
"""
# Load csv file
values = np.loadtxt(filename, skiprows=1, delimiter=delimiter, unpack=False)
# get column keys
csvfile = open(filename)
reader = csv.reader(csvfile, delimiter=delimiter)
first_line = next(iter(reader))
# set dictionary
csv_dict = {}
for i, name in enumerate(first_line):
if mapping is not None:
if name.strip() in mapping.keys():
csv_dict[mapping[name.strip()]] = values[:, i : i + 1]
else:
csv_dict[name.strip()] = values[:, i : i + 1]
return csv_dict
def dict_to_csv(dictionary, filename):
"""
saves a dict of numpy arrays to csv file
Parameters
----------
dictionary : dict
dictionary of numpy arrays. The numpy
arrays have a shape of [N, 1].
filename : str
        The file name to save to
"""
# add csv to filename
if filename[-4:] != ".csv":
filename += ".csv"
# save np arrays
csvfile = open(filename, "w+")
csvfile.write(",".join(['"' + str(x) + '"' for x in list(dictonary.keys())]) + "\n")
for i in range(next(iter(dictonary.values())).shape[0]):
csvfile.write(",".join([str(x[i, 0]) for x in dictonary.values()]) + "\n")
csvfile.close()
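# Usage sketch (illustrative only, not part of this module's public API):
# round-trip a small dictionary through a CSV file in the current working
# directory.
if __name__ == "__main__":
    data = {"x": np.linspace(0.0, 1.0, 5)[:, None], "y": np.zeros((5, 1))}
    dict_to_csv(data, "example")
    loaded = csv_to_dict("example.csv")
    print(loaded["x"].shape)  # -> (5, 1)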
| modulus-sym-main | modulus/sym/utils/io/csv_rw.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for generating vtk files
"""
import time
import torch
import scipy
import numpy as np
import matplotlib
import sympy as sp
import logging
import vtk
from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy
from pathlib import Path
import pathlib
from typing import List, Dict, Union, Tuple
logger = logging.getLogger(__name__)
class VTKBase:
# Only supports working with point data
def __init__(self, file_name: str, file_dir: str):
self.file_name = file_name
self.file_dir = file_dir
self.ext = ".vtk"
self.vtk_obj = None
self.writer = None
self.export_map = {}
def save_vtk(self):
raise NotImplementedError("Implement in VTK subclass")
def get_points(self):
raise NotImplementedError("Implement in VTK subclass")
def get_cells(self):
raise NotImplementedError("Implement in VTK subclass")
def set_points(self):
raise NotImplementedError("Implement in VTK subclass")
def set_cells(self):
raise NotImplementedError("Implement in VTK subclass")
def get_array_names(self):
narrays = self.vtk_obj.GetPointData().GetNumberOfArrays()
names = []
for i in range(narrays):
names.append(self.vtk_obj.GetPointData().GetArrayName(i))
return names
def get_array(self, name: str, dim: Union[None, int] = None):
if name not in self.get_array_names():
logger.warn(f"{name} not found in data arrays")
return None
data_array = vtk_to_numpy(self.vtk_obj.GetPointData().GetArray(name))
# Expand last dim for scalars for consistency
if data_array.ndim == 1:
data_array = data_array[:, np.newaxis]
elif dim is not None:
# Get component of data array
            if dim >= data_array.shape[1]:
raise ValueError(
f"Dimension requested of VTK dataarray {name}:{dim} is too large. Data-array size: {data_array.shape}"
)
data_array = data_array[:, dim : dim + 1]
return data_array
def get_data_from_map(self, vtk_data_map: Dict[str, List[str]]):
data_dict = {}
coord_map = {"x": 0, "y": 1, "z": 2}
# Loop through input map values
for input_key, vtk_keys in vtk_data_map.items():
input_array = []
for vtk_key in vtk_keys:
# Check if coordinate array
if vtk_key in coord_map:
input_array0 = self.get_points(dims=[coord_map[vtk_key]])
input_array.append(input_array0)
# Check if data array
elif vtk_key.split(":")[0] in self.get_array_names():
if len(vtk_key.split(":")) > 1:
input_array0 = self.get_array(
name=vtk_key.split(":")[0], dim=int(vtk_key.split(":")[1])
)
else:
input_array0 = self.get_array(name=vtk_key)
input_array.append(input_array0)
data_dict[input_key] = np.concatenate(input_array, axis=1)
return data_dict
def var_to_vtk(
self,
data_vars: Dict[str, np.array],
file_name: str = None,
file_dir: str = None,
step: int = None,
):
if file_name is None:
file_name = self.file_name
if file_dir is None:
file_dir = self.file_dir
if step is not None:
file_name = file_name + f"{step:06}"
# Convert any non list values in input map to lists
for input_key, vtk_keys in self.export_map.items():
if isinstance(vtk_keys, str):
self.export_map[input_key] = [vtk_keys]
# Apply vtk mask, to compose multidim variables
out_var = {}
for key, data_keys in self.export_map.items():
vtk_array = []
for data_key in data_keys:
if data_key in data_vars:
if data_vars[data_key].ndim == 1:
vtk_array.append(data_vars[data_key][:, np.newaxis])
else:
vtk_array.append(data_vars[data_key])
elif data_key is None:
vtk_array.append(
np.zeros((self.vtk_obj.GetNumberOfPoints(), 1), dtype=np.short)
)
            # If we received any data that fits the map
if len(vtk_array) > 0:
out_var[key] = np.squeeze(np.concatenate(vtk_array, axis=1))
# Add data to vtk file
# TODO: Only save points inside class and create vtk obj on save call
for key, data in out_var.items():
self.add_point_array(key, data.astype(np.float32))
self.save_vtk(file_name, file_dir)
def save_vtk(
self,
file_name: str = None,
file_dir: str = None,
compression: int = 1,
data_mode: int = 1,
):
# Compression level: 1 (worst compression, fastest) ... 9 (best compression, slowest).
# https://vtk.org/doc/nightly/html/classvtkXMLWriterBase.html
# Data mode: 0 = ascii, 1 = binary
if file_name is None:
file_name = self.file_name
if file_dir is None:
file_dir = self.file_dir
Path(file_dir).mkdir(parents=True, exist_ok=True)
file_path = Path(file_dir) / Path(file_name + self.ext)
self.writer.SetFileName(file_path)
self.writer.SetCompressorTypeToZLib()
self.writer.SetCompressionLevel(compression)
self.writer.SetDataMode(data_mode)
self.writer.SetInputData(self.vtk_obj)
self.writer.Write()
def add_point_array(self, name: str, data: np.array):
"""Adds point array data into VTK file
Parameters
----------
name : str
data array name
data : np.array
1D or 2D numpy data array
"""
assert (
data.shape[0] == self.vtk_obj.GetNumberOfPoints()
), f"Input array incorrect size. Got {data.shape[0]} instead of {self.vtk_obj.GetNumberOfPoints()}"
assert data.ndim < 3, "1D and 2D arrays supported"
data_array = numpy_to_vtk(data, deep=True)
if data.ndim == 2:
data_array.SetNumberOfComponents(data.shape[1])
data_array.SetName(name)
self.vtk_obj.GetPointData().AddArray(data_array)
def remove_point_array(self, name: str):
if name in self.get_array_names():
self.vtk_obj.GetPointData().RemoveArray(name)
else:
logger.warn(f"Point data {name} not present in VTK object")
class VTKUniformGrid(VTKBase):
"""vtkUniformGrid wrapper class
Parameters
----------
bounds : List[List[int]]
Domain bounds of each dimension
npoints : List[int]
List of number of points in each dimension
export_map : Dict[str, List[str]], optional
Export map dictionary with keys that are VTK variables names and values that are lists of output variables. Will use 1 to 1 mapping if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
init_vtk : bool, optional
Initialize new VTK object from parameters (used by VTKFromFile), by default True
"""
def __init__(
self,
bounds: List[List[int]],
npoints: List[int],
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
init_vtk: bool = True,
):
super().__init__(file_name, file_dir)
self.vtk_obj = vtk.vtkUniformGrid()
self.writer = vtk.vtkXMLImageDataWriter()
self.ext = ".vti"
self.export_map = export_map
if init_vtk:
self.init_points(bounds, npoints)
def init_points(
self,
bounds: List[List[int]],
npoints: List[int],
):
assert len(bounds) == len(
npoints
), f"Bounds and npoints must be same length {len(bounds)}, {len(npoints)}"
assert (
len(bounds) > 0 and len(bounds) < 4
), "Only 1, 2, 3 grid dimensionality allowed"
        # Pad for missing dimensions
npoints = np.array(npoints + [1, 1])
bounds = np.array(bounds + [[0, 0], [0, 0]])
dx = abs(bounds[:, 0] - bounds[:, 1]) / np.maximum(
np.ones_like(npoints), npoints - 1
)
# This is unique to uniform grid since it uses the imgdata backend
self.vtk_obj.SetOrigin(
bounds[0][0], bounds[1][0], bounds[2][0]
) # default values
self.vtk_obj.SetSpacing(dx[0], dx[1], dx[2])
self.vtk_obj.SetDimensions(npoints[0], npoints[1], npoints[2])
def get_points(self, dims: List[int] = [0, 1, 2]):
# Slow but VTK Image data does not explicitly store point coords
points = []
for i in range(self.vtk_obj.GetNumberOfPoints()):
points.append(self.vtk_obj.GetPoint(i))
points = np.array(points)
return np.concatenate([points[:, i : i + 1] for i in dims], axis=1)
def set_points(self, points: np.array):
raise NotImplementedError("Cannot set points on vtkUniformGrid")
def set_cells(self):
raise AttributeError("Cannot set the cells of a vtkStructuredPoints")
@classmethod
def init_from_obj(
cls,
vtk_obj,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
):
vtk_wrapper = VTKUniformGrid(
None,
None,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
init_vtk=False,
)
vtk_wrapper.vtk_obj = vtk_obj
return vtk_wrapper
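# Usage sketch (illustrative only, not part of this module's public API):
# write a random scalar field on a 10x10 uniform grid to
# "./outputs/uniform_example.vti"; the field and directory names are
# illustrative only.
if __name__ == "__main__":
    grid = VTKUniformGrid(
        bounds=[[0.0, 1.0], [0.0, 1.0]],
        npoints=[10, 10],
        export_map={"u": ["u"]},
        file_name="uniform_example",
        file_dir="./outputs",
    )
    u = np.random.rand(grid.vtk_obj.GetNumberOfPoints())
    grid.var_to_vtk({"u": u})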
class VTKRectilinearGrid(VTKBase):
"""vtkRectilinearGrid wrapper class
Parameters
----------
axis_coords : List[np.array]
List of arrays that define points on each axis
export_map : Dict[str, List[str]], optional
Export map dictionary with keys that are VTK variables names and values that are lists of output variables. Will use 1 to 1 mapping if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
init_vtk : bool, optional
Initialize new VTK object from parameters (used by VTKFromFile), by default True
"""
def __init__(
self,
axis_coords: List[np.array],
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
init_vtk: bool = True,
):
super().__init__(file_name, file_dir)
self.vtk_obj = vtk.vtkRectilinearGrid()
self.writer = vtk.vtkXMLRectilinearGridWriter()
self.ext = ".vtr"
self.export_map = export_map
if init_vtk:
self.init_points(axis_coords)
def init_points(self, coords: List[np.array]):
        assert len(coords) < 4, "Maximum of 3 spatial coordinate arrays accepted"
        # Pad for missing dimensions
coords = coords + [np.array([0]), np.array([0])]
# This is unique to vtkRectilinearGrid since points are not explicit
self.vtk_obj.SetDimensions(
coords[0].shape[0], coords[1].shape[0], coords[2].shape[0]
)
self.vtk_obj.SetXCoordinates(numpy_to_vtk(coords[0]))
self.vtk_obj.SetYCoordinates(numpy_to_vtk(coords[1]))
self.vtk_obj.SetZCoordinates(numpy_to_vtk(coords[2]))
def get_points(self, dims: List[int] = [0, 1, 2]):
# GetPoint in vtkRectilinearGrid takes in point container to populate since
# it does not have one internally
# https://vtk.org/doc/nightly/html/classvtkRectilinearGrid.html
points = vtk.vtkPoints()
self.vtk_obj.GetPoints(points)
# Now we can convert to numpy
points = vtk_to_numpy(points.GetData())
return np.concatenate([points[:, i : i + 1] for i in dims], axis=1)
def set_points(self, points: np.array):
raise AttributeError("Cannot set the points of a vtkRectilinearGrid explicitly")
def set_cells(self):
raise AttributeError("Cannot set the cells of a vtkRectilinearGrid explicitly")
@classmethod
def init_from_obj(
cls,
vtk_obj,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
):
vtk_wrapper = VTKRectilinearGrid(
None,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
init_vtk=False,
)
vtk_wrapper.vtk_obj = vtk_obj
return vtk_wrapper
class VTKStructuredGrid(VTKBase):
"""vtkStructuredGrid wrapper class
Parameters
----------
points : np.array
Mesh grid of points in 'ij' format
dims : List[int]
Number of points in each dimension
export_map : Dict[str, List[str]], optional
Export map dictionary with keys that are VTK variables names and values that are lists of output variables. Will use 1 to 1 mapping if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
init_vtk : bool, optional
Initialize new VTK object from parameters (used by VTKFromFile), by default True
"""
def __init__(
self,
points: np.array,
dims: List[int],
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
init_vtk: bool = True,
):
super().__init__(file_name, file_dir)
self.vtk_obj = vtk.vtkStructuredGrid()
self.writer = vtk.vtkXMLStructuredGridWriter()
self.ext = ".vts"
self.export_map = export_map
if init_vtk:
self.init_points(points, dims)
def init_points(self, points: np.array, dims: List[int]):
assert points.ndim == 2, "Points array must have 2 dimensions [npoints, dim]"
        assert points.shape[1] < 4, "Maximum of 3 spatial point arrays accepted"
assert len(dims) == points.shape[1], "Domain dimension must match dim of points"
        # Pad for missing dimensions
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
dims = dims + [1, 1]
assert (
dims[0] * dims[1] * dims[2] == points.shape[0]
), "Number of points do not match provided dimensions"
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(points.shape[0])
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetDimensions(dims[:3])
self.vtk_obj.SetPoints(pts)
def get_points(self, dims: List[int] = [0, 1, 2]):
points = vtk_to_numpy(self.vtk_obj.GetPoints().GetData())
return np.concatenate([points[:, i : i + 1] for i in dims], axis=1)
def set_points(self, points: np.array, dims: List[int]):
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
dims = dims + [1, 1]
assert (
dims[0] * dims[1] * dims[2] == points.shape[0]
), "Number of points do not match provided dimensions"
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(points.shape[0])
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetDimensions(dims[:3])
self.vtk_obj.SetPoints(pts)
def set_cells(self):
raise AttributeError("Cannot set the cells of a vtkStructuredGrid explicitly")
@classmethod
def init_from_obj(
cls,
vtk_obj,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
):
vtk_wrapper = VTKStructuredGrid(
None,
None,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
init_vtk=False,
)
vtk_wrapper.vtk_obj = vtk_obj
return vtk_wrapper
# ===================
# VTK Unstructured Grid
# ===================
class VTKUnstructuredGrid(VTKBase):
"""vtkUnstructuredGrid wrapper class
Parameters
----------
points : np.array
Array of point locations [npoints, (1,2 or 3)]
cell_index : Tuple[ np.array, np.array ]
Tuple of (cell_offsets, cell_connectivity) arrays.
Cell offsets is a 1D array denoting how many points make up a face for each cell.
        Cell connectivity is a 1D array that contains vertices of each cell face in order
cell_types : np.array
Array of cell vtk types:
https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html
export_map : Dict[str, List[str]], optional
Export map dictionary with keys that are VTK variables names and values that are lists of output variables. Will use 1 to 1 mapping if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
init_vtk : bool, optional
Initialize new VTK object from parameters (used by VTKFromFile), by default True
"""
def __init__(
self,
points: np.array,
cell_index: Tuple[np.array, np.array],
cell_types: np.array,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
init_vtk: bool = True,
):
super().__init__(file_name, file_dir)
self.vtk_obj = vtk.vtkUnstructuredGrid()
self.writer = vtk.vtkXMLUnstructuredGridWriter()
self.ext = ".vtu"
self.export_map = export_map
if init_vtk:
self.init_points(points, cell_index, cell_types)
def init_points(
self,
points: np.array,
cell_index: Tuple[np.array, np.array],
cell_types: np.array,
):
assert points.ndim == 2, "Points array must have 2 dimensions [npoints, dim]"
        assert points.shape[1] < 4, "Maximum of 3 spatial point arrays accepted"
assert (
len(cell_index) == 2
), "Cell index must be tuple of numpy arrays containing [offsets, connectivity]"
# Could check cell type and cell index are consistent, but we assume the user
# knows what they are doing
        # Pad for missing dimensions
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(points.shape[0])
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetPoints(pts)
vtk_celltypes = vtk.vtkIntArray()
vtk_celltypes.SetNumberOfComponents(1)
vtk_celltypes = numpy_to_vtk(
cell_types.astype(int), array_type=vtk.vtkUnsignedCharArray().GetDataType()
)
vtk_cells = vtk.vtkCellArray()
vtk_offsets = numpy_to_vtk(
cell_index[0], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_connectivity = numpy_to_vtk(
cell_index[1], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_cells.SetData(vtk_offsets, vtk_connectivity)
self.vtk_obj.SetCells(vtk_celltypes, vtk_cells)
def get_points(self, dims: List[int] = [0, 1, 2]):
points = vtk_to_numpy(self.vtk_obj.GetPoints().GetData())
return np.concatenate([points[:, i : i + 1] for i in dims], axis=1)
# points = vtk_to_numpy(self.vtk_obj.GetPoints().GetData())
# points = [points[:, 0:1], points[:, 1:2], points[:, 2:3]]
# return [points[i] for i in dims]
def get_cells(self):
cells = self.vtk_obj.GetCells()
        # GetData on the cells returns a flat array [nedges, v1, v2, v3, ..., nedges, v1, v2, v3, ...]
        # Need to separate the offsets and connectivity arrays for practical use
cell_connectivity = vtk_to_numpy(cells.GetConnectivityArray())
cell_offsets = vtk_to_numpy(cells.GetOffsetsArray())
return cell_offsets, cell_connectivity
def get_celltypes(self):
cell_types = vtk_to_numpy(self.vtk_obj.GetCellTypesArray())
return cell_types
def set_points(self, points: np.array):
assert points.ndim == 2, "Points array must have 2 dimensions [npoints, dim]"
        assert points.shape[1] < 4, "Maximum of 3 spatial point arrays accepted"
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(points.shape[0])
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetPoints(pts)
def set_cells(self, cell_index: Tuple[np.array, np.array], cell_types: np.array):
assert (
len(cell_index) == 2
), "Cell index must be tuple of numpy arrays containing [offsets, connectivity]"
vtk_celltypes = vtk.vtkIntArray()
vtk_celltypes.SetNumberOfComponents(1)
vtk_celltypes = numpy_to_vtk(
cell_types.astype(int), array_type=vtk.vtkUnsignedCharArray().GetDataType()
)
vtk_cells = vtk.vtkCellArray()
vtk_offsets = numpy_to_vtk(
cell_index[0], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_connectivity = numpy_to_vtk(
cell_index[1], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_cells.SetData(vtk_offsets, vtk_connectivity)
self.vtk_obj.SetCells(vtk_celltypes, vtk_cells)
@classmethod
def init_from_obj(
cls,
vtk_obj,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
):
vtk_wrapper = VTKUnstructuredGrid(
None,
None,
None,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
init_vtk=False,
)
vtk_wrapper.vtk_obj = vtk_obj
return vtk_wrapper
# ===================
# VTK Polydata
# ===================
class VTKPolyData(VTKBase):
"""vtkPolyData wrapper class
Parameters
----------
points : np.array
Array of point locations [npoints, (1,2 or 3)]
line_index : np.array, optional
Array of line connections [nedges, 2], by default None
poly_index : Tuple[poly_offsets, poly_connectivity]
Tuple of polygon offsets and polygon connectivity arrays.
        The polygon offsets array is a 1D array denoting how many points make up a face for each polygon.
        The polygon connectivity array is a 1D array that contains the vertices of each polygon face in order, by default None
export_map : Dict[str, List[str]], optional
        Export map dictionary with keys that are VTK variable names and values that are lists of output variables. A 1-to-1 mapping is used if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
init_vtk : bool, optional
Initialize new VTK object from parameters (used by VTKFromFile), by default True
"""
def __init__(
self,
points: np.array,
line_index: np.array = None,
poly_index: Tuple[
np.array, np.array
] = None, # Tuple[poly_offsets, poly_connectivity]
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
init_vtk: bool = True,
):
super().__init__(file_name, file_dir)
self.vtk_obj = vtk.vtkPolyData()
self.writer = vtk.vtkXMLPolyDataWriter()
self.ext = ".vtp"
self.export_map = export_map
if init_vtk:
self.init_points(points, line_index, poly_index)
def init_points(
self,
points: np.array,
line_index: np.array = None,
poly_index: Tuple[np.array, np.array] = None,
):
assert points.ndim == 2, "Points array must have 2 dimensions [npoints, dim]"
        assert points.shape[1] < 4, "Maximum of 3 spatial point arrays accepted"
        # Pad for missing dimensions
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(int(points.shape[0]))
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetPoints(pts)
# Add cell array for verts
vert_cells = vtk.vtkCellArray()
for i in range(points.shape[0]):
vert_cells.InsertNextCell(1)
vert_cells.InsertCellPoint(i)
self.vtk_obj.SetVerts(vert_cells)
if line_index is not None:
self.set_lines(line_index)
if poly_index is not None:
self.set_polys(poly_index)
def get_points(self, dims: List[int] = [0, 1, 2]):
points = vtk_to_numpy(self.vtk_obj.GetPoints().GetData())
return np.concatenate([points[:, i : i + 1] for i in dims], axis=1)
def get_lines(self):
lines = vtk_to_numpy(self.vtk_obj.GetLines().GetData())
line_index = np.stack([lines[1::3], lines[2::3]], axis=1)
return line_index
def get_polys(self):
polys = self.vtk_obj.GetPolys()
        # The poly data contains a flat array [nedges, v1, v2, v3, ..., nedges, v1, v2, v3, ...]
        # Need to separate the offsets and connectivity arrays for practical use
poly_connectivity = vtk_to_numpy(polys.GetConnectivityArray())
poly_offsets = vtk_to_numpy(polys.GetOffsetsArray())
return poly_offsets, poly_connectivity
def get_cells(self):
raise AttributeError("vtkPolyData has polys not cells, call get_polys instead")
def set_points(self, points: np.array):
assert points.ndim == 2, "Points array must have 2 dimensions [npoints, dim]"
        assert points.shape[1] < 4, "Maximum of 3 spatial point arrays accepted"
points = np.concatenate(
[points, np.zeros((points.shape[0], 2), dtype=np.short)], axis=1
)
pts = vtk.vtkPoints()
pts.SetNumberOfPoints(points.shape[0])
pts.SetData(numpy_to_vtk(points[:, :3]))
self.vtk_obj.SetPoints(pts)
# Add cell array for verts
vert_cells = vtk.vtkCellArray()
for i in range(points.shape[0]):
vert_cells.InsertNextCell(1)
vert_cells.InsertCellPoint(i)
self.vtk_obj.SetVerts(vert_cells)
def set_lines(self, edge_index: np.array):
assert (
edge_index.ndim == 2 and edge_index.shape[1] == 2
), "Edge index array must have 2 dimensions [npoints, 2]"
lines = vtk.vtkCellArray()
for i in range(edge_index.shape[0]):
lines.InsertNextCell(2)
lines.InsertCellPoint(edge_index[i, 0])
lines.InsertCellPoint(edge_index[i, 1])
self.vtk_obj.SetLines(lines)
def set_polys(self, poly_index: Tuple[np.array, np.array]):
assert (
len(poly_index) == 2
), "poly_index should be tuple of (poly_offsets, poly_connectivity)"
vtk_polys = vtk.vtkCellArray()
vtk_offsets = numpy_to_vtk(
poly_index[0], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_connectivity = numpy_to_vtk(
poly_index[1], array_type=vtk.vtkTypeInt64Array().GetDataType()
)
vtk_polys.SetData(vtk_offsets, vtk_connectivity)
self.vtk_obj.SetPolys(vtk_polys)
def set_cells(self):
raise AttributeError("vtkPolyData has polys not cells, call set_polys instead")
@classmethod
def init_from_obj(
cls,
vtk_obj,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
):
vtk_wrapper = VTKPolyData(
None,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
init_vtk=False,
)
vtk_wrapper.vtk_obj = vtk_obj
return vtk_wrapper
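# Illustrative usage sketch (not part of the original file): build a vtkPolyData
# wrapper from a small point cloud connected by line segments and write one
# field. The file name and the field name "temperature" are assumptions made
# only for this example.
def _example_polydata():
    points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    line_index = np.array([[0, 1], [1, 2], [2, 3], [3, 0]])  # closed square loop
    poly = VTKPolyData(points, line_index=line_index, file_name="polydata_demo")
    poly.var_to_vtk(data_vars={"temperature": np.linspace(0.0, 1.0, 4).reshape(-1, 1)})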
class VTKFromFile(object):
"""Reads VTK file into memory and constructs corresponding VTK object
Parameters
----------
file_path : str
File directory/name of input vtk file
export_map : Dict[str, List[str]], optional
        Export map dictionary with keys that are VTK variable names and values that are lists of output variables. A 1-to-1 mapping is used if none is provided, by default {}
file_name : str, optional
File name of output vtk file, by default "vtk_output"
file_dir : str, optional
File directory of output vtk file, by default "."
force_legacy : bool, optional
Force a legacy only read, by default False
"""
def __new__(
cls,
file_path: str,
export_map: Dict[str, List[str]] = {},
file_name: str = "vtk_output",
file_dir: str = ".",
force_legacy: bool = False,
) -> None:
assert Path(file_path).is_file(), f"Provided VTK file {file_path} not found"
read_success = False
# Attempt to create XML reader
if not force_legacy:
try:
vtk_reader = cls.readXMLVTK(file_path)
read_success = True
            except Exception:
                logger.warning("VTK file not in valid XML format, will attempt legacy load")
# If failed or legacy force, create VTK Reader
if not read_success:
try:
vtk_reader = cls.readLegacyVTK(file_path)
read_success = True
            except Exception:
                logger.warning("VTK file not in a valid legacy VTK format")
# Hopefully VTK reader is loaded
assert read_success, "Failed to load VTK file in either XML or Legacy format"
logger.info(f"Read {Path(file_path).name} file successfully")
return cls.extractVTKObject(
vtk_reader=vtk_reader,
export_map=export_map,
file_name=file_name,
file_dir=file_dir,
)
@classmethod
def extractVTKObject(cls, vtk_reader, **kwargs) -> VTKBase:
# Get vtk object from reader
vtk_obj = vtk_reader.GetOutput()
# Create modulus.sym.VTK wrapper
if vtk_obj.__vtkname__ == "vtkImageData":
vtk_wrapper = VTKUniformGrid.init_from_obj(vtk_obj, **kwargs)
elif vtk_obj.__vtkname__ == "vtkRectilinearGrid":
vtk_wrapper = VTKRectilinearGrid.init_from_obj(vtk_obj, **kwargs)
elif vtk_obj.__vtkname__ == "vtkStructuredGrid":
vtk_wrapper = VTKStructuredGrid.init_from_obj(vtk_obj, **kwargs)
elif vtk_obj.__vtkname__ == "vtkUnstructuredGrid":
vtk_wrapper = VTKUnstructuredGrid.init_from_obj(vtk_obj, **kwargs)
elif vtk_obj.__vtkname__ == "vtkPolyData":
vtk_wrapper = VTKPolyData.init_from_obj(vtk_obj, **kwargs)
else:
raise ValueError("Unsupported vtk data type read")
logger.info(f"Loaded {vtk_obj.__vtkname__} object from file")
return vtk_wrapper
@classmethod
def readXMLVTK(cls, file_path: str):
# vtk.vtkXMLGenericDataObjectReader does not seem to work
        # Could read the first line of the XML and check VTKFile type=...
file_path = Path(file_path)
if file_path.suffix == ".vti":
vtk_reader = vtk.vtkXMLImageDataReader()
elif file_path.suffix == ".vtr":
vtk_reader = vtk.vtkXMLRectilinearGridReader()
elif file_path.suffix == ".vts":
vtk_reader = vtk.vtkXMLStructuredGridReader()
elif file_path.suffix == ".vtu":
vtk_reader = vtk.vtkXMLUnstructuredGridReader()
elif file_path.suffix == ".vtp":
vtk_reader = vtk.vtkXMLPolyDataReader()
else:
raise ValueError("Unsupported XML VTK format")
vtk_reader.SetFileName(file_path)
vtk_reader.Update()
return vtk_reader
@classmethod
def readLegacyVTK(cls, file_path: str):
vtk_reader = vtk.vtkGenericDataObjectReader()
vtk_reader.SetFileName(file_path)
vtk_reader.ReadAllScalarsOn()
vtk_reader.ReadAllVectorsOn()
vtk_reader.Update()
return vtk_reader
def var_to_polyvtk(
var_dict: Dict[str, np.array], file_path: str, coordinates=["x", "y", "z"]
):
"""Helper method for nodes to export thier variables to a vtkPolyData file
Should be avoided when possible as other VTK formats can save on memory.
Parameters
----------
var_dict : Dict[str, np.array]
Dictionary of variables in the array format [nstates, dim]
file_path : str
File directory/name of output vtk file
coordinates : list, optional
Variable names that corresponds to point positions, by default ["x", "y", "z"]
"""
# Extract point locations
points = []
for axis in coordinates:
if axis not in var_dict.keys():
data0 = next(iter(var_dict.values()))
points.append(np.zeros((data0.shape[0], 1), dtype=np.short))
else:
points.append(var_dict[axis])
del var_dict[axis]
points = np.concatenate(points, axis=1)
# Create 1:1 export map
export_map = {}
for key in var_dict.keys():
export_map[key] = [key]
file_path = Path(file_path)
vtk_obj = VTKPolyData(
points=points,
export_map=export_map,
file_name=file_path.stem,
file_dir=file_path.parents[0],
)
vtk_obj.var_to_vtk(data_vars=var_dict)
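# Illustrative usage sketch (not part of the original file): export a random 2D
# point cloud with a single field "u" to ./point_cloud.vtp. The missing "z"
# coordinate is padded with zeros by var_to_polyvtk itself.
def _example_var_to_polyvtk():
    n = 100
    var = {
        "x": np.random.rand(n, 1),
        "y": np.random.rand(n, 1),
        "u": np.random.rand(n, 1),
    }
    var_to_polyvtk(var, "./point_cloud")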
def grid_to_vtk(var_dict: Dict[str, np.array], file_path: str, batch_index: int = 0):
"""Helper method for nodes to export image/grid data to vtkUniformData file.
Arrays should be in the numpy 'ij' layout (element [0,0] is origin)
Parameters
----------
var_dict : Dict[str, np.array]
Dictionary of variables in the array format [batch, dim, xdim, ydim, zdim]
file_path : str
File directory/name of output vtk file
batch_index : int, optional
Batch index to write to file, by default 0
"""
# convert keys to strings
var = {str(key): value for key, value in var_dict.items()}
shape = np.shape(next(iter(var.values())))
assert len(shape) > 2 and len(shape) < 6, "Input variables must be dim 3, 4, 5"
    # Pad for any missing dims
bsize = shape[0]
cdim = shape[1]
grid_shape = list(shape[2:])
bounds = [[0, i - 1] for i in grid_shape]
# Flatten data and select batch
shaped_dict = {}
for key in var_dict.keys():
shaped_dict[key] = var_dict[key][batch_index]
cdim = shaped_dict[key].shape[0]
shaped_dict[key] = shaped_dict[key].reshape(cdim, -1).T
# Create 1:1 export map
export_map = {}
for key in shaped_dict.keys():
export_map[key] = [key]
file_path = Path(file_path)
vtk_obj = VTKUniformGrid(
bounds=bounds,
npoints=grid_shape,
export_map=export_map,
file_name=file_path.stem,
file_dir=file_path.parents[0],
)
vtk_obj.var_to_vtk(data_vars=shaped_dict)
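# Illustrative usage sketch (not part of the original file): export the first
# batch element of a [batch, channel, xdim, ydim] array. The variable name
# "pressure" is an assumption for this example.
def _example_grid_to_vtk():
    var = {"pressure": np.random.rand(4, 1, 64, 32)}  # [batch, cdim, xdim, ydim]
    grid_to_vtk(var, "./grid_output", batch_index=0)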
| modulus-sym-main | modulus/sym/utils/io/vtk.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper functions and classes for integration
"""
import torch
import quadpy as qd
import numpy as np
def tensor_int(w, v, u=None):
# u is a N*1 tensor
# v is a N*M tensor
# w is a N*1 tensor, quadrature (cubature) weights
# N is the number of points
# M is the number of (test) functions
# return: 1*M tensor: integrals of u*v[i] if u is not None
# return: 1*M tensor: integrals of v[i] if u is None
if u is None:
return torch.einsum("ik,ij->jk", v, w)
else:
return torch.einsum("ij,ik,ij->jk", u, v, w)
# class template for the quadrature
class Quadrature:
def __init__(self, scheme, trans, jac):
# scheme is the quadpy scheme
# trans is the transform from reference domain to target domain
# jac is the jacobian of trans, SHOULD BE 1D NUMPY ARRAY!
# points_ref and weights_ref are on the reference domain
self.scheme = scheme
self.trans = trans
self.jac = jac
self.points_ref = scheme.points
self.weights_ref = scheme.weights
self.make_numpy()
# self.make_tensor()
self.N_points = self.points_numpy.shape[0]
def make_numpy(self):
# points_numpy and weights_numpy are N*d numpy arrays, where N is the # of points and d is the dimension
# The approximated integral value is given by np.dot(f(p[:,0],p[:,1],p[:,2]),w)
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = (
self.weights_ref * self.jac
) # check here, should be 1D*1D numpy array, or 1D*constant
def make_tensor(self):
        # points_tensor and weights_tensor are N*d torch tensors, where N is the # of points and d is the dimension
self.points_tensor = torch.tensor(self.points_numpy, dtype=torch.float32)
self.weights_tensor = torch.tensor(
self.weights_numpy.reshape((-1, 1)), dtype=torch.float32
)
class Quadrature_Data:
def __init__(self, points_numpy, weights_numpy):
self.points_numpy = points_numpy
self.weights_numpy = weights_numpy
# self.make_tensor()
def make_tensor(self):
        # points_tensor and weights_tensor are N*d torch tensors, where N is the # of points and d is the dimension
self.points_tensor = torch.tensor(self.points_numpy, dtype=torch.float32)
self.weights_tensor = torch.tensor(
self.weights_numpy.reshape((-1, 1)), dtype=torch.float32
)
def Quad_Collection(quad_class, paras):
points_tmp = []
weights_tmp = []
for para in paras:
quad_tmp = quad_class(*para)
points_tmp.append(quad_tmp.points_numpy)
weights_tmp.append(quad_tmp.weights_numpy)
return Quadrature_Data(np.vstack(points_tmp), np.hstack(weights_tmp))
# 1D classes. (Quad_Line can be used in nD)
class Quad_Line(Quadrature):
def __init__(self, p0, p1, n, scheme_fcn=qd.c1.gauss_legendre):
self.p0 = np.reshape(np.array(p0), (1, -1))
self.p1 = np.reshape(np.array(p1), (1, -1))
super().__init__(
scheme=scheme_fcn(n),
trans=lambda t: 0.5 * (self.p0 + self.p1)
+ 0.5 * (self.p1 - self.p0) * np.reshape(t, (-1, 1)),
jac=np.linalg.norm(self.p1 - self.p0) / 2,
)
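# Illustrative usage sketch (not part of the original file): assemble a single
# quadrature rule for a two-segment polyline by passing per-segment constructor
# arguments to Quad_Collection. The weights should sum to the total length, 2.0,
# assuming quadpy's Gauss-Legendre weights on [-1, 1] sum to 2.
def _example_quad_collection():
    segments = [
        ([0.0, 0.0], [1.0, 0.0], 5),  # (p0, p1, order) for Quad_Line
        ([1.0, 0.0], [1.0, 1.0], 5),
    ]
    quad = Quad_Collection(Quad_Line, segments)
    return quad.weights_numpy.sum()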
# 2D curves
class Quad_Circle(Quadrature):
def __init__(self, r, c, n, scheme_fcn=qd.u2.get_good_scheme):
self.r = np.array(r)
self.c = np.array(c)
def my_trans(x):
rr = np.multiply.outer(self.r, x)
rr = np.swapaxes(rr, 0, -2)
return rr + self.c
super().__init__(scheme=scheme_fcn(n), trans=my_trans, jac=2 * np.pi * self.r)
# 2D domains
class Quad_Tri(Quadrature):
def __init__(self, v, n, scheme_fcn=qd.t2.get_good_scheme):
from quadpy.tn._helpers import get_vol
self.v = np.array(v) # 3x2 numpy array
if self.v.shape != (3, 2):
self.v = self.v.T
assert self.v.shape == (3, 2), "Vertices must be a 3 by 2 list or numpy array!"
self.vol = get_vol(self.v)
super().__init__(
scheme=scheme_fcn(n), trans=lambda x: x.T @ self.v, jac=self.vol
)
class Quad_Disk(Quadrature):
def __init__(self, r, c, n, scheme_fcn=qd.s2.get_good_scheme):
self.r = np.array(r)
self.c = np.array(c)
def my_trans(x):
rr = np.multiply.outer(self.r, x.T)
rr = np.swapaxes(rr, 0, -2)
return rr + self.c
super().__init__(scheme=scheme_fcn(n), trans=my_trans, jac=np.pi * self.r**2)
class Quad_Rect(Quadrature):
"""
    If region_type=False, the points are specified in an array of shape (2, 2, ...) such that arr[0][0] is the lower-left corner and arr[1][1] the upper-right.
    If the quadrilateral (quadpy c2) has its sides aligned with the coordinate axes, you can use v=[[x0, x1], [y0, y1]] and set region_type=True (default).
"""
def __init__(self, v, n, region_type=True, scheme_fcn=qd.c2.get_good_scheme):
from quadpy.cn._helpers import transform, get_detJ
if region_type:
from quadpy.c2 import rectangle_points
self.v = rectangle_points(*v)
else:
self.v = v
super().__init__(
scheme=scheme_fcn(n),
trans=lambda x: transform(x, self.v),
jac=lambda x: np.abs(get_detJ(x, self.v))
* 2 ** np.prod(len(self.v.shape) - 1),
)
def make_numpy(self):
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = self.weights_ref * self.jac(
self.points_ref
) # check here, should be 1D*1D numpy array, or 1D*constant
# 3D surfaces
class Quad_Sphere(Quadrature):
def __init__(self, r, c, n, scheme_fcn=qd.u3.get_good_scheme):
self.r = np.array(r)
self.c = np.array(c)
super().__init__(
scheme=scheme_fcn(n),
trans=lambda x: x.T * self.r + self.c,
jac=4 * np.pi * self.r**2,
)
# 3D domain
class Quad_Ball(Quadrature):
def __init__(self, r, c, n, scheme_fcn=qd.s3.get_good_scheme):
assert (
n <= 7
), "The degree of the cubature is not more than 7. Otherwise use nD ball scheme!"
self.r = np.array(r)
self.c = np.array(c)
def my_trans(x):
rr = np.multiply.outer(self.r, x.T)
rr = np.swapaxes(rr, 0, -2)
return rr + self.c
super().__init__(
scheme=scheme_fcn(n), trans=my_trans, jac=4 / 3 * np.pi * self.r**3
)
class Quad_Tet(Quadrature):
def __init__(self, v, n, scheme_fcn=qd.t3.get_good_scheme):
assert (
n <= 14
), "The degree of the cubature is not more than 14. Otherwise use nD simplex scheme!"
self.v = np.array(v)
if self.v.shape != (4, 3):
self.v = self.v.T
assert self.v.shape == (4, 3), "Vertices must be a 4 by 3 list or numpy array!"
from quadpy.tn._helpers import transform, get_vol
self.vol = get_vol(self.v)
super().__init__(
scheme=scheme_fcn(n), trans=lambda x: transform(x, self.v.T).T, jac=self.vol
)
class Quad_Cube(Quadrature):
def __init__(self, v, n, region_type=True, scheme_fcn=qd.c3.get_good_scheme):
from quadpy.cn._helpers import transform, get_detJ
assert (
n <= 11
), "The degree of the cubature is not more than 11. Otherwise use nD cube scheme!"
if region_type:
from quadpy.c3 import cube_points
self.v = cube_points(*v)
else:
self.v = v
super().__init__(
scheme=scheme_fcn(n),
trans=lambda x: transform(x, self.v),
jac=lambda x: np.abs(get_detJ(x, self.v))
* 2 ** np.prod(len(self.v.shape) - 1),
)
def make_numpy(self):
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = self.weights_ref * self.jac(
self.points_ref
) # check here, should be 1D*1D numpy array, or 1D*constant
class Quad_Pyramid(Quadrature):
def __init__(self, v, scheme_fcn=qd.p3.felippa_5):
from quadpy.p3._helpers import _transform, _get_det_J
self.v = v
super().__init__(
scheme=scheme_fcn(),
trans=lambda x: _transform(x.T, self.v).T,
jac=lambda x: np.abs(_get_det_J(self.v, x.T)),
)
def make_numpy(self):
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = self.weights_ref * self.jac(
self.points_ref
) # check here, should be 1D*1D numpy array, or 1D*constant
class Quad_Wedge(Quadrature):
def __init__(self, v, scheme_fcn=qd.w3.felippa_6):
from quadpy.w3._helpers import _transform, _get_detJ
self.v = np.array(v)
super().__init__(
scheme=scheme_fcn(),
trans=lambda x: _transform(x.T, self.v).T,
jac=lambda x: np.abs(_get_detJ(x.T, self.v)),
)
def make_numpy(self):
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = self.weights_ref * self.jac(
self.points_ref
) # check here, should be 1D*1D numpy array, or 1D*constant
# nD manifold
class Quad_nD_Sphere(Quadrature):
def __init__(self, r, c, dim, scheme_fcn=qd.un.dobrodeev_1978):
import ndim
self.r = np.array(r)
self.c = np.array(c)
self.dim = dim
def my_trans(x):
rr = np.multiply.outer(self.r, x)
rr = np.swapaxes(rr, 0, -2)
return rr + self.c
self.vol = ndim.nsphere.volume(self.dim, r=self.r)
super().__init__(scheme=scheme_fcn(self.dim), trans=my_trans, jac=self.vol)
class Quad_nD_Simplex(Quadrature):
def __init__(self, v, dim, n, scheme_fcn=qd.tn.grundmann_moeller):
from quadpy.tn._helpers import transform, get_vol
self.v = np.array(v)
self.dim = dim
self.vol = get_vol(self.v)
super().__init__(
scheme=scheme_fcn(self.dim, n),
trans=lambda x: transform(x, self.v.T).T,
jac=self.vol,
)
class Quad_nD_Ball(Quadrature):
def __init__(self, r, c, dim, scheme_fcn=qd.sn.dobrodeev_1970):
import ndim
self.r = np.array(r)
self.c = np.array(c)
self.dim = dim
self.vol = ndim.nball.volume(self.dim, r=self.r, symbolic=False)
def my_trans(x):
rr = np.multiply.outer(self.r, x.T)
rr = np.swapaxes(rr, 0, -2)
return rr + self.c
super().__init__(scheme=scheme_fcn(self.dim), trans=my_trans, jac=self.vol)
class Quad_nD_Cube(Quadrature):
def __init__(self, v, dim, region_type=True, scheme_fcn=qd.cn.stroud_cn_3_3):
from quadpy.cn._helpers import transform, get_detJ
self.dim = dim
if region_type:
from quadpy.cn._helpers import ncube_points
self.v = ncube_points(*v)
else:
self.v = v
super().__init__(
scheme=scheme_fcn(self.dim),
trans=lambda x: transform(x, self.v),
jac=lambda x: 2 ** np.prod(len(self.v.shape) - 1)
* np.abs(get_detJ(x, self.v)),
)
def make_numpy(self):
self.points_numpy = self.trans(self.points_ref)
self.weights_numpy = self.weights_ref * self.jac(
self.points_ref
) # check here, should be 1D*1D numpy array, or 1D*constant
# 2D cubature based on mesh
def domain_weights_and_points_2D(P, T, n=5, scheme=None):
# P is the point info
# T is the triangle info
# n is the cubature order, if applicable
    T = T.astype(np.int64)  # np.int is deprecated/removed in recent NumPy
Nt = T.shape[0]
if scheme is None:
scheme = qd.t2._lether.lether(n)
p_ref = scheme.points
w_ref = scheme.weights
xy_tmp = []
w_tmp = []
    for i in range(0, Nt):
idp = T[i, :]
tri = np.vstack((P[idp[0], :], P[idp[1], :], P[idp[2], :]))
S = 0.5 * np.abs(np.linalg.det(np.hstack((tri, np.ones((3, 1))))))
xy_tmp.append(p_ref.T @ tri)
w_tmp.append(S * w_ref)
xy = np.vstack(xy_tmp)
w = np.hstack(w_tmp)
return w.astype(np.float32), xy.astype(np.float32)
# 3D cubature based on mesh
def domain_weights_and_points_3D(P, T, n=5, scheme=None):
# P is the point info
# T is the triangle info
# n is the cubature order, if applicable
    T = T.astype(np.int64)  # np.int is deprecated/removed in recent NumPy
Nt = T.shape[0]
if scheme is None:
scheme = qd.t3.get_good_scheme(n)
p_ref = scheme.points
w_ref = scheme.weights
xyz_tmp = []
w_tmp = []
for i in range(0, Nt):
idp = T[i, :]
tet = np.vstack((P[idp[0], :], P[idp[1], :], P[idp[2], :], P[idp[3], :]))
V = np.abs(np.linalg.det(np.hstack((tet, np.ones((4, 1)))))) / 6
xyz_tmp.append(p_ref.T @ tet)
w_tmp.append(V * w_ref)
xyz = np.vstack(xyz_tmp)
w = np.hstack(w_tmp)
return w.astype(np.float32), xyz.astype(np.float32)
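# Illustrative usage sketch (not part of the original file): cubature over a
# single reference tetrahedron. The weights should sum to its volume, 1/6,
# assuming quadpy's simplex weights are normalized to one.
def _example_mesh_cubature_3d():
    P = np.array(
        [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
    )
    T = np.array([[0, 1, 2, 3]])
    w, xyz = domain_weights_and_points_3D(P, T, n=5)
    return w.sum(), xyz.shape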
# Householder reflector
def Householder_reflector(u0, v0):
# u and v are unit vectors
# Hu=v, Hv=u
u = u0.reshape((-1, 1)) / np.linalg.norm(u0)
v = v0.reshape((-1, 1)) / np.linalg.norm(v0)
return np.eye(3) + (u @ v.T + v @ u.T - u @ u.T - v @ v.T) / (1 - u.T @ v)
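# Illustrative check (not part of the original file): the reflector should swap
# two unit vectors, i.e. H @ u == v and H @ v == u.
def _example_householder():
    u = np.array([1.0, 0.0, 0.0])
    v = np.array([0.0, 1.0, 0.0])
    H = Householder_reflector(u, v)
    return np.allclose(H @ u.reshape(-1, 1), v.reshape(-1, 1)) and np.allclose(
        H @ v.reshape(-1, 1), u.reshape(-1, 1)
    )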
| modulus-sym-main | modulus/sym/utils/vpinn/integral.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .integral import *
from .test_functions import *
| modulus-sym-main | modulus/sym/utils/vpinn/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Helper functions for and classes for making test functions used in VPINNs
"""
import torch
import numpy as np
import sympy as sp
from sympy import I
import random, itertools
from modulus.sym.utils.sympy.torch_printer import torch_lambdify
x, y, z = sp.symbols("x, y ,z", real=True)
class meta_test_function:
def __init__(self, name, interval1d, sympy_fcn, is_real=True):
self.name = name
self.interval1d = interval1d
self.sympy_fcn = sympy_fcn
self.is_real = is_real
def my_trig(n, x):
return sp.exp(I * sp.pi * (n + 1) * x)
Legendre_test = meta_test_function("Legendre", [-1, 1], sp.legendre)
Chebyshev_T_test = meta_test_function("Chebyshev_T", [-1, 1], sp.chebyshevt)
Chebyshev_U_test = meta_test_function("Chebyshev_U", [-1, 1], sp.chebyshevu)
Trig_test = meta_test_function("Trig", [-1, 1], my_trig, False)
class Degree_nk:
def __init__(self, dim):
self.dim = dim
self.L = 0
self.last_degrees = [None, None]
def __iter__(self):
return self
def __next__(self):
dim = self.dim
if self.L == 0:
degrees = np.array([np.zeros(dim, dtype=int)])
else:
degrees = []
mask0 = np.ones(len(self.last_degrees[0]), dtype=bool)
if self.L > 1:
mask1 = np.ones(len(self.last_degrees[1]), dtype=bool)
for i in range(dim):
deg = self.last_degrees[0][mask0]
deg[:, i] += 1
degrees.append(deg)
mask0 &= self.last_degrees[0][:, i] == 0
if self.L > 1:
mask1 &= self.last_degrees[1][:, i] == 0
degrees = np.concatenate(degrees)
self.last_degrees[1] = self.last_degrees[0]
self.last_degrees[0] = degrees
self.L += 1
return degrees
class Test_Function:
def __init__(
self,
name_ord_dict=None,
box=None,
diff_list=None,
weight_fcn=None,
simplify=None,
):
        # name_ord_dict: dict mapping each test function family to the list of orders to use, e.g. {Legendre_test: [1, 2, 3], Trig_test: [1, 5]}
        # Order-0 Legendre is recommended, as it is the constant 1, which is very helpful in most problems
        # box: the lower and upper limits of the domain. It also gives the dimension of the domain and functions.
        # diff_list: the list of derivatives of the test functions to return, e.g. [[1,0,0],[0,2,0],'grad','Delta']
if diff_list is None:
diff_list = ["grad", "Delta"]
if box is None:
box = [[0, 0], [1, 1]]
if name_ord_dict is None:
name_ord_dict = {Legendre_test: [0, 1], Trig_test: [0, 1, 2, 3]}
if weight_fcn is None:
weight_fcn = 1.0
if simplify is None:
simplify = False
self.name_ord_dict = name_ord_dict
self.lb = box[0]
self.ub = box[1]
self.diff_list = diff_list
self.weight_fcn = weight_fcn
self.simplify = simplify
if self.simplify:
self.simplify_fcn = sp.simplify
else:
self.simplify_fcn = lambda x: x
self.dim = len(self.lb)
self.initialize()
self.make_fcn_list()
self.lambdify_fcn_list()
def initialize(self):
self.test_sympy_dict = {"v": []}
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"] = []
if self.dim >= 2:
self.test_sympy_dict["vy"] = []
if self.dim == 3:
self.test_sympy_dict["vz"] = []
elif k == "Delta":
self.test_sympy_dict["dv"] = []
else:
my_str = "v" + "x" * k[0]
if self.dim >= 2:
my_str += "y" * k[1]
if self.dim == 3:
my_str += "z" * k[2]
self.test_sympy_dict[my_str] = []
def generator(self, test_class):
ord_list = self.name_ord_dict[test_class]
if self.dim == 1:
x_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[0] - self.lb[0]) * (x - self.lb[0])
for k in ord_list:
if test_class.is_real:
yield self.simplify_fcn(
self.weight_fcn * test_class.sympy_fcn(k, x_trans)
)
else:
for f in test_class.sympy_fcn(k, x_trans).as_real_imag():
yield self.simplify_fcn(self.weight_fcn * f)
elif self.dim == 2:
x_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[0] - self.lb[0]) * (x - self.lb[0])
y_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[1] - self.lb[1]) * (y - self.lb[1])
ev = itertools.islice(Degree_nk(self.dim), ord_list[0], ord_list[-1] + 1)
for _ in ord_list:
ord = next(ev)
for k in ord:
if test_class.is_real:
yield self.simplify_fcn(
self.weight_fcn
* test_class.sympy_fcn(k[0], x_trans)
* test_class.sympy_fcn(k[1], y_trans)
)
else:
for fx in test_class.sympy_fcn(k[0], x_trans).as_real_imag():
for fy in test_class.sympy_fcn(
k[1], y_trans
).as_real_imag():
yield self.simplify_fcn(self.weight_fcn * fx * fy)
else:
x_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[0] - self.lb[0]) * (x - self.lb[0])
y_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[1] - self.lb[1]) * (y - self.lb[1])
z_trans = test_class.interval1d[0] + (
test_class.interval1d[1] - test_class.interval1d[0]
) / (self.ub[2] - self.lb[2]) * (z - self.lb[2])
ev = itertools.islice(Degree_nk(self.dim), ord_list[0], ord_list[-1] + 1)
for _ in ord_list:
ord = next(ev)
for k in ord:
if test_class.is_real:
yield self.simplify_fcn(
self.weight_fcn
* test_class.sympy_fcn(k[0], x_trans)
* test_class.sympy_fcn(k[1], y_trans)
* test_class.sympy_fcn(k[2], z_trans)
)
else:
for fx in test_class.sympy_fcn(k[0], x_trans).as_real_imag():
for fy in test_class.sympy_fcn(
k[1], y_trans
).as_real_imag():
for fz in test_class.sympy_fcn(
k[2], z_trans
).as_real_imag():
yield self.simplify_fcn(
self.weight_fcn * fx * fy * fz
)
return
def make_fcn_list(self):
if self.dim == 1:
for name in self.name_ord_dict.keys():
for fcn in self.generator(name):
self.test_sympy_dict["v"].append(fcn)
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"].append(
self.simplify_fcn(sp.diff(fcn, x))
)
elif k == "Delta":
self.test_sympy_dict["dv"].append(
self.simplify_fcn(sp.diff(fcn, x, 2))
)
else:
self.test_sympy_dict["v" + "x" * k[0]].append(
self.simplify_fcn(sp.diff(fcn, x, k[0]))
)
elif self.dim == 2:
for name in self.name_ord_dict.keys():
for fcn in self.generator(name):
self.test_sympy_dict["v"].append(fcn)
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"].append(
self.simplify_fcn(sp.diff(fcn, x))
)
self.test_sympy_dict["vy"].append(
self.simplify_fcn(sp.diff(fcn, y))
)
elif k == "Delta":
self.test_sympy_dict["dv"].append(
self.simplify_fcn(
sp.diff(fcn, x, 2) + sp.diff(fcn, y, 2)
)
)
else:
self.test_sympy_dict["v" + "x" * k[0] + "y" * k[1]].append(
self.simplify_fcn(sp.diff(fcn, x, k[0], y, k[1]))
)
elif self.dim == 3:
for name in self.name_ord_dict.keys():
for fcn in self.generator(name):
self.test_sympy_dict["v"].append(fcn)
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"].append(
self.simplify_fcn(sp.diff(fcn, x))
)
self.test_sympy_dict["vy"].append(
self.simplify_fcn(sp.diff(fcn, y))
)
self.test_sympy_dict["vz"].append(
self.simplify_fcn(sp.diff(fcn, z))
)
elif k == "Delta":
self.test_sympy_dict["dv"].append(
self.simplify_fcn(
sp.diff(fcn, x, 2)
+ sp.diff(fcn, y, 2)
+ sp.diff(fcn, z, 2)
)
)
else:
self.test_sympy_dict[
"v" + "x" * k[0] + "y" * k[1] + "z" * k[2]
].append(
self.simplify_fcn(
sp.diff(fcn, x, k[0], y, k[1], z, k[2])
)
)
self.num_fcn = len(self.test_sympy_dict["v"])
@staticmethod
def lambdify(f_sympy, var_list):
dim = len(var_list)
if f_sympy.is_number:
if dim == 1:
return lambda x0, f_sympy=f_sympy: torch.zeros_like(x0) + float(f_sympy)
elif dim == 2:
return lambda x0, y0, f_sympy=f_sympy: torch.zeros_like(x0) + float(
f_sympy
)
elif dim == 3:
return lambda x0, y0, z0, f_sympy=f_sympy: torch.zeros_like(x0) + float(
f_sympy
)
else:
return torch_lambdify(f_sympy, var_list, separable=True)
def lambdify_fcn_list(self):
self.test_lambda_dict = {}
if self.dim == 1:
var_list = [x]
elif self.dim == 2:
var_list = [x, y]
elif self.dim == 3:
var_list = [x, y, z]
for k in self.test_sympy_dict.keys():
self.test_lambda_dict[k] = []
for f_sympy in self.test_sympy_dict[k]:
self.test_lambda_dict[k].append(
Test_Function.lambdify(f_sympy, var_list)
) ### use torch_lambdify
def eval_test(self, ind, x, y=None, z=None):
# return N*M tensor
# N is the number of points
# M is the number of test functions
tmp_list = []
for f in self.test_lambda_dict[ind]:
if self.dim == 1:
tmp_list.append(f(x))
elif self.dim == 2:
assert y is not None, "please provide tensor y"
tmp_list.append(f(x, y))
elif self.dim == 3:
assert (y is not None) and (
z is not None
), "please provide tensor y and z"
tmp_list.append(f(x, y, z))
return torch.cat(tmp_list, 1) ### tf.concat -> torch.cat
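# Illustrative usage sketch (not part of the original file): build a 2D test
# function set on the unit square and evaluate the functions and their
# Laplacians on a batch of column tensors, following the eval_test convention
# above. Returned shapes are [npoints, num_fcn].
def _example_test_function():
    v = Test_Function(
        name_ord_dict={Legendre_test: [0, 1], Trig_test: [0, 1]},
        box=[[0.0, 0.0], [1.0, 1.0]],
        diff_list=["grad", "Delta"],
    )
    xt = torch.rand(100, 1)
    yt = torch.rand(100, 1)
    vals = v.eval_test("v", xt, yt)
    laps = v.eval_test("dv", xt, yt)
    return vals.shape, laps.shape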
class Vector_Test:
def __init__(self, v1, v2, v3=None, mix=None):
# 0<mix<1 is the percentage of how many test functions to generate.
# mix>=1 is the number of test functions to generate.
# self.dim: dimension of functions
# self.num: number of total functions at hand
# self.num_output: number of output functions
self.test_lambda_dict = {}
self.dim = v1.dim
self.v1 = v1
self.v2 = v2
if v3 is None:
self.num = 2
self.num_fcn = self.v1.num_fcn * self.v2.num_fcn
else:
self.num = 3
self.v3 = v3
self.num_fcn = self.v1.num_fcn * self.v2.num_fcn * self.v3.num_fcn
self.mix = mix
self.sample_vector_test()
def sample_vector_test(self):
mix = self.mix
if (mix is None) or (mix == "all") or (mix == 1):
self.mix = "all"
self.num_output = self.num_fcn
if self.num == 2:
self.output_ind = [
k
for k in itertools.product(
range(self.v1.num_fcn), range(self.v2.num_fcn)
)
]
else:
self.output_ind = [
k
for k in itertools.product(
range(self.v1.num_fcn),
range(self.v2.num_fcn),
range(self.v3.num_fcn),
)
]
elif 0 < mix < 1:
self.mix = mix
            # guarantee at least one test function is selected
            self.num_output = max(int(self.mix * self.num_fcn), 1)
if self.num == 2:
self.output_ind = random.sample(
set(
itertools.product(
range(self.v1.num_fcn), range(self.v2.num_fcn)
)
),
self.num_output,
)
else:
self.output_ind = random.sample(
set(
itertools.product(
range(self.v1.num_fcn),
range(self.v2.num_fcn),
range(self.v3.num_fcn),
)
),
self.num_output,
)
elif mix >= 1:
self.mix = int(mix)
self.num_output = self.mix
if self.num == 2:
self.output_ind = random.sample(
set(
itertools.product(
range(self.v1.num_fcn), range(self.v2.num_fcn)
)
),
self.num_output,
)
else:
self.output_ind = random.sample(
set(
itertools.product(
range(self.v1.num_fcn),
range(self.v2.num_fcn),
range(self.v3.num_fcn),
)
),
self.num_output,
)
def eval_test(self, ind, x, y=None, z=None):
# return a list of N*M tensor
# N is the number of points
# M is the number of test functions
# Usage:
# v = Vector_Test(v1, v2)
# v_x, v_y = v.eval_test('v', x_tensor, y_tensor)
if self.dim == 1:
var_list = [x]
elif self.dim == 2:
var_list = [x, y]
else:
var_list = [x, y, z]
v1_val = self.v1.eval_test(ind, *var_list)
v2_val = self.v2.eval_test(ind, *var_list)
if self.num == 2:
# Cannot use cuda graphs because of this
x_ind = torch.tensor([k[0] for k in self.output_ind], device=x.device)
y_ind = torch.tensor([k[1] for k in self.output_ind], device=x.device)
return v1_val.index_select(1, x_ind), v2_val.index_select(1, y_ind)
else:
# Cannot use cuda graphs because of this
v3_val = self.v3.eval_test(ind, *var_list)
x_ind = torch.tensor([k[0] for k in self.output_ind], device=x.device)
y_ind = torch.tensor([k[1] for k in self.output_ind], device=x.device)
z_ind = torch.tensor([k[2] for k in self.output_ind], device=x.device)
return (
v1_val.index_select(1, x_ind),
v2_val.index_select(1, y_ind),
v3_val.index_select(1, z_ind),
)
class RBF_Function:
def __init__(
self, dim=2, RBF_name=None, diff_list=None, weight_fcn=None, simplify=None
):
# center is N*d array, d is dimension.
# eps is 1D array with length N.
if RBF_name is None:
self.RBF_name = "Gaussian"
else:
self.RBF_name = RBF_name
if diff_list is None:
diff_list = ["grad", "Delta"]
if weight_fcn is None:
weight_fcn = 1.0
if simplify is None:
simplify = False
self.simplify = simplify
if self.simplify:
self.simplify_fcn = sp.simplify
else:
self.simplify_fcn = lambda x: x
self.dim = dim
self.diff_list = diff_list
self.weight_fcn = weight_fcn
if self.dim == 1:
self.r_sympy = sp.Abs(x)
elif self.dim == 2:
self.r_sympy = sp.sqrt(x**2 + y**2)
else:
self.r_sympy = sp.sqrt(x**2 + y**2 + z**2)
if self.RBF_name == "Inverse quadratic":
self.RBF_prototype = 1 / (1 + self.r_sympy**2)
elif self.RBF_name == "Inverse multiquadric":
self.RBF_prototype = 1 / sp.sqrt(1 + self.r_sympy**2)
else:
self.RBF_prototype = sp.exp(-self.r_sympy**2)
self.initialize()
self.make_fcn_list()
self.lambdify_fcn_list()
def initialize(self):
self.test_sympy_dict = {"v": []}
self.pow_dict = {"v": 0}
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"] = []
self.pow_dict["vx"] = 1
if self.dim >= 2:
self.test_sympy_dict["vy"] = []
self.pow_dict["vy"] = 1
if self.dim == 3:
self.test_sympy_dict["vz"] = []
self.pow_dict["vz"] = 1
elif k == "Delta":
self.test_sympy_dict["dv"] = []
self.pow_dict["dv"] = 2
else:
my_str = "v" + "x" * k[0]
if self.dim >= 2:
my_str += "y" * k[1]
if self.dim == 3:
my_str += "z" * k[2]
self.test_sympy_dict[my_str] = []
self.pow_dict[my_str] = sum(k)
def make_fcn_list(self):
self.test_sympy_dict["v"] = self.RBF_prototype
if self.dim == 1:
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x)
)
elif k == "Delta":
self.test_sympy_dict["dv"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x, 2)
)
else:
self.test_sympy_dict["v" + "x" * k[0]] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x, k[0])
)
elif self.dim == 2:
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x)
)
self.test_sympy_dict["vy"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, y)
)
elif k == "Delta":
self.test_sympy_dict["dv"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x, 2)
+ sp.diff(self.RBF_prototype, y, 2)
)
else:
self.test_sympy_dict[
"v" + "x" * k[0] + "y" * k[1]
] = self.simplify_fcn(sp.diff(self.RBF_prototype, x, k[0], y, k[1]))
else:
for k in self.diff_list:
if k == "grad":
self.test_sympy_dict["vx"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x)
)
self.test_sympy_dict["vy"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, y)
)
self.test_sympy_dict["vz"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, z)
)
elif k == "Delta":
self.test_sympy_dict["dv"] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x, 2)
+ sp.diff(self.RBF_prototype, y, 2)
+ sp.diff(self.RBF_prototype, z, 2)
)
else:
self.test_sympy_dict[
"v" + "x" * k[0] + "y" * k[1] + "z" * k[2]
] = self.simplify_fcn(
sp.diff(self.RBF_prototype, x, k[0], y, k[1], z, k[2])
)
def lambdify_fcn_list(self):
self.test_lambda_dict = {}
if self.dim == 1:
var_list = x
elif self.dim == 2:
var_list = [x, y]
elif self.dim == 3:
var_list = [x, y, z]
for k in self.test_sympy_dict.keys():
f_sympy = self.test_sympy_dict[k]
self.test_lambda_dict[k] = torch_lambdify(f_sympy, var_list, separable=True)
def eval_test(
self,
ind,
x,
y=None,
z=None,
x_center=None,
y_center=None,
z_center=None,
eps=None,
):
# return N*M tensor
# N is the number of points
# M is the number of test functions
# eps is a real number or tensor
# all input tensors are column vectors
assert x_center is not None, "please provide x_center"
if eps is None:
eps = torch.full(
[1, x_center.shape[0]], 10.0, device=x.device
) ### tf.fill -> torch.full
elif isinstance(eps, int) or isinstance(eps, float):
eps = torch.full([1, x_center.shape[0]], np.float32(eps), device=x.device)
elif isinstance(eps, torch.Tensor):
eps = torch.reshape(eps, [1, -1])
x_center_t = torch.transpose(
x_center, 0, 1
) ### tf.transpose -> torch.transpose
if self.dim == 1:
x_new = eps * (x - x_center_t)
elif self.dim == 2:
y_center_t = torch.transpose(
y_center, 0, 1
) ### tf.transpose -> torch.transpose
x_new = eps * (x - x_center_t)
y_new = eps * (y - y_center_t)
else:
y_center_t = torch.transpose(
y_center, 0, 1
) ### tf.transpose -> torch.transpose
z_center_t = torch.transpose(
z_center, 0, 1
) ### tf.transpose -> torch.transpose
x_new = eps * (x - x_center_t)
y_new = eps * (y - y_center_t)
z_new = eps * (z - z_center_t)
fcn = self.test_lambda_dict[ind]
p = self.pow_dict[ind]
if self.dim == 1:
return fcn(x_new) * torch.pow(eps, p) ### tf.pow -> torch.pow
elif self.dim == 2:
return fcn(x_new, y_new) * torch.pow(eps, p) ### tf.pow -> torch.pow
else:
return fcn(x_new, y_new, z_new) * torch.pow(eps, p) ### tf.pow -> torch.pow
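# Illustrative usage sketch (not part of the original file): evaluate Gaussian
# RBF test functions and their Laplacians at random 2D points, with centers on
# a 4x4 grid and a scalar shape parameter eps. Output shape is [npoints, ncenters].
def _example_rbf_function():
    rbf = RBF_Function(dim=2)
    x_pts = torch.rand(50, 1)
    y_pts = torch.rand(50, 1)
    cx, cy = torch.meshgrid(
        torch.linspace(0.0, 1.0, 4), torch.linspace(0.0, 1.0, 4), indexing="ij"
    )
    x_center = cx.reshape(-1, 1)
    y_center = cy.reshape(-1, 1)
    v = rbf.eval_test("v", x_pts, y_pts, x_center=x_center, y_center=y_center, eps=8.0)
    dv = rbf.eval_test(
        "dv", x_pts, y_pts, x_center=x_center, y_center=y_center, eps=8.0
    )
    return v.shape, dv.shape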
| modulus-sym-main | modulus/sym/utils/vpinn/test_functions.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from typing import List
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class RadialBasisArch(Arch):
"""
Radial Basis Neural Network.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
bounds : Dict[str, Tuple[float, float]]
        Bounds in which to randomly generate the radial basis function centers.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
nr_centers : int = 128
number of radial basis functions to use.
sigma : float = 0.1
Sigma in radial basis kernel.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
bounds: Dict[str, List[float]],
detach_keys: List[Key] = [],
nr_centers: int = 128,
sigma: float = 0.1,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
out_features = sum(self.output_key_dict.values())
self.nr_centers = nr_centers
self.sigma = sigma
self.centers = nn.Parameter(
torch.empty(nr_centers, len(bounds)), requires_grad=False
)
with torch.no_grad():
for idx, bound in enumerate(bounds.values()):
self.centers[:, idx].uniform_(bound[0], bound[1])
self.fc_layer = layers.FCLayer(
nr_centers,
out_features,
activation_fn=layers.Activation.IDENTITY,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
# no op since no scales
x = self.process_input(x, input_dict=self.input_key_dict, dim=-1)
x = x.unsqueeze(-2)
# no need to unsqueeze(0), we could and we have to rely on broadcast to
# make BatchedTensor work
centers = self.centers
radial_activation = torch.exp(
-0.5 * torch.square(torch.norm(centers - x, p=2, dim=-1) / self.sigma)
)
x = self.fc_layer(radial_activation)
x = self.process_output(x) # no op
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars, self.input_key_dict.keys(), self.detach_key_dict, -1
)
shape = (x.size(0), self.nr_centers, x.size(1))
x = x.unsqueeze(1).expand(shape)
centers = self.centers.expand(shape)
radial_activation = torch.exp(
-0.5 * torch.square(torch.norm(centers - x, p=2, dim=-1) / self.sigma)
)
x = self.fc_layer(radial_activation)
return self.prepare_output(x, self.output_key_dict, -1)
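# Illustrative usage sketch (not part of the original file): a radial basis
# network mapping (x, y) on the unit square to a single output u. The key names
# and bounds are assumptions for this example.
def _example_radial_basis_arch():
    model = RadialBasisArch(
        input_keys=[Key("x"), Key("y")],
        output_keys=[Key("u")],
        bounds={"x": [0.0, 1.0], "y": [0.0, 1.0]},
        nr_centers=64,
    )
    out = model({"x": torch.rand(128, 1), "y": torch.rand(128, 1)})
    return out["u"].shape  # torch.Size([128, 1])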
| modulus-sym-main | modulus/sym/models/radial_basis.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch import Tensor
from typing import Dict, List, Tuple
import modulus.sym.models.fully_connected as fully_connected
import modulus.sym.models.layers as layers
from modulus.sym.models.layers import Activation
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class FourierNetArch(Arch):
"""Fourier encoding fully-connected neural network.
This network is a fully-connected neural network that encodes the input features
    into Fourier space using sinusoidal activation functions. This helps reduce spectral
    bias during training.
Parameters
----------
input_keys : List[Key]
Input key list.
output_keys : List[Key]
Output key list.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
frequencies : Tuple, optional
A tuple that describes the Fourier encodings to use any inputs in the list
`['x', 'y', 'z', 't']`.
The first element describes the type of frequency encoding
with options, `'gaussian', 'full', 'axis', 'diagonal'`, by default
("axis", [i for i in range(10)])
:obj:`'gaussian'` samples frequency of Fourier series from Gaussian.
:obj:`'axis'` samples along axis of spectral space with the given list range of
frequencies.
:obj:`'diagonal'` samples along diagonal of spectral space with the given list range
of frequencies.
:obj:`'full'` samples along entire spectral space for all combinations of frequencies
in given list.
frequencies_params : Tuple, optional
Same as `frequencies` used for encodings of any inputs not in the list
`['x', 'y', 'z', 't']`.
By default ("axis", [i for i in range(10)])
activation_fn : Activation, optional
Activation function, by default :obj:`Activation.SILU`
layer_size : int, optional
Layer size for every hidden layer of the model, by default 512
nr_layers : int, optional
Number of hidden layers of the model, by default 6
skip_connections : bool, optional
Apply skip connections every 2 hidden layers, by default False
weight_norm : bool, optional
Use weight norm on fully connected layers, by default True
adaptive_activations : bool, optional
Use an adaptive activation functions, by default False
Variable Shape
--------------
- Input variable tensor shape: :math:`[N, size]`
- Output variable tensor shape: :math:`[N, size]`
Example
-------
Gaussian frequencies
>>> std = 1.0; num_freq = 10
>>> model = .fourier_net.FourierNetArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> frequencies=("gaussian", std, num_freq))
Diagonal frequencies
>>> frequencies = [1.0, 2.0, 3.0, 4.0]
>>> model = .fourier_net.FourierNetArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> frequencies=("diagonal", frequencies))
Full frequencies
>>> frequencies = [1.0, 2.0, 3.0, 4.0]
>>> model = .fourier_net.FourierNetArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> frequencies=("full", frequencies))
Note
----
For information regarding adaptive activations please refer to
https://arxiv.org/abs/1906.01170.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
frequencies: Tuple = ("axis", [i for i in range(10)]),
frequencies_params: Tuple = ("axis", [i for i in range(10)]),
activation_fn: Activation = Activation.SILU,
layer_size: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
weight_norm: bool = True,
adaptive_activations: bool = False,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
if frequencies_params is None:
frequencies_params = frequencies
self.xyzt_var = [x for x in self.input_key_dict if x in ["x", "y", "z", "t"]]
# Prepare slice index
xyzt_slice_index = self.prepare_slice_index(self.input_key_dict, self.xyzt_var)
self.register_buffer("xyzt_slice_index", xyzt_slice_index, persistent=False)
self.params_var = [
x for x in self.input_key_dict if x not in ["x", "y", "z", "t"]
]
params_slice_index = self.prepare_slice_index(
self.input_key_dict, self.params_var
)
self.register_buffer("params_slice_index", params_slice_index, persistent=False)
in_features_xyzt = sum(
(v for k, v in self.input_key_dict.items() if k in self.xyzt_var)
)
in_features_params = sum(
(v for k, v in self.input_key_dict.items() if k in self.params_var)
)
in_features = in_features_xyzt + in_features_params
out_features = sum(self.output_key_dict.values())
if in_features_xyzt > 0:
self.fourier_layer_xyzt = layers.FourierLayer(
in_features=in_features_xyzt, frequencies=frequencies
)
in_features += self.fourier_layer_xyzt.out_features()
else:
self.fourier_layer_xyzt = None
if in_features_params > 0:
self.fourier_layer_params = layers.FourierLayer(
in_features=in_features_params, frequencies=frequencies_params
)
in_features += self.fourier_layer_params.out_features()
else:
self.fourier_layer_params = None
self.fc = fully_connected.FullyConnectedArchCore(
in_features=in_features,
layer_size=layer_size,
out_features=out_features,
nr_layers=nr_layers,
skip_connections=skip_connections,
activation_fn=activation_fn,
adaptive_activations=adaptive_activations,
weight_norm=weight_norm,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.slice_input(x, self.xyzt_slice_index, dim=-1)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.slice_input(x, self.params_slice_index, dim=-1)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
x = self.fc(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.prepare_input(
in_vars,
self.xyzt_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.prepare_input(
in_vars,
self.params_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
x = self.fc(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
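# Illustrative usage sketch (not part of the original file): a Fourier feature
# network for a 2D problem with Gaussian frequency sampling, following the
# docstring examples above. Key names are assumptions for this example.
def _example_fourier_net_arch():
    model = FourierNetArch(
        input_keys=[Key("x"), Key("y")],
        output_keys=[Key("u")],
        frequencies=("gaussian", 1.0, 32),
        layer_size=128,
        nr_layers=4,
    )
    out = model({"x": torch.rand(256, 1), "y": torch.rand(256, 1)})
    return out["u"].shape  # torch.Size([256, 1])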
| modulus-sym-main | modulus/sym/models/fourier_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
from typing import Dict, Tuple, Callable, List, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
# TODO enum causes segmentation faults with current torch script. Go back to enum after torch script update
"""
@enum.unique
class InterpolationType(enum.Enum):
NEAREST_NEIGHBOR = (1, 1)
LINEAR = (2, 2)
SMOOTH_STEP_1 = (3, 2)
SMOOTH_STEP_2 = (4, 2)
GAUSSIAN = (6, 5)
def __init__(self, index, stride):
self.index = index
self.stride = stride
"""
@torch.jit.script
def linear_step(x: Tensor) -> Tensor:
return torch.clip(x, 0, 1)
@torch.jit.script
def smooth_step_1(x: Tensor) -> Tensor:
return torch.clip(3 * x**2 - 2 * x**3, 0, 1)
@torch.jit.script
def smooth_step_2(x: Tensor) -> Tensor:
return torch.clip(x**3 * (6 * x**2 - 15 * x + 10), 0, 1)
@torch.jit.script
def nearest_neighbor_weighting(dist_vec: Tensor, dx: Tensor) -> Tensor:
return torch.ones(dist_vec.shape[:-2] + [1] + [1], device=dist_vec.device)
@torch.jit.script
def _hyper_cube_weighting(lower_point: Tensor, upper_point: Tensor) -> Tensor:
dim = lower_point.shape[-1]
weights = []
weights = [upper_point[..., 0], lower_point[..., 0]]
for i in range(1, dim):
new_weights = []
for w in weights:
new_weights.append(w * upper_point[..., i])
new_weights.append(w * lower_point[..., i])
weights = new_weights
weights = torch.stack(weights, dim=-1)
return torch.unsqueeze(weights, dim=-1)
@torch.jit.script
def linear_weighting(dist_vec: Tensor, dx: Tensor) -> Tensor:
normalized_dist_vec = dist_vec / dx
lower_point = normalized_dist_vec[..., 0, :]
upper_point = -normalized_dist_vec[..., -1, :]
return _hyper_cube_weighting(lower_point, upper_point)
@torch.jit.script
def smooth_step_1_weighting(dist_vec: Tensor, dx: Tensor) -> Tensor:
normalized_dist_vec = dist_vec / dx
lower_point = smooth_step_1(normalized_dist_vec[..., 0, :])
upper_point = smooth_step_1(-normalized_dist_vec[..., -1, :])
return _hyper_cube_weighting(lower_point, upper_point)
@torch.jit.script
def smooth_step_2_weighting(dist_vec: Tensor, dx: Tensor) -> Tensor:
normalized_dist_vec = dist_vec / dx
lower_point = smooth_step_2(normalized_dist_vec[..., 0, :])
upper_point = smooth_step_2(-normalized_dist_vec[..., -1, :])
return _hyper_cube_weighting(lower_point, upper_point)
@torch.jit.script
def gaussian_weighting(dist_vec: Tensor, dx: Tensor) -> Tensor:
dim = dx.size(-1)
sharpen = 2.0
sigma = dx / sharpen
factor = 1.0 / ((2.0 * math.pi) ** (dim / 2.0) * sigma.prod())
gaussian = torch.exp(-0.5 * torch.square((dist_vec / sigma)))
gaussian = factor * gaussian.prod(dim=-1)
norm = gaussian.sum(dim=2, keepdim=True)
weights = torch.unsqueeze(gaussian / norm, dim=3)
return weights
# @torch.jit.script
def _gather_nd(params: Tensor, indices: Tensor) -> Tensor:
"""As seen here https://discuss.pytorch.org/t/how-to-do-the-tf-gather-nd-in-pytorch/6445/30"""
orig_shape = list(indices.shape)
num_samples = 1
for s in orig_shape[:-1]:
num_samples *= s
m = orig_shape[-1]
n = len(params.shape)
if m <= n:
out_shape = orig_shape[:-1] + list(params.shape)[m:]
else:
raise ValueError(
f"the last dimension of indices must less or equal to the rank of params. Got indices:{indices.shape}, params:{params.shape}. {m} > {n}"
)
indices = indices.reshape((num_samples, m)).transpose(0, 1).tolist()
output = params[indices] # (num_samples, ...)
return output.reshape(out_shape).contiguous()
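# Editorial example (not in the original source), mirroring tf.gather_nd semantics:
#   params  = torch.arange(6).reshape(2, 3)   # tensor([[0, 1, 2], [3, 4, 5]])
#   indices = torch.tensor([[0, 1], [1, 2]])
#   _gather_nd(params, indices)               # tensor([1, 5])
# i.e. each row of `indices` selects one element (or slice) of `params`.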
@torch.jit.script
def index_values_high_mem(points: Tensor, idx: Tensor) -> Tensor:
idx = idx.unsqueeze(3).repeat_interleave(points.size(-1), dim=3)
points = points.unsqueeze(1).repeat_interleave(idx.size(1), dim=1)
out = torch.gather(points, dim=2, index=idx)
return out
# @torch.jit.script
def index_values_low_mem(points: Tensor, idx: Tensor) -> Tensor:
"""
Input:
points: (b,m,c) float32 array, known points
idx: (b,n,3) int32 array, indices to known points
Output:
out: (b,m,n,c) float32 array, interpolated point values
"""
device = points.device
idxShape = idx.shape
batch_size = idxShape[0]
num_points = idxShape[1]
K = idxShape[2]
num_features = points.shape[2]
batch_indices = torch.reshape(
torch.tile(
torch.unsqueeze(torch.arange(0, batch_size).to(device), dim=0),
(num_points * K,),
),
[-1],
) # BNK
point_indices = torch.reshape(idx, [-1]) # BNK
vertices = _gather_nd(
points, torch.stack((batch_indices, point_indices), dim=1)
) # BNKxC
vertices4d = torch.reshape(
vertices, [batch_size, num_points, K, num_features]
) # BxNxKxC
return vertices4d
@torch.jit.script
def _grid_knn_idx(
query_points: Tensor,
grid: List[Tuple[float, float, int]],
stride: int,
padding: bool = True,
) -> Tensor:
# set k
k = stride // 2
# set device
device = query_points.device
# find nearest neighbors of query points from a grid
# dx vector on grid
dx = torch.tensor([(x[1] - x[0]) / (x[2] - 1) for x in grid])
dx = dx.view(1, 1, len(grid)).to(device)
# min point on grid (this will change if we are padding the grid)
start = torch.tensor([val[0] for val in grid]).to(device)
if padding:
start = start - (k * dx)
start = start.view(1, 1, len(grid))
# this is the center nearest neighbor in the grid
center_idx = (((query_points - start) / dx) + (stride / 2.0 % 1.0)).to(torch.int64)
# index window
idx_add = (
torch.arange(-((stride - 1) // 2), stride // 2 + 1).view(1, 1, -1).to(device)
)
# find all index in window around center index
    # TODO generalize to more dimensions
if len(grid) == 1:
idx_row_0 = center_idx[..., 0:1] + idx_add
idx = idx_row_0.view(idx_row_0.shape[0:2] + torch.Size([int(stride)]))
elif len(grid) == 2:
dim_size_1 = grid[1][2]
if padding:
dim_size_1 += 2 * k
idx_row_0 = dim_size_1 * (center_idx[..., 0:1] + idx_add)
idx_row_0 = idx_row_0.unsqueeze(-1)
idx_row_1 = center_idx[..., 1:2] + idx_add
idx_row_1 = idx_row_1.unsqueeze(2)
idx = (idx_row_0 + idx_row_1).view(
idx_row_0.shape[0:2] + torch.Size([int(stride**2)])
)
elif len(grid) == 3:
dim_size_1 = grid[1][2]
dim_size_2 = grid[2][2]
if padding:
dim_size_1 += 2 * k
dim_size_2 += 2 * k
idx_row_0 = dim_size_2 * dim_size_1 * (center_idx[..., 0:1] + idx_add)
idx_row_0 = idx_row_0.unsqueeze(-1).unsqueeze(-1)
idx_row_1 = dim_size_2 * (center_idx[..., 1:2] + idx_add)
idx_row_1 = idx_row_1.unsqueeze(2).unsqueeze(-1)
idx_row_2 = center_idx[..., 2:3] + idx_add
idx_row_2 = idx_row_2.unsqueeze(2).unsqueeze(3)
idx = (idx_row_0 + idx_row_1 + idx_row_2).view(
idx_row_0.shape[0:2] + torch.Size([int(stride**3)])
)
else:
raise RuntimeError
return idx
# TODO currently the `tolist` operation is not supported by torch script and when fixed torch script will be used
# @torch.jit.script
def interpolation(
query_points: Tensor,
context_grid: Tensor,
grid: List[Tuple[float, float, int]],
interpolation_type: str = "smooth_step_2",
mem_speed_trade: bool = True,
) -> Tensor:
# set stride TODO this will be replaced with InterpolationType later
if interpolation_type == "nearest_neighbor":
stride = 1
elif interpolation_type == "linear":
stride = 2
elif interpolation_type == "smooth_step_1":
stride = 2
elif interpolation_type == "smooth_step_2":
stride = 2
elif interpolation_type == "gaussian":
stride = 5
else:
raise RuntimeError
# set device
device = query_points.device
# useful values
dims = len(grid)
nr_channels = context_grid.size(0)
dx = [((x[1] - x[0]) / (x[2] - 1)) for x in grid]
# generate mesh grid of position information [grid_dim_1, grid_dim_2, ..., 2-3]
# NOTE the mesh grid is padded by stride//2
k = stride // 2
linspace = [
torch.linspace(x[0] - k * dx_i, x[1] + k * dx_i, x[2] + 2 * k)
for x, dx_i in zip(grid, dx)
]
meshgrid = torch.meshgrid(linspace)
meshgrid = torch.stack(meshgrid, dim=-1).to(device)
# pad context grid by k to avoid cuts on corners
padding = dims * (k, k)
context_grid = F.pad(context_grid, padding)
# reshape query points, context grid and mesh grid for easier indexing
# [1, grid_dim_1*grid_dim_2*..., 2-4]
nr_grid_points = int(torch.tensor([x[2] + 2 * k for x in grid]).prod())
meshgrid = meshgrid.view(1, nr_grid_points, dims)
context_grid = torch.reshape(context_grid, [1, nr_channels, nr_grid_points])
context_grid = torch.swapaxes(context_grid, 1, 2)
query_points = query_points.unsqueeze(0)
# compute index of nearest neighbor on grid to query points
idx = _grid_knn_idx(query_points, grid, stride, padding=True)
# index mesh grid to get distance vector
if mem_speed_trade:
mesh_grid_idx = index_values_low_mem(meshgrid, idx)
else:
mesh_grid_idx = index_values_high_mem(meshgrid, idx)
dist_vec = query_points.unsqueeze(2) - mesh_grid_idx
# make tf dx vec (for interpolation function)
dx = torch.tensor(dx, dtype=torch.float32)
dx = torch.reshape(dx, [1, 1, 1, dims]).to(device)
# compute bump function
if interpolation_type == "nearest_neighbor":
weights = nearest_neighbor_weighting(dist_vec, dx)
elif interpolation_type == "linear":
weights = linear_weighting(dist_vec, dx)
elif interpolation_type == "smooth_step_1":
weights = smooth_step_1_weighting(dist_vec, dx)
elif interpolation_type == "smooth_step_2":
weights = smooth_step_2_weighting(dist_vec, dx)
elif interpolation_type == "gaussian":
weights = gaussian_weighting(dist_vec, dx)
else:
raise RuntimeError
# index context grid with index
if mem_speed_trade:
context_grid_idx = index_values_low_mem(context_grid, idx)
else:
context_grid_idx = index_values_high_mem(context_grid, idx)
# interpolate points
product = weights * context_grid_idx
interpolated_points = product.sum(dim=2)
return interpolated_points[0]
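if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # interpolate a 2-channel field stored on a 16^3 grid onto random query points.
    # Shapes follow the code above: context_grid is (channels, nx, ny, nz),
    # query_points is (n_points, dims), and the result is (n_points, channels).
    grid = [(0.0, 1.0, 16), (0.0, 1.0, 16), (0.0, 1.0, 16)]
    context_grid = torch.rand(2, 16, 16, 16)
    query_points = torch.rand(128, 3)
    values = interpolation(
        query_points,
        context_grid,
        grid,
        interpolation_type="smooth_step_2",
        mem_speed_trade=True,
    )
    print(values.shape)  # expected: torch.Size([128, 2])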
| modulus-sym-main | modulus/sym/models/interpolation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from typing import Optional, List, Dict, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.models.layers import Activation
from modulus.sym.key import Key
from modulus.sym.constants import NO_OP_NORM
class FilterTypeMeta(enum.EnumMeta):
def __getitem__(self, name):
try:
return super().__getitem__(name.upper())
except (KeyError) as error:
raise KeyError(f"Invalid activation function {name}")
class FilterType(enum.Enum, metaclass=FilterTypeMeta):
FOURIER = enum.auto()
GABOR = enum.auto()
class MultiplicativeFilterNetArch(Arch):
"""
Multiplicative Filter Net with Activations
Reference: Fathony, R., Sahu, A.K., AI, A.A., Willmott, D. and Kolter, J.Z., MULTIPLICATIVE FILTER NETWORKS.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 512
Layer size for every hidden layer of the model.
nr_layers : int = 6
Number of hidden layers of the model.
skip_connections : bool = False
If true then apply skip connections every 2 hidden layers.
    activation_fn : layers.Activation = layers.Activation.IDENTITY
Activation function used by network.
filter_type : FilterType = FilterType.FOURIER
Filter type for multiplicative filter network, (Fourier or Gabor).
weight_norm : bool = True
Use weight norm on fully connected layers.
input_scale : float = 10.0
Scale inputs for multiplicative filters.
gabor_alpha : float = 6.0
Alpha value for Gabor filter.
gabor_beta : float = 1.0
Beta value for Gabor filter.
normalization : Optional[Dict[str, Tuple[float, float]]] = None
Normalization of input to network.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
layer_size: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
activation_fn=layers.Activation.IDENTITY,
filter_type: Union[FilterType, str] = FilterType.FOURIER,
weight_norm: bool = True,
input_scale: float = 10.0,
gabor_alpha: float = 6.0,
gabor_beta: float = 1.0,
normalization: Optional[Dict[str, Tuple[float, float]]] = None,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
self.nr_layers = nr_layers
self.skip_connections = skip_connections
if isinstance(filter_type, str):
filter_type = FilterType[filter_type]
if filter_type == FilterType.FOURIER:
self.first_filter = layers.FourierFilter(
in_features=in_features,
layer_size=layer_size,
nr_layers=nr_layers,
input_scale=input_scale,
)
elif filter_type == FilterType.GABOR:
self.first_filter = layers.GaborFilter(
in_features=in_features,
layer_size=layer_size,
nr_layers=nr_layers,
input_scale=input_scale,
alpha=gabor_alpha,
beta=gabor_beta,
)
else:
raise ValueError
self.filters = nn.ModuleList()
self.fc_layers = nn.ModuleList()
for i in range(nr_layers):
self.fc_layers.append(
layers.FCLayer(
in_features=layer_size,
out_features=layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
)
)
if filter_type == FilterType.FOURIER:
self.filters.append(
layers.FourierFilter(
in_features=in_features,
layer_size=layer_size,
nr_layers=nr_layers,
input_scale=input_scale,
)
)
elif filter_type == FilterType.GABOR:
self.filters.append(
layers.GaborFilter(
in_features=in_features,
layer_size=layer_size,
nr_layers=nr_layers,
input_scale=input_scale,
alpha=gabor_alpha,
beta=gabor_beta,
)
)
else:
raise ValueError
self.final_layer = layers.FCLayer(
in_features=layer_size,
out_features=out_features,
activation_fn=layers.Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
self.normalization: Optional[Dict[str, Tuple[float, float]]] = normalization
# iterate input keys and add NO_OP_NORM if it is not specified
if self.normalization is not None:
for key in self.input_key_dict:
if key not in self.normalization:
self.normalization[key] = NO_OP_NORM
self.register_buffer(
"normalization_tensor",
self._get_normalization_tensor(self.input_key_dict, self.normalization),
persistent=False,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self._tensor_normalize(x, self.normalization_tensor)
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
res = self.first_filter(x)
res_skip: Optional[Tensor] = None
for i, (fc_layer, filter) in enumerate(zip(self.fc_layers, self.filters)):
res_fc = fc_layer(res)
res_filter = filter(x)
res = res_fc * res_filter
if self.skip_connections and i % 2 == 0:
if res_skip is not None:
res, res_skip = res + res_skip, res
else:
res_skip = res
x = self.final_layer(res)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
self._normalize(in_vars, self.normalization),
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
res = self.first_filter(x)
res_skip: Optional[Tensor] = None
for i, (fc_layer, filter) in enumerate(zip(self.fc_layers, self.filters)):
res_fc = fc_layer(res)
res_filter = filter(x)
res = res_fc * res_filter
if self.skip_connections and i % 2 == 0:
if res_skip is not None:
res, res_skip = res + res_skip, res
else:
res_skip = res
res = self.final_layer(res)
return self.prepare_output(
res, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
def _normalize(
self,
in_vars: Dict[str, Tensor],
norms: Optional[Dict[str, Tuple[float, float]]],
) -> Dict[str, Tensor]:
if norms is None:
return in_vars
normalized_in_vars = {}
for k, v in in_vars.items():
if k in norms:
v = (v - norms[k][0]) / (norms[k][1] - norms[k][0])
v = 2 * v - 1
normalized_in_vars[k] = v
return normalized_in_vars
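if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # a small Fourier-filter network mapping (x, y) -> u; each hidden layer is
    # multiplied element-wise by a fresh filter of the raw input, per the paper.
    model = MultiplicativeFilterNetArch(
        input_keys=[Key("x"), Key("y")],
        output_keys=[Key("u")],
        layer_size=64,
        nr_layers=2,
        filter_type="fourier",
    )
    out = model({"x": torch.rand(100, 1), "y": torch.rand(100, 1)})
    print(out["u"].shape)  # expected: torch.Size([100, 1])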
| modulus-sym-main | modulus/sym/models/multiplicative_filter_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class DGMArch(Arch):
"""
A variation of the fully connected network.
Reference: Sirignano, J. and Spiliopoulos, K., 2018.
DGM: A deep learning algorithm for solving partial differential equations.
Journal of computational physics, 375, pp.1339-1364.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 512
Layer size for every hidden layer of the model.
nr_layers : int = 6
Number of hidden layers of the model.
    activation_fn : layers.Activation = layers.Activation.SIN
Activation function used by network.
adaptive_activations : bool = False
If True then use an adaptive activation function as described here
https://arxiv.org/abs/1906.01170.
weight_norm : bool = True
Use weight norm on fully connected layers.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
layer_size: int = 512,
nr_layers: int = 6,
activation_fn=layers.Activation.SIN,
adaptive_activations: bool = False,
weight_norm: bool = True,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
if adaptive_activations:
activation_par = nn.Parameter(torch.ones(1))
else:
activation_par = None
self.fc_start = layers.FCLayer(
in_features=in_features,
out_features=layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
)
self.dgm_layers = nn.ModuleList()
for _ in range(nr_layers - 1):
single_layer = {}
for key in ["z", "g", "r", "h"]:
single_layer[key] = layers.DGMLayer(
in_features_1=in_features,
in_features_2=layer_size,
out_features=layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
activation_par=activation_par,
)
self.dgm_layers.append(nn.ModuleDict(single_layer))
self.fc_end = layers.FCLayer(
in_features=layer_size,
out_features=out_features,
activation_fn=layers.Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x,
self.input_scales_tensor,
periodicity=self.periodicity,
input_dict=self.input_key_dict,
dim=-1,
)
s = self.fc_start(x)
for layer in self.dgm_layers:
# TODO: this can be optimized, 'z', 'g', 'r' can be merged into a
# single layer with 3x output size
z = layer["z"](x, s)
g = layer["g"](x, s)
r = layer["r"](x, s)
h = layer["h"](x, s * r)
s = h - g * h + z * s
x = self.fc_end(s)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
s = self.fc_start(x)
for layer in self.dgm_layers:
# TODO: this can be optimized, 'z', 'g', 'r' can be merged into a
# single layer with 3x output size
z = layer["z"](x, s)
g = layer["g"](x, s)
r = layer["r"](x, s)
h = layer["h"](x, s * r)
s = h - g * h + z * s
x = self.fc_end(s)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
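if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # each DGM cell updates the hidden state as s <- (1 - g) * h + z * s, mixing
    # the raw inputs x back in at every layer through the z/g/r/h gates.
    model = DGMArch(
        input_keys=[Key("x"), Key("t")],
        output_keys=[Key("u")],
        layer_size=64,
        nr_layers=3,
    )
    out = model({"x": torch.rand(32, 1), "t": torch.rand(32, 1)})
    print(out["u"].shape)  # expected: torch.Size([32, 1])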
| modulus-sym-main | modulus/sym/models/dgm.py |
# ignore_header_test
""""""
"""
SRResNet model. This code was modified from https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution
The following license is provided from their source:
MIT License
Copyright (c) 2020 Sagar Vinodababu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
from torch import nn
import torchvision
import math
from typing import List, Dict
from modulus.sym.key import Key
from modulus.sym.models.arch import Arch
from modulus.sym.models.layers import Activation, get_activation_fn
Tensor = torch.Tensor
class ConvolutionalBlock3d(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
batch_norm: bool = False,
activation_fn: Activation = Activation.IDENTITY,
):
super().__init__()
activation_fn = get_activation_fn(activation_fn)
# A container that will hold the layers in this convolutional block
layers = list()
# A convolutional layer
layers.append(
nn.Conv3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
)
)
# A batch normalization (BN) layer, if wanted
if batch_norm is True:
layers.append(nn.BatchNorm3d(num_features=out_channels))
self.activation_fn = get_activation_fn(activation_fn)
# Put together the convolutional block as a sequence of the layers in this container
self.conv_block = nn.Sequential(*layers)
def forward(self, input: Tensor) -> Tensor:
output = self.activation_fn(self.conv_block(input))
return output # (N, out_channels, w, h)
class PixelShuffle3d(nn.Module):
# reference: http://www.multisilicon.com/blog/a25332339.html
# This class is a 3d version of pixelshuffle.
def __init__(self, scale: int):
super().__init__()
self.scale = scale
def forward(self, input: Tensor) -> Tensor:
batch_size, channels, in_depth, in_height, in_width = input.size()
nOut = int(channels // self.scale**3)
out_depth = in_depth * self.scale
out_height = in_height * self.scale
out_width = in_width * self.scale
input_view = input.contiguous().view(
batch_size,
nOut,
self.scale,
self.scale,
self.scale,
in_depth,
in_height,
in_width,
)
output = input_view.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
return output.view(batch_size, nOut, out_depth, out_height, out_width)
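# Editorial note (not in the original source): this rearrangement maps
# (N, C * r**3, D, H, W) -> (N, C, D*r, H*r, W*r); for example, with scale r=2 an
# input of shape (1, 16, 4, 4, 4) becomes (1, 2, 8, 8, 8).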
class SubPixelConvolutionalBlock3d(nn.Module):
def __init__(
self, kernel_size: int = 3, conv_layer_size: int = 64, scaling_factor: int = 2
):
super().__init__()
        # A convolutional layer that increases the number of channels by scaling factor^3, followed by pixel shuffle and PReLU
self.conv = nn.Conv3d(
in_channels=conv_layer_size,
out_channels=conv_layer_size * (scaling_factor**3),
kernel_size=kernel_size,
padding=kernel_size // 2,
)
# These additional channels are shuffled to form additional pixels, upscaling each dimension by the scaling factor
self.pixel_shuffle = PixelShuffle3d(scaling_factor)
self.prelu = nn.PReLU()
def forward(self, input: Tensor) -> Tensor:
        output = self.conv(input)  # (N, n_channels * scaling factor^3, d, w, h)
        output = self.pixel_shuffle(
            output
        )  # (N, n_channels, d * scaling factor, w * scaling factor, h * scaling factor)
        output = self.prelu(
            output
        )  # (N, n_channels, d * scaling factor, w * scaling factor, h * scaling factor)
return output
class ResidualConvBlock3d(nn.Module):
def __init__(
self,
n_layers: int = 1,
kernel_size: int = 3,
conv_layer_size: int = 64,
activation_fn: Activation = Activation.IDENTITY,
):
super().__init__()
layers = []
for i in range(n_layers - 1):
layers.append(
ConvolutionalBlock3d(
in_channels=conv_layer_size,
out_channels=conv_layer_size,
kernel_size=kernel_size,
batch_norm=True,
activation_fn=activation_fn,
)
)
# The final convolutional block with no activation
layers.append(
ConvolutionalBlock3d(
in_channels=conv_layer_size,
out_channels=conv_layer_size,
kernel_size=kernel_size,
batch_norm=True,
)
)
self.conv_layers = nn.Sequential(*layers)
def forward(self, input: Tensor) -> Tensor:
residual = input # (N, n_channels, w, h)
output = self.conv_layers(input) # (N, n_channels, w, h)
output = output + residual # (N, n_channels, w, h)
return output
class SRResNetArch(Arch):
"""3D super resolution network
Based on the implementation: https://github.com/sgrvinod/a-PyTorch-Tutorial-to-Super-Resolution
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
large_kernel_size : int, optional
convolutional kernel size for first and last convolution, by default 7
small_kernel_size : int, optional
convolutional kernel size for internal convolutions, by default 3
conv_layer_size : int, optional
Latent channel size, by default 32
n_resid_blocks : int, optional
        Number of residual blocks, by default 8
scaling_factor : int, optional
Scaling factor to increase the output feature size compared to the input (2, 4, or 8), by default 8
activation_fn : Activation, optional
Activation function, by default Activation.PRELU
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
large_kernel_size: int = 7,
small_kernel_size: int = 3,
conv_layer_size: int = 32,
n_resid_blocks: int = 8,
scaling_factor: int = 8,
activation_fn: Activation = Activation.PRELU,
):
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
in_channels = sum(self.input_key_dict.values())
out_channels = sum(self.output_key_dict.values())
self.var_dim = 1
# Scaling factor must be 2, 4, or 8
scaling_factor = int(scaling_factor)
assert scaling_factor in {2, 4, 8}, "The scaling factor must be 2, 4, or 8!"
# The first convolutional block
self.conv_block1 = ConvolutionalBlock3d(
in_channels=in_channels,
out_channels=conv_layer_size,
kernel_size=large_kernel_size,
batch_norm=False,
activation_fn=activation_fn,
)
# A sequence of n_resid_blocks residual blocks, each containing a skip-connection across the block
self.residual_blocks = nn.Sequential(
*[
ResidualConvBlock3d(
n_layers=2,
kernel_size=small_kernel_size,
conv_layer_size=conv_layer_size,
activation_fn=activation_fn,
)
for i in range(n_resid_blocks)
]
)
# Another convolutional block
self.conv_block2 = ConvolutionalBlock3d(
in_channels=conv_layer_size,
out_channels=conv_layer_size,
kernel_size=small_kernel_size,
batch_norm=True,
)
# Upscaling is done by sub-pixel convolution, with each such block upscaling by a factor of 2
n_subpixel_convolution_blocks = int(math.log2(scaling_factor))
self.subpixel_convolutional_blocks = nn.Sequential(
*[
SubPixelConvolutionalBlock3d(
kernel_size=small_kernel_size,
conv_layer_size=conv_layer_size,
scaling_factor=2,
)
for i in range(n_subpixel_convolution_blocks)
]
)
# The last convolutional block
self.conv_block3 = ConvolutionalBlock3d(
in_channels=conv_layer_size,
out_channels=out_channels,
kernel_size=large_kernel_size,
batch_norm=False,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
input = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
periodicity=self.periodicity,
)
        output = self.conv_block1(input)  # (N, conv_layer_size, d, w, h)
        residual = output  # (N, conv_layer_size, d, w, h)
        output = self.residual_blocks(output)  # (N, conv_layer_size, d, w, h)
        output = self.conv_block2(output)  # (N, conv_layer_size, d, w, h)
        output = output + residual  # (N, conv_layer_size, d, w, h)
        output = self.subpixel_convolutional_blocks(
            output
        )  # (N, conv_layer_size, d * scaling factor, w * scaling factor, h * scaling factor)
        output = self.conv_block3(
            output
        )  # (N, out_channels, d * scaling factor, w * scaling factor, h * scaling factor)
return self.prepare_output(
output, self.output_key_dict, dim=1, output_scales=self.output_scales
)
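if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # upscale a single-channel 8^3 volume by a factor of 2 (one sub-pixel block).
    model = SRResNetArch(
        input_keys=[Key("coarse", size=1)],
        output_keys=[Key("fine", size=1)],
        n_resid_blocks=2,
        scaling_factor=2,
    )
    out = model({"coarse": torch.rand(1, 1, 8, 8, 8)})
    print(out["fine"].shape)  # expected: torch.Size([1, 1, 16, 16, 16])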
| modulus-sym-main | modulus/sym/models/super_res_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class ModifiedFourierNetArch(Arch):
"""
    A modified Fourier Network which enables multiplicative interactions
    between the Fourier features and hidden layers.
References:
(1) Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S.,
Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T. and Ng, R., 2020.
Fourier features let networks learn high frequency functions in low dimensional domains.
arXiv preprint arXiv:2006.10739.
(2) Wang, S., Teng, Y. and Perdikaris, P., 2020.
Understanding and mitigating gradient pathologies in physics-informed
neural networks. arXiv preprint arXiv:2001.04536.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
frequencies : Tuple[str, List[float]] = ("axis", [i for i in range(10)])
        A tuple that describes the Fourier encodings to use for any inputs in
the list `['x', 'y', 'z', 't']`.
The first element describes the type of frequency encoding
with options, `'gaussian', 'full', 'axis', 'diagonal'`.
`'gaussian'` samples frequency of Fourier series from Gaussian.
`'axis'` samples along axis of spectral space with the given list range of frequencies.
`'diagonal'` samples along diagonal of spectral space with the given list range of frequencies.
`'full'` samples along entire spectral space for all combinations of frequencies in given list.
frequencies_params : Tuple[str, List[float]] = ("axis", [i for i in range(10)])
Same as `frequencies` except these are used for encodings
on any inputs not in the list `['x', 'y', 'z', 't']`.
activation_fn : layers.Activation = layers.Activation.SILU
Activation function used by network.
layer_size : int = 512
Layer size for every hidden layer of the model.
nr_layers : int = 6
Number of hidden layers of the model.
skip_connections : bool = False
If true then apply skip connections every 2 hidden layers.
weight_norm : bool = True
Use weight norm on fully connected layers.
adaptive_activations : bool = False
If True then use an adaptive activation function as described here
https://arxiv.org/abs/1906.01170.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
frequencies=("axis", [i for i in range(10)]),
frequencies_params=("axis", [i for i in range(10)]),
activation_fn=layers.Activation.SILU,
layer_size: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
weight_norm: bool = True,
adaptive_activations: bool = False,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
self.skip_connections = skip_connections
if adaptive_activations:
activation_par = nn.Parameter(torch.ones(1))
else:
activation_par = None
self.xyzt_var = [x for x in self.input_key_dict if x in ["x", "y", "z", "t"]]
# Prepare slice index
xyzt_slice_index = self.prepare_slice_index(self.input_key_dict, self.xyzt_var)
self.register_buffer("xyzt_slice_index", xyzt_slice_index, persistent=False)
self.params_var = [
x for x in self.input_key_dict if x not in ["x", "y", "z", "t"]
]
params_slice_index = self.prepare_slice_index(
self.input_key_dict, self.params_var
)
self.register_buffer("params_slice_index", params_slice_index, persistent=False)
in_features_xyzt = sum(
(v for k, v in self.input_key_dict.items() if k in self.xyzt_var)
)
in_features_params = sum(
(v for k, v in self.input_key_dict.items() if k in self.params_var)
)
in_features = in_features_xyzt + in_features_params
out_features = sum(self.output_key_dict.values())
if in_features_xyzt > 0:
self.fourier_layer_xyzt = layers.FourierLayer(
in_features=in_features_xyzt, frequencies=frequencies
)
in_features += self.fourier_layer_xyzt.out_features()
else:
self.fourier_layer_xyzt = None
if in_features_params > 0:
self.fourier_layer_params = layers.FourierLayer(
in_features=in_features_params, frequencies=frequencies_params
)
in_features += self.fourier_layer_params.out_features()
else:
self.fourier_layer_params = None
self.fc_u = layers.FCLayer(
in_features=in_features,
out_features=layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
activation_par=activation_par,
)
self.fc_v = layers.FCLayer(
in_features=in_features,
out_features=layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
activation_par=activation_par,
)
self.fc_0 = layers.FCLayer(
in_features,
layer_size,
activation_fn,
weight_norm,
activation_par=activation_par,
)
self.fc_layers = nn.ModuleList()
for i in range(nr_layers - 1):
self.fc_layers.append(
layers.FCLayer(
layer_size,
layer_size,
activation_fn,
weight_norm,
activation_par=activation_par,
)
)
self.final_layer = layers.FCLayer(
in_features=layer_size,
out_features=out_features,
activation_fn=layers.Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.slice_input(x, self.xyzt_slice_index, dim=-1)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.slice_input(x, self.params_slice_index, dim=-1)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
xu = self.fc_u(x)
xv = self.fc_v(x)
x = self.fc_0(x)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers, 1):
x = layer(x)
x = xu - x * xu + x * xv
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
x = self.final_layer(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.prepare_input(
in_vars,
self.xyzt_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.prepare_input(
in_vars,
self.params_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
xu = self.fc_u(x)
xv = self.fc_v(x)
x = self.fc_0(x)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers, 1):
x = layer(x)
x = xu - x * xu + x * xv
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
x = self.final_layer(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
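if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # the spatial inputs (x, y) get the xyzt Fourier encoding while the extra
    # parameter "nu" is encoded separately by fourier_layer_params.
    model = ModifiedFourierNetArch(
        input_keys=[Key("x"), Key("y"), Key("nu")],
        output_keys=[Key("u"), Key("v")],
        layer_size=64,
        nr_layers=2,
    )
    out = model(
        {"x": torch.rand(16, 1), "y": torch.rand(16, 1), "nu": torch.rand(16, 1)}
    )
    print(out["u"].shape, out["v"].shape)  # expected: torch.Size([16, 1]) each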
| modulus-sym-main | modulus/sym/models/modified_fourier_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Dict, Tuple, Union
import torch
import functorch
import logging
import torch.nn as nn
from torch import Tensor
from typing import Optional, Dict, Union, List
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
from modulus.sym.manager import GraphManager
logger = logging.getLogger(__name__)
class DeepONetArch(Arch):
"""DeepONet
Parameters
----------
branch_net : Arch
Branch net model. Output key should be variable "branch"
trunk_net : Arch
Trunk net model. Output key should be variable "trunk"
output_keys : List[Key], optional
Output variable keys, by default None
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
branch_dim : Union[None, int], optional
Dimension of the branch encoding vector. If none, the model will use the
variable trunk dimension. Should be set for 2D/3D models. By default None
trunk_dim : Union[None, int], optional
Dimension of the trunk encoding vector. If none, the model will use the
variable trunk dimension. Should be set for 2D/3D models. By default None
Note
----
The branch and trunk net should ideally output to the same dimensionality, but if
this is not the case the DeepO model will use a linear layer to match both branch/trunk
dimensionality to (branch_dim + trunk_dim)/2. This vector will then be
used for the final output multiplication.
Note
----
Higher dimension branch networks are supported. If the output is not a 1D vector the
DeepO model will reshape for the final output multiplication.
Note
----
For more info on DeepONet refer to: https://arxiv.org/abs/1910.03193
"""
def __init__(
self,
branch_net: Arch,
trunk_net: Arch,
output_keys: List[Key] = None,
detach_keys: List[Key] = [],
branch_dim: Union[None, int] = None,
trunk_dim: Union[None, int] = None,
) -> None:
super().__init__(
input_keys=[],
output_keys=output_keys,
detach_keys=detach_keys,
)
# branch net
self.branch_net = branch_net
self.branch_dim = branch_dim
# trunk net
self.trunk_net = trunk_net
self.trunk_dim = trunk_dim
        # Set up input keys; note that the trunk and branch nets must already be initialized
self.input_keys = self.branch_net.input_keys + self.trunk_net.input_keys
self.input_key_dict = {str(var): var.size for var in self.input_keys}
self.input_scales = {str(k): k.scale for k in self.input_keys}
# Set up output linear layer for multiple variables
# If output dims have not been defined, attempt to set then through the variables
if self.trunk_dim is None:
self.trunk_dim = sum(self.trunk_net.output_key_dict.values())
if self.branch_dim is None:
self.branch_dim = sum(self.branch_net.output_key_dict.values())
self.deepo_dim = (self.trunk_dim + self.branch_dim) // 2
out_features = sum(self.output_key_dict.values())
if not self.trunk_dim == self.branch_dim:
self.branch_linear = torch.nn.Linear(
self.branch_dim, self.deepo_dim, bias=False
)
self.trunk_linear = torch.nn.Linear(
self.trunk_dim, self.deepo_dim, bias=False
)
else:
self.branch_linear = torch.nn.Identity()
self.trunk_linear = torch.nn.Identity()
self.output_linear = torch.nn.Linear(self.deepo_dim, out_features, bias=False)
# prepare slice indices
branch_slice_index = self.prepare_slice_index(
self.input_key_dict, self.branch_net.input_key_dict.keys()
)
self.register_buffer("branch_slice_index", branch_slice_index, persistent=False)
trunk_slice_index = self.prepare_slice_index(
self.input_key_dict, self.trunk_net.input_key_dict.keys()
)
self.register_buffer("trunk_slice_index", trunk_slice_index, persistent=False)
# Because we directly call `branch_net._tensor_forward` and `trunk_net._tensor_forward`
# method in `self._tensor_forward`, we have to redirect `self.forward` to
# `self._dict_forward` if one of them does not support func_arch.
if not self.supports_func_arch:
self.forward = self._dict_forward
if GraphManager().func_arch:
logger.warning(
f"The combination of branch_net ({type(self.branch_net)}) and trunk_net"
+ f"({type(self.trunk_net)}) does not support FuncArch."
)
@property
def supports_func_arch(self) -> bool:
return self.branch_net.supports_func_arch and self.trunk_net.supports_func_arch
def _tensor_forward(self, x: Tensor) -> Tensor:
assert self.supports_func_arch, (
f"The combination of branch_net {type(self.branch_net)} and trunk_net "
+ f"{type(self.trunk_net)} does not support FuncArch."
)
branch_x = self.slice_input(x, self.branch_slice_index, dim=-1)
trunk_x = self.slice_input(x, self.trunk_slice_index, dim=-1)
branch_output = self.branch_net._tensor_forward(branch_x)
trunk_output = self.trunk_net._tensor_forward(trunk_x)
        # Convert outputs into 1D feature vectors
if torch._C._functorch.is_gradtrackingtensor(
trunk_output
) or torch._C._functorch.is_batchedtensor(trunk_output):
# batched tensor does not have the original shape
branch_output = branch_output.view(-1)
trunk_output = trunk_output.view(-1)
else:
branch_output = branch_output.view(branch_output.shape[0], -1)
trunk_output = trunk_output.view(trunk_output.shape[0], -1)
assert (
branch_output.size(-1) == self.branch_dim
), f"Invalid feature dimension from branch net, expected {self.branch_dim} but found {branch_output.size(-1)}"
assert (
trunk_output.size(-1) == self.trunk_dim
), f"Invalid feature dimension from trunk net, expected {self.trunk_dim} but found {trunk_output.size(-1)}"
# Send through final linear layers
branch_output = self.branch_linear(branch_output)
trunk_output = self.trunk_linear(trunk_output)
y = self.output_linear(branch_output * trunk_output)
y = self.process_output(y, self.output_scales_tensor)
return y
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
# Forward pass of branch and trunk net
branch_output = self.branch_net(in_vars)
trunk_output = self.trunk_net(in_vars)
branch_output = branch_output["branch"]
trunk_output = trunk_output["trunk"]
        # Convert outputs into 1D feature vectors
branch_output = branch_output.view(branch_output.shape[0], -1)
trunk_output = trunk_output.view(trunk_output.shape[0], -1)
assert (
branch_output.size(-1) == self.branch_dim
), f"Invalid feature dimension from branch net, expected {self.branch_dim} but found {branch_output.size(-1)}"
assert (
trunk_output.size(-1) == self.trunk_dim
), f"Invalid feature dimension from trunk net, expected {self.trunk_dim} but found {trunk_output.size(-1)}"
# Send through final linear layers
branch_output = self.branch_linear(branch_output)
trunk_output = self.trunk_linear(trunk_output)
out = self.output_linear(branch_output * trunk_output)
return self.prepare_output(
out, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
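if __name__ == "__main__":
    # Editorial usage sketch (an assumption, not part of the original module):
    # the branch net encodes a sampled input function "a" and the trunk net encodes
    # the query coordinate "x"; their output keys must be "branch" and "trunk".
    # Assumes the fully connected arch from modulus.sym.models.fully_connected.
    from modulus.sym.models.fully_connected import FullyConnectedArch

    branch = FullyConnectedArch(
        input_keys=[Key("a", size=100)], output_keys=[Key("branch", size=128)]
    )
    trunk = FullyConnectedArch(
        input_keys=[Key("x")], output_keys=[Key("trunk", size=128)]
    )
    model = DeepONetArch(branch_net=branch, trunk_net=trunk, output_keys=[Key("u")])
    out = model({"a": torch.rand(32, 100), "x": torch.rand(32, 1)})
    print(out["u"].shape)  # expected: torch.Size([32, 1])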
| modulus-sym-main | modulus/sym/models/deeponet.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch import Tensor
import numpy as np
import logging
import functorch
import ast
from termcolor import colored
from inspect import signature, _empty
from typing import Optional, Callable, List, Dict, Union, Tuple
from modulus.sym.constants import NO_OP_SCALE
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.constants import JIT_PYTORCH_VERSION
from modulus.sym.distributed import DistributedManager
from modulus.sym.manager import JitManager, JitArchMode
from modulus.sym.models.layers import Activation
logger = logging.getLogger(__name__)
class Arch(nn.Module):
"""
Base class for all neural networks
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
):
super().__init__()
self.input_keys = input_keys
self.output_keys = output_keys
self.periodicity = periodicity
self.saveable = True
self.input_key_dict = {str(var): var.size for var in input_keys}
self.output_key_dict = {str(var): var.size for var in output_keys}
# input and output scales
input_scales = {str(k): k.scale for k in input_keys}
output_scales = {str(k): k.scale for k in output_keys}
self.input_scales = (
None
if all([s == NO_OP_SCALE for s in input_scales.values()])
else input_scales
)
self.output_scales = (
None
if all([s == NO_OP_SCALE for s in output_scales.values()])
else output_scales
)
# Register scales tensors as buffers.
# Buffer is allowed to be None, in this case it is a no-op in process_input function.
self.register_buffer(
"input_scales_tensor",
self._get_scalers_tensor(self.input_key_dict, self.input_scales),
persistent=False,
)
self.register_buffer(
"output_scales_tensor",
self._get_scalers_tensor(self.output_key_dict, self.output_scales),
persistent=False,
)
self.detach_keys = detach_keys
self.detach_key_dict: Dict[str, int] = {
str(var): var.size for var in detach_keys
}
self.var_dim = -1
# If no detach keys, add a dummy for TorchScript compilation
if not self.detach_key_dict:
dummy_str = "_"
while dummy_str in self.input_key_dict:
dummy_str += "_"
self.detach_key_dict[dummy_str] = 0
def make_node(self, name: str, jit: Optional[bool] = None, optimize: bool = True):
"""Makes neural network node for unrolling with Modulus `Graph`.
Parameters
----------
name : str
This will be used as the name of created node.
jit : bool
If true then compile the whole arch with jit, https://pytorch.org/docs/stable/jit.html.
If None (default), will use the JitManager to get the global flag and mode (the default
mode is `jit_arch_mode="only_activation"`), which could be configured in the hydra config.
Please note that jit=true does not work with functorch and autograd trim edges.
optimize : bool
If true then treat parameters as optimizable.
Examples
--------
Here is a simple example of creating a node from the fully connected network::
>>> from .fully_connected import FullyConnectedArch
>>> from modulus.sym.key import Key
>>> fc_arch = FullyConnectedArch([Key('x'), Key('y')], [Key('u')])
>>> fc_node = fc_arch.make_node(name="fc_node")
>>> print(fc_node)
node: fc_node
inputs: [x, y]
derivatives: []
outputs: [u]
optimize: True
"""
manager = DistributedManager()
model_parallel_rank = (
manager.group_rank("model_parallel") if manager.distributed else 0
)
# set name for loading and saving model
self.name = name
# set checkpoint filename for model
# append model parallel rank since each process in the first model
# parallel group will save a separate checkpoint
self.checkpoint_filename = name + f".{model_parallel_rank}.pth"
if jit:
logger.warning(
"Passing jit=true when constructing Arch Node is deprecated, "
"please remove it as JITManager could automatically handel it."
)
elif jit is None:
jit = JitManager().enabled and JitManager().arch_mode == JitArchMode.ALL
# compile network
if jit:
# Warn user if pytorch version difference
if not torch.__version__ == JIT_PYTORCH_VERSION:
logger.warning(
f"Installed PyTorch version {torch.__version__} is not TorchScript"
+ f" supported in Modulus. Version {JIT_PYTORCH_VERSION} is officially supported."
)
arch = torch.jit.script(self)
node_name = "Arch Node (jit): " + ("" if name is None else str(name))
logger.info("Jit compiling network arch")
else:
arch = self
node_name = "Arch Node: " + ("" if name is None else str(name))
# Set save and load methods TODO this is hacky but required for jit
arch.save = self.save
arch.load = self.load
# Create and return node from this network architecture
net_node = Node(
self.input_keys, self.output_keys, arch, name=node_name, optimize=optimize
)
return net_node
def save(self, directory):
torch.save(self.state_dict(), directory + "/" + self.checkpoint_filename)
def load(self, directory, map_location=None):
self.load_state_dict(
torch.load(
directory + "/" + self.checkpoint_filename, map_location=map_location
)
)
def set_scaling(
self,
var_name: str,
shift: float = 0,
scale: float = 1,
):
if var_name in self.input_key_dict:
self.input_scales[var_name] = (shift, scale)
if var_name in self.output_key_dict:
self.output_scales[var_name] = (shift, scale)
self.input_scales_tensor = self._get_scalers_tensor(
self.input_key_dict, self.input_scales
)
self.output_scales_tensor = self._get_scalers_tensor(
self.output_key_dict, self.output_scales
)
@staticmethod
def _get_scalers_tensor(
key_size_dict: Dict[str, int],
key_scales: Union[Dict[str, Tuple[float, float]], None] = None,
) -> Tensor:
if key_scales is None:
return None
scalers_tensor = [[], []]
for key, size in key_size_dict.items():
for _ in range(size):
scalers_tensor[0].append(key_scales[key][0])
scalers_tensor[1].append(key_scales[key][1])
return torch.tensor(scalers_tensor)
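    # Editorial example (not in the original source): for key_size_dict
    # {"x": 1, "y": 2} and key_scales {"x": (0.0, 1.0), "y": (2.0, 3.0)} this
    # returns tensor([[0., 2., 2.], [1., 3., 3.]]) -- row 0 holds the per-column
    # shifts and row 1 the per-column scales used by process_input/process_output.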
@staticmethod
def prepare_input(
input_variables: Dict[str, Tensor],
mask: List[str],
detach_dict: Dict[str, int],
dim: int = 0,
input_scales: Union[Dict[str, Tuple[float, float]], None] = None,
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
) -> Tensor:
output_tensor = []
for key in mask:
if key in detach_dict:
x = input_variables[key].detach()
else:
x = input_variables[key]
# Scale input data
if input_scales is not None:
x = (x - input_scales[key][0]) / input_scales[key][1]
append_tensor = [x]
if periodicity is not None:
if key in list(periodicity.keys()):
scaled_input = (x - periodicity[key][0]) / (
periodicity[key][1] - periodicity[key][0]
)
sin_tensor = torch.sin(2.0 * np.pi * scaled_input)
cos_tensor = torch.cos(2.0 * np.pi * scaled_input)
append_tensor = [sin_tensor, cos_tensor]
output_tensor += append_tensor
return torch.cat(output_tensor, dim=dim)
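    # Editorial note (not in the original source): a periodic key of size 1 is
    # replaced by the pair (sin(2*pi*s), cos(2*pi*s)) with s = (x - lo) / (hi - lo),
    # so the concatenated tensor gains one extra column per periodic scalar input.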
@staticmethod
def concat_input(
input_variables: Dict[str, Tensor],
mask: List[str],
detach_dict: Union[Dict[str, int], None] = None,
dim: int = -1,
) -> Tensor:
output_tensor = []
for key in mask:
if detach_dict is not None and key in detach_dict:
x = input_variables[key].detach()
else:
x = input_variables[key]
output_tensor += [x]
return torch.cat(output_tensor, dim=dim)
@staticmethod
def process_input(
input_tensor: Tensor,
input_scales_tensor: Union[Tensor, None] = None,
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
input_dict: Union[Dict[str, int], None] = None,
dim: int = -1,
) -> Tensor:
if input_scales_tensor is not None:
input_tensor = (
input_tensor - input_scales_tensor[0]
) / input_scales_tensor[1]
if periodicity is not None:
assert input_dict is not None
inputs = input_tensor.split(list(input_dict.values()), dim=dim)
outputs = []
for i, key in enumerate(input_dict.keys()):
if key in list(periodicity.keys()):
scaled_input = (inputs[i] - periodicity[key][0]) / (
periodicity[key][1] - periodicity[key][0]
)
sin_tensor = torch.sin(2.0 * np.pi * scaled_input)
cos_tensor = torch.cos(2.0 * np.pi * scaled_input)
outputs += [sin_tensor, cos_tensor]
else:
outputs += [inputs[i]]
input_tensor = torch.cat(outputs, dim=dim)
return input_tensor
@staticmethod
def prepare_slice_index(
input_dict: Dict[str, int],
slice_keys: List[str],
) -> Tensor:
"""
Used in fourier-like architectures.
For example:
input_dict = {"x": 1, "y": 2, "z": 1}
slice_keys = ["x", "z"]
return tensor([0, 3])
"""
index_dict = {}
c = 0
for key, size in input_dict.items():
index_dict[key] = []
for _ in range(size):
index_dict[key].append(c)
c += 1
slice_index = []
for key in slice_keys:
slice_index += index_dict[key]
return torch.tensor(slice_index)
@staticmethod
def slice_input(
input_tensor: Tensor,
slice_index: Tensor,
dim: int = -1,
) -> Tensor:
"""
Used in fourier-like architectures.
"""
return input_tensor.index_select(dim, slice_index)
@staticmethod
def _get_normalization_tensor(
key_size_dict: Dict[str, int],
key_normalization: Union[Dict[str, Tuple[float, float]], None] = None,
) -> Tensor:
"""
Used in siren and multiplicative_filter_net architectures.
"""
if key_normalization is None:
return None
normalization_tensor = [[], []]
for key, size in key_size_dict.items():
for _ in range(size):
normalization_tensor[0].append(key_normalization[key][0])
normalization_tensor[1].append(key_normalization[key][1])
return torch.tensor(normalization_tensor)
@staticmethod
def _tensor_normalize(x: Tensor, norm_tensor: Tensor) -> Tensor:
"""
Used in siren and multiplicative_filter_net architectures.
"""
if norm_tensor is None:
return x
normalized_x = (x - norm_tensor[0]) / (norm_tensor[1] - norm_tensor[0])
normalized_x = 2 * normalized_x - 1
return normalized_x
@staticmethod
def prepare_output(
output_tensor: Tensor,
output_var: Dict[str, int],
dim: int = 0,
output_scales: Union[Dict[str, Tuple[float, float]], None] = None,
) -> Dict[str, Tensor]:
# create unnormalised output tensor
output = {}
for k, v in zip(
output_var,
torch.split(output_tensor, list(output_var.values()), dim=dim),
):
output[k] = v
if output_scales is not None:
output[k] = output[k] * output_scales[k][1] + output_scales[k][0]
return output
@staticmethod
def split_output(
output_tensor: Tensor,
output_dict: Dict[str, int],
dim: int = -1,
) -> Dict[str, Tensor]:
output = {}
for k, v in zip(
output_dict,
torch.split(output_tensor, list(output_dict.values()), dim=dim),
):
output[k] = v
return output
@staticmethod
def process_output(
output_tensor: Tensor,
output_scales_tensor: Union[Tensor, None] = None,
) -> Tensor:
if output_scales_tensor is not None:
output_tensor = (
output_tensor * output_scales_tensor[1] + output_scales_tensor[0]
)
return output_tensor
def _tensor_forward(self, x: Tensor) -> None:
r"""
This method defines the computation performed with an input tensor
concatenated from the input dictionary. All subclasses need to
override this method to be able to use FuncArch.
"""
raise NotImplementedError
def _find_computable_deriv_with_func_arch(
self, needed_names: List[Key], allow_partial_hessian
):
"""
Given a list of names, find a list of derivatives that could be computed
by using the FuncArch API.
allow_partial_hessian: bool
If allow_partial_hessian is on, allow evaluating partial hessian to save
some unnecessary computations.
For example, when the input is x, outputs are [u, p], and the needed
derivatives are `[u__x, p__x, u__x__x]`, when this flag is on, FuncArch
will only evaluate [u__x, u__x__x].
"""
compute_derivs = {1: [], 2: []}
# collect all computable derivatives
for n in needed_names:
computable = True
# check the derivative is computable
order = len(n.derivatives)
if 0 < order < 3 and Key(n.name) in self.output_keys:
for deriv in n.derivatives:
if deriv not in self.input_keys:
computable = False
if computable:
compute_derivs[order].append(n)
# Filtering out the Jacobian terms that are not required for the Hessian terms,
# these Jacobian terms will get picked up by the regular autograd engine.
if allow_partial_hessian and len(compute_derivs[2]):
needed_hessian_name = set([d.name for d in compute_derivs[2]])
compute_derivs[1] = [
d for d in compute_derivs[1] if d.name in needed_hessian_name
]
return sorted(compute_derivs[1]) + sorted(compute_derivs[2])
@property
@torch.jit.unused
# We could not use @torch.jit.ignore when combining with @property
# see https://github.com/pytorch/pytorch/issues/54688 .
# Using @torch.jit.unused is good for us as we never call `supports_func_arch`
# in `forward` or `_tensor_forward` method.
def supports_func_arch(self) -> bool:
"""
Returns whether the instantiate arch object support FuncArch API.
We determine it by checking whether the arch object's subclass has
overridden the `_tensor_forward` method.
"""
return self.__class__._tensor_forward != Arch._tensor_forward
@classmethod
def from_config(cls, cfg: Dict):
"""Instantiates a neural network based on a model's OmegaConfig
        Nearly all parameters of a model can be specified in the Hydra config or provided
        when calling `instantiate_arch`. Additionally, model keys can be defined
        and parsed or provided manually in the `instantiate_arch` method. Parameters that
are not primitive data types can be added explicitly or as python code as a
string in the config.
Parameters
----------
cfg : Dict
Config dictionary
Returns
-------
Arch, Dict[str, any]
Returns instantiated model and dictionary of parameters used to initialize it
Example
-------
This is an example of a fourier network's config
>>> arch:
>>> fourier:
>>> input_keys: [x, y] # Key('x'), Key('y')
>>> output_keys: ['trunk', 256] # Key('trunk', size=256)
>>> frequencies: "('axis', [i for i in range(5)])" # Python code gets parsed
>>> frequencies_params: "[0,1,2,3,4]" # Literal strings allowed
>>> nr_layers: 4
>>> layer_size: 128
Note
----
Refer to `Key.convert_config` for more details on how to define keys in the config.
"""
model_params = signature(cls.__init__).parameters
# Init keys if config was used to define them (string)
if "input_keys" in cfg:
cfg["input_keys"] = Key.convert_config(cfg["input_keys"])
if "output_keys" in cfg:
cfg["output_keys"] = Key.convert_config(cfg["output_keys"])
if "detach_keys" in cfg:
cfg["detach_keys"] = Key.convert_config(cfg["detach_keys"])
# Activation functions
if "activation_fn" in cfg and isinstance(cfg["activation_fn"], str):
cfg["activation_fn"] = Activation[cfg["activation_fn"]]
params = {}
for key in model_params:
parameter = model_params[key]
if parameter.name in cfg:
if isinstance(cfg[parameter.name], str) and not isinstance(
parameter.kind, str
):
try:
                            # Try literal_eval for Python code passed as a string,
                            # such as a list comprehension
                            param_literal = ast.literal_eval(cfg[parameter.name])
                        except Exception:
                            # Fall back to the raw string
param_literal = cfg[parameter.name]
params[parameter.name] = param_literal
else:
params[parameter.name] = cfg[parameter.name]
# If parameter is not in the config and has no default value
# Give a warning, because this will error
elif parameter.default is _empty and parameter.name != "self":
logger.warning(
colored(
f"Positional argument '{parameter.name}' not provided. Consider manually adding it to instantiate_arch() call.",
"yellow",
)
)
model = cls(**params)
# Set any variable scaling
if "scaling" in cfg and not cfg["scaling"] is None:
for scale_dict in cfg["scaling"]:
try:
name = next(iter(scale_dict))
shift, scale = scale_dict[name]
model.set_scaling(name, shift, scale)
                except Exception:
logger.warning(f"Failed to set scaling with config {scale_dict}")
return model, params
class FuncArch(nn.Module):
"""
    Base class for all neural networks using the functorch functional API.
    FuncArch performs Jacobian and Hessian calculations during the forward pass.
Parameters
----------
arch : Arch
An instantiated Arch object.
deriv_keys : List[Key]
A list of needed derivative keys.
forward_func : Callable, Optional
If provided then it will be used as the forward function instead of the
`arch._tensor_forward` function.
"""
def __init__(
self, arch: Arch, deriv_keys: List[Key], forward_func: Optional[Callable] = None
):
super().__init__()
if "torch.jit" in str(type(arch)):
raise RuntimeError(
f"Found {type(arch)}, currently FuncArch does not work with jit."
)
assert isinstance(
arch, Arch
), f"arch should be an instantiated Arch object, but found to be {type(arch)}."
assert (
arch.supports_func_arch
), f"{type(arch)} currently does not support FuncArch."
if forward_func is None:
forward_func = arch._tensor_forward
self.saveable = True
self.deriv_keys = deriv_keys
self.arch = arch
self.input_key_dim = self._get_key_dim(arch.input_keys)
self.output_key_dim = self._get_key_dim(arch.output_keys)
self.deriv_key_dict, self.max_order = self._collect_derivs(
arch.input_key_dict, arch.output_key_dict, deriv_keys
)
# may only need to evaluate the partial hessian or jacobian
needed_output_keys = set(
[Key(d.name) for d in self.deriv_key_dict[1] + self.deriv_key_dict[2]]
)
# keep the keys in the original order, so the mapped dims are correct
needed_output_keys = [
key for key in arch.output_keys if key in needed_output_keys
]
# needed_output_dims is used to slice I_N to save some computation
self.needed_output_dims = torch.tensor(
[self.output_key_dim[key.name] for key in needed_output_keys]
)
        # if evaluating a partial hessian or jacobian, the final output shape changes,
        # and so does the corresponding output key dim mapping
self.output_key_dim = {str(var): i for i, var in enumerate(needed_output_keys)}
in_features = sum(arch.input_key_dict.values())
out_features = sum(arch.output_key_dict.values())
if self.max_order == 0:
self._tensor_forward = forward_func
elif self.max_order == 1:
I_N = torch.eye(out_features)[self.needed_output_dims]
self.register_buffer("I_N", I_N, persistent=False)
self._tensor_forward = self._jacobian_impl(forward_func)
elif self.max_order == 2:
I_N1 = torch.eye(out_features)[self.needed_output_dims]
I_N2 = torch.eye(in_features)
self.register_buffer("I_N1", I_N1, persistent=False)
self.register_buffer("I_N2", I_N2, persistent=False)
self._tensor_forward = self._hessian_impl(forward_func)
else:
raise ValueError(
"FuncArch currently does not support "
f"{self.max_order}th order derivative"
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.arch.concat_input(
in_vars,
self.arch.input_key_dict.keys(),
detach_dict=self.arch.detach_key_dict,
dim=-1,
)
if self.max_order == 0:
pred = self._tensor_forward(x)
jacobian = None
hessian = None
elif self.max_order == 1:
pred, jacobian = self._tensor_forward(x)
hessian = None
elif self.max_order == 2:
pred, jacobian, hessian = self._tensor_forward(x)
else:
raise ValueError(
"FuncArch currently does not support "
f"{self.max_order}th order derivative"
)
# prepare output, jacobian and hessian
out = self.arch.split_output(
pred,
self.arch.output_key_dict,
dim=-1,
)
if jacobian is not None:
out.update(
self.prepare_jacobian(
jacobian,
self.deriv_key_dict[1],
self.input_key_dim,
self.output_key_dim,
)
)
if hessian is not None:
out.update(
self.prepare_hessian(
hessian,
self.deriv_key_dict[2],
self.input_key_dim,
self.output_key_dim,
)
)
return out
def make_node(self, name: str, jit: bool = False, optimize: bool = True):
"""Makes functional arch node for unrolling with Modulus `Graph`.
Parameters
----------
name : str
This will be used as the name of created node.
        jit : bool
            If true then compile with jit, https://pytorch.org/docs/stable/jit.html.
            Currently ignored: JIT is forced off for FuncArch nodes.
optimize : bool
If true then treat parameters as optimizable.
"""
# Forcing JIT off
jit = False
# set name for loading and saving model
self.name = name
self.checkpoint_filename = name + ".pth"
node_name = "Functional " + ("Arch" if name is None else str(name))
ft_arch = self
# Set save and load methods
ft_arch.save = self.arch.save
ft_arch.load = self.arch.load
# Create and return node from this network architecture
net_node = Node(
self.arch.input_keys,
self.arch.output_keys + self.deriv_keys,
ft_arch,
name=node_name,
optimize=optimize,
)
return net_node
@staticmethod
def _get_key_dim(keys: List[Key]):
"""
Find the corresponding dims of the keys.
        For example: suppose we have the keys and corresponding sizes
        {x: 2, y: 1, z: 1}. The concatenated result has dim 4, and each key maps to
        a dim {x: [0, 1], y: 2, z: 3}.
TODO Currently, the keys with more than one dim are dropped because they
have no use cases.
"""
def exclusive_sum(sizes: List):
return np.concatenate([[0], np.cumsum(sizes)])
exclu_sum = exclusive_sum([k.size for k in keys])
out = {}
for i, k in enumerate(keys):
if k.size == 1:
out[str(k)] = exclu_sum[i]
return out
@staticmethod
def _collect_derivs(
input_key_dict: Dict[str, int],
output_key_dict: Dict[str, int],
deriv_keys: List[Key],
):
deriv_key_dict = {1: [], 2: []}
for x in deriv_keys:
# check the derivative is computable
assert x.name in output_key_dict, f"Cannot calculate {x}"
assert output_key_dict[x.name] == 1, f"key ({x.name}) size must be 1"
for deriv in x.derivatives:
assert deriv.name in input_key_dict, f"Cannot calculate {x}"
assert (
input_key_dict[deriv.name] == 1
), f"key ({deriv.name}) size must be 1"
# collect each order derivatives
order = len(x.derivatives)
if order == 0 or order >= 3:
raise ValueError(
"FuncArch currently does not support " f"{order}th order derivative"
)
else:
deriv_key_dict[order].append(x)
max_order = 0
for order, keys in deriv_key_dict.items():
if keys:
max_order = order
return deriv_key_dict, max_order
def _jacobian_impl(self, forward_func):
def jacobian_func(x, v):
pred, vjpfunc = functorch.vjp(forward_func, x)
return vjpfunc(v)[0], pred
def get_jacobian(x):
I_N = self.I_N
jacobian, pred = functorch.vmap(
functorch.vmap(jacobian_func, in_dims=(None, 0)), in_dims=(0, None)
)(x, I_N)
pred = pred[:, 0, :]
return pred, jacobian
return get_jacobian
def _hessian_impl(self, forward_func):
def hessian_func(x, v1, v2):
def jacobian_func(x):
pred, vjpfunc = functorch.vjp(forward_func, x)
return vjpfunc(v1)[0], pred
# jvp and vjp
(jacobian, hessian, pred) = functorch.jvp(
jacobian_func, (x,), (v2,), has_aux=True
)
# vjp and vjp is slow
# jacobian, hessianfunc, pred = functorch.vjp(jacobian_func, x, has_aux=True)
# hessian = hessianfunc(v2)[0]
return hessian, jacobian, pred
def get_hessian(x):
I_N1 = self.I_N1 # used to slice hessian rows
I_N2 = self.I_N2 # used to slice hessian columns
hessian, jacobian, pred = functorch.vmap(
functorch.vmap(
functorch.vmap(hessian_func, in_dims=(None, None, 0)), # I_N2
in_dims=(None, 0, None), # I_N1
),
in_dims=(0, None, None), # x
)(x, I_N1, I_N2)
pred = pred[:, 0, 0, :]
jacobian = jacobian[:, :, 0, :]
return pred, jacobian, hessian
return get_hessian
@staticmethod
def prepare_jacobian(
output_tensor: Tensor,
deriv_keys_1st_order: List[Key],
input_key_dim: Dict[str, int],
output_key_dim: Dict[str, int],
) -> Dict[str, Tensor]:
output = {}
for k in deriv_keys_1st_order:
input_dim = input_key_dim[k.derivatives[0].name]
out_dim = output_key_dim[k.name]
output[str(k)] = output_tensor[:, out_dim, input_dim].reshape(-1, 1)
return output
@staticmethod
def prepare_hessian(
output_tensor: Tensor,
deriv_keys_2nd_order: List[Key],
input_key_dim: Dict[str, int],
output_key_dim: Dict[str, int],
) -> Dict[str, Tensor]:
output = {}
for k in deriv_keys_2nd_order:
input_dim0 = input_key_dim[k.derivatives[0].name]
input_dim1 = input_key_dim[k.derivatives[1].name]
out_dim = output_key_dim[k.name]
output[str(k)] = output_tensor[:, out_dim, input_dim0, input_dim1].reshape(
-1, 1
)
return output
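# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): the vjp + nested-vmap trick
# used by FuncArch._jacobian_impl above, written against plain functorch.
# The toy function `f` below is hypothetical and stands in for an Arch's
# `_tensor_forward`; only `torch` and `functorch` are assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import functorch
    import torch

    def f(x: torch.Tensor) -> torch.Tensor:
        # toy map R^3 -> R^2 for a single (non-batched) input
        return torch.stack([x[0] * x[1], x[2] ** 2])

    def jacobian_func(x, v):
        pred, vjpfunc = functorch.vjp(f, x)
        # v is one row of the identity, so vjpfunc(v)[0] is one Jacobian row
        return vjpfunc(v)[0], pred

    batch = torch.randn(5, 3)  # 5 examples, 3 inputs each
    I_N = torch.eye(2)  # one identity row per output
    jacobian, pred = functorch.vmap(
        functorch.vmap(jacobian_func, in_dims=(None, 0)),  # over Jacobian rows
        in_dims=(0, None),  # over the batch
    )(batch, I_N)
    pred = pred[:, 0, :]  # predictions are replicated across identity rows
    print(jacobian.shape)  # torch.Size([5, 2, 3])
    print(pred.shape)  # torch.Size([5, 2])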
| modulus-sym-main | modulus/sym/models/arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-sym-main | modulus/sym/models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union, Tuple
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class MultiscaleFourierNetArch(Arch):
"""
Multi-scale Fourier Net
References:
1. Sifan Wang, Hanwen Wang, Paris Perdikaris, On the eigenvector bias of Fourier feature
networks: From regression to solving multi-scale PDEs with physics-informed neural networks,
    Computer Methods in Applied Mechanics and Engineering, Volume 384, 2021.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
frequencies : Tuple[Tuple[str, List[float]],...] = (("axis", [i for i in range(10)]),)
        A set of Fourier encoding tuples to apply to any inputs in
        the list `['x', 'y', 'z', 't']`.
The first element describes the type of frequency encoding
with options, `'gaussian', 'full', 'axis', 'diagonal'`.
`'gaussian'` samples frequency of Fourier series from Gaussian.
`'axis'` samples along axis of spectral space with the given list range of frequencies.
`'diagonal'` samples along diagonal of spectral space with the given list range of frequencies.
`'full'` samples along entire spectral space for all combinations of frequencies in given list.
frequencies_params : Tuple[Tuple[str, List[float]],...] = (("axis", [i for i in range(10)]),)
Same as `frequencies` except these are used for encodings
on any inputs not in the list `['x', 'y', 'z', 't']`.
activation_fn : layers.Activation = layers.Activation.SILU
Activation function used by network.
layer_size : int = 512
Layer size for every hidden layer of the model.
nr_layers : int = 6
Number of hidden layers of the model.
skip_connections : bool = False
If true then apply skip connections every 2 hidden layers.
weight_norm : bool = True
Use weight norm on fully connected layers.
adaptive_activations : bool = False
If True then use an adaptive activation function as described here
https://arxiv.org/abs/1906.01170.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
frequencies=(("axis", [i for i in range(10)]),),
frequencies_params=(("axis", [i for i in range(10)]),),
activation_fn=layers.Activation.SILU,
layer_size: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
weight_norm: bool = True,
adaptive_activations: bool = False,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
self.skip_connections = skip_connections
self.xyzt_var = [x for x in self.input_key_dict if x in ["x", "y", "z", "t"]]
# Prepare slice index
xyzt_slice_index = self.prepare_slice_index(self.input_key_dict, self.xyzt_var)
self.register_buffer("xyzt_slice_index", xyzt_slice_index, persistent=False)
self.params_var = [
x for x in self.input_key_dict if x not in ["x", "y", "z", "t"]
]
params_slice_index = self.prepare_slice_index(
self.input_key_dict, self.params_var
)
self.register_buffer("params_slice_index", params_slice_index, persistent=False)
in_features_xyzt = sum(
(v for k, v in self.input_key_dict.items() if k in self.xyzt_var)
)
in_features_params = sum(
(v for k, v in self.input_key_dict.items() if k in self.params_var)
)
in_features = in_features_xyzt + in_features_params
out_features = sum(self.output_key_dict.values())
if adaptive_activations:
activation_par = nn.Parameter(torch.ones(1))
else:
activation_par = None
in_features = in_features_xyzt + in_features_params
if frequencies_params is None:
frequencies_params = frequencies
self.num_freqs = len(frequencies)
if in_features_xyzt > 0:
self.fourier_layers_xyzt = nn.ModuleList()
for idx in range(self.num_freqs):
self.fourier_layers_xyzt.append(
layers.FourierLayer(
in_features=in_features_xyzt,
frequencies=frequencies[idx],
)
)
in_features += self.fourier_layers_xyzt[0].out_features()
else:
self.fourier_layers_xyzt = None
if in_features_params > 0:
self.fourier_layers_params = nn.ModuleList()
for idx in range(self.num_freqs):
self.fourier_layers_params.append(
layers.FourierLayer(
in_features=in_features_params,
frequencies=frequencies_params[idx],
)
)
in_features += self.fourier_layers_params[0].out_features()
else:
self.fourier_layers_params = None
self.fc_layers = nn.ModuleList()
layer_in_features = in_features
for i in range(nr_layers):
self.fc_layers.append(
layers.FCLayer(
layer_in_features,
layer_size,
activation_fn,
weight_norm,
activation_par,
)
)
layer_in_features = layer_size
self.final_layer = layers.FCLayer(
in_features=layer_size * self.num_freqs,
out_features=out_features,
activation_fn=layers.Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
if self.fourier_layers_xyzt is not None:
in_xyzt_var = self.slice_input(x, self.xyzt_slice_index, dim=-1)
if self.fourier_layers_params is not None:
in_params_var = self.slice_input(x, self.params_slice_index, dim=-1)
old_x = x
fc_outputs = []
_len = (
len(self.fourier_layers_xyzt)
if self.fourier_layers_xyzt is not None
else len(self.fourier_layers_params)
)
zip_fourier_layers_xyzt = (
self.fourier_layers_xyzt
if self.fourier_layers_xyzt is not None
else [None] * _len
)
zip_fourier_layers_params = (
self.fourier_layers_params
if self.fourier_layers_params is not None
else [None] * _len
)
for fourier_layer_xyzt, fourier_layer_params in zip(
zip_fourier_layers_xyzt, zip_fourier_layers_params
):
x = old_x
if self.fourier_layers_xyzt is not None:
fourier_xyzt = fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layers_params is not None:
fourier_params = fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers):
x = layer(x)
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
fc_outputs.append(x)
x = torch.cat(fc_outputs, dim=-1)
x = self.final_layer(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
if self.fourier_layers_xyzt is not None:
in_xyzt_var = self.prepare_input(
in_vars,
self.xyzt_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
if self.fourier_layers_params is not None:
in_params_var = self.prepare_input(
in_vars,
self.params_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
old_x = x
fc_outputs = []
_len = (
len(self.fourier_layers_xyzt)
if self.fourier_layers_xyzt is not None
else len(self.fourier_layers_params)
)
zip_fourier_layers_xyzt = (
self.fourier_layers_xyzt
if self.fourier_layers_xyzt is not None
else [None] * _len
)
zip_fourier_layers_params = (
self.fourier_layers_params
if self.fourier_layers_params is not None
else [None] * _len
)
for fourier_layer_xyzt, fourier_layer_params in zip(
zip_fourier_layers_xyzt, zip_fourier_layers_params
):
x = old_x
if self.fourier_layers_xyzt is not None:
fourier_xyzt = fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layers_params is not None:
fourier_params = fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers):
x = layer(x)
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
fc_outputs.append(x)
x = torch.cat(fc_outputs, dim=-1)
x = self.final_layer(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
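# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library) of the multi-scale Fourier
# feature idea used above: the same inputs are encoded with several frequency
# sets and each encoding is pushed through a shared trunk before the results
# are concatenated. The plain sin/cos encoding below only approximates what
# layers.FourierLayer does internally.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import math

    import torch

    def fourier_features(x: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
        # x: [N, d], freqs: [d, m] -> [N, 2*m] sin/cos features
        proj = 2.0 * math.pi * x @ freqs
        return torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)

    x = torch.rand(8, 2)
    scales = [torch.randn(2, 16), 10.0 * torch.randn(2, 16)]  # low / high frequencies
    trunk = torch.nn.Sequential(
        torch.nn.Linear(2 + 32, 64), torch.nn.SiLU(), torch.nn.Linear(64, 64)
    )
    final = torch.nn.Linear(64 * len(scales), 1)
    # shared trunk applied once per frequency scale, outputs concatenated
    outs = [trunk(torch.cat([x, fourier_features(x, f)], dim=-1)) for f in scales]
    y = final(torch.cat(outs, dim=-1))
    print(y.shape)  # torch.Size([8, 1])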
| modulus-sym-main | modulus/sym/models/multiscale_fourier_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class HighwayFourierNetArch(Arch):
"""
A modified highway network using Fourier features.
References:
(1) Srivastava, R.K., Greff, K. and Schmidhuber, J., 2015.
Training very deep networks. In Advances in neural information
processing systems (pp. 2377-2385).
(2) Tancik, M., Srinivasan, P.P., Mildenhall, B., Fridovich-Keil, S.,
Raghavan, N., Singhal, U., Ramamoorthi, R., Barron, J.T. and Ng, R., 2020.
Fourier features let networks learn high frequency functions in low
dimensional domains.
arXiv preprint arXiv:2006.10739.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
frequencies : Tuple[str, List[float]] = ("axis", [i for i in range(10)])
        A tuple that describes the Fourier encodings to apply to any inputs in
the list `['x', 'y', 'z', 't']`.
The first element describes the type of frequency encoding
with options, `'gaussian', 'full', 'axis', 'diagonal'`.
`'gaussian'` samples frequency of Fourier series from Gaussian.
`'axis'` samples along axis of spectral space with the given list range of frequencies.
`'diagonal'` samples along diagonal of spectral space with the given list range of frequencies.
`'full'` samples along entire spectral space for all combinations of frequencies in given list.
frequencies_params : Tuple[str, List[float]] = ("axis", [i for i in range(10)])
Same as `frequencies` except these are used for encodings
on any inputs not in the list `['x', 'y', 'z', 't']`.
activation_fn : layers.Activation = layers.Activation.SILU
Activation function used by network.
layer_size : int = 512
Layer size for every hidden layer of the model.
nr_layers : int = 6
Number of hidden layers of the model.
skip_connections : bool = False
If true then apply skip connections every 2 hidden layers.
weight_norm : bool = True
Use weight norm on fully connected layers.
adaptive_activations : bool = False
If True then use an adaptive activation function as described here
https://arxiv.org/abs/1906.01170.
    transform_fourier_features : bool = True
        If True use the Fourier features in the transformer (gate) layer.
    project_fourier_features : bool = False
        If True use the Fourier features in the projector layer.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
frequencies=("axis", [i for i in range(10)]),
frequencies_params=("axis", [i for i in range(10)]),
activation_fn=layers.Activation.SILU,
layer_size: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
weight_norm: bool = True,
adaptive_activations: bool = False,
transform_fourier_features: bool = True,
project_fourier_features: bool = False,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
self.transform_fourier_features = transform_fourier_features
self.project_fourier_features = project_fourier_features
self.skip_connections = skip_connections
self.xyzt_var = [x for x in self.input_key_dict if x in ["x", "y", "z", "t"]]
# Prepare slice index
xyzt_slice_index = self.prepare_slice_index(self.input_key_dict, self.xyzt_var)
self.register_buffer("xyzt_slice_index", xyzt_slice_index, persistent=False)
self.params_var = [
x for x in self.input_key_dict if x not in ["x", "y", "z", "t"]
]
params_slice_index = self.prepare_slice_index(
self.input_key_dict, self.params_var
)
self.register_buffer("params_slice_index", params_slice_index, persistent=False)
in_features_xyzt = sum(
(v for k, v in self.input_key_dict.items() if k in self.xyzt_var)
)
in_features_params = sum(
(v for k, v in self.input_key_dict.items() if k in self.params_var)
)
in_features = in_features_xyzt + in_features_params
out_features = sum(self.output_key_dict.values())
if adaptive_activations:
activation_par = nn.Parameter(torch.ones(1))
else:
activation_par = None
in_features = in_features_xyzt + in_features_params
initial_in_features = in_features
if in_features_xyzt > 0:
self.fourier_layer_xyzt = layers.FourierLayer(
in_features=in_features_xyzt, frequencies=frequencies
)
in_features += self.fourier_layer_xyzt.out_features()
else:
self.fourier_layer_xyzt = None
if in_features_params > 0:
self.fourier_layer_params = layers.FourierLayer(
in_features=in_features_params, frequencies=frequencies_params
)
in_features += self.fourier_layer_params.out_features()
else:
self.fourier_layer_params = None
if transform_fourier_features:
transformer_in_features = in_features
else:
transformer_in_features = initial_in_features
if project_fourier_features:
projector_in_features = in_features
else:
projector_in_features = initial_in_features
self.fc_t = layers.FCLayer(
transformer_in_features,
layer_size,
activation_fn=layers.Activation.SIGMOID,
weight_norm=weight_norm,
activation_par=activation_par,
)
self.fc_v = layers.FCLayer(
projector_in_features,
layer_size,
activation_fn=layers.Activation.IDENTITY,
weight_norm=weight_norm,
activation_par=activation_par,
)
self.fc_layers = nn.ModuleList()
layer_in_features = in_features
for i in range(nr_layers):
self.fc_layers.append(
layers.FCLayer(
layer_in_features,
layer_size,
activation_fn=activation_fn,
weight_norm=weight_norm,
activation_par=activation_par,
)
)
layer_in_features = layer_size
self.final_layer = layers.FCLayer(
layer_size,
out_features,
activation_fn=layers.Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
old_x = x
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.slice_input(x, self.xyzt_slice_index, dim=-1)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.slice_input(x, self.params_slice_index, dim=-1)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
if self.transform_fourier_features:
transformer_input = x
else:
transformer_input = old_x
if self.project_fourier_features:
projector_input = x
else:
projector_input = old_x
xt = self.fc_t(transformer_input)
xp = self.fc_v(projector_input)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers):
x = layer(x)
x = x * xt + xp - xp * xt
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
x = self.final_layer(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
old_x = x
if self.fourier_layer_xyzt is not None:
in_xyzt_var = self.prepare_input(
in_vars,
self.xyzt_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_xyzt = self.fourier_layer_xyzt(in_xyzt_var)
x = torch.cat((x, fourier_xyzt), dim=-1)
if self.fourier_layer_params is not None:
in_params_var = self.prepare_input(
in_vars,
self.params_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
fourier_params = self.fourier_layer_params(in_params_var)
x = torch.cat((x, fourier_params), dim=-1)
if self.transform_fourier_features:
transformer_input = x
else:
transformer_input = old_x
if self.project_fourier_features:
projector_input = x
else:
projector_input = old_x
xt = self.fc_t(transformer_input)
xp = self.fc_v(projector_input)
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.fc_layers):
x = layer(x)
x = x * xt + xp - xp * xt
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
x = self.final_layer(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
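# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library) showing that the highway
# update used in _tensor_forward / _dict_forward above,
#     x = x * xt + xp - xp * xt,
# is the gated convex combination xt * x + (1 - xt) * xp from the highway
# network paper, where xt is the sigmoid transform gate and xp the projected
# input.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    x = torch.randn(4, 8)
    xp = torch.randn(4, 8)
    xt = torch.sigmoid(torch.randn(4, 8))  # gate values in (0, 1)

    highway = x * xt + xp - xp * xt
    gated = xt * x + (1.0 - xt) * xp
    print(torch.allclose(highway, gated))  # True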
| modulus-sym-main | modulus/sym/models/highway_fourier_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Dict, Tuple, Union
from modulus.sym.key import Key
import torch
import torch.nn as nn
from torch import Tensor
from modulus.sym.models.layers import Activation, FCLayer, Conv1dFCLayer
from modulus.sym.models.arch import Arch
from typing import List
class FullyConnectedArchCore(nn.Module):
def __init__(
self,
in_features: int = 512,
layer_size: int = 512,
out_features: int = 512,
nr_layers: int = 6,
skip_connections: bool = False,
activation_fn: Activation = Activation.SILU,
adaptive_activations: bool = False,
weight_norm: bool = True,
conv_layers: bool = False,
) -> None:
super().__init__()
self.skip_connections = skip_connections
# Allows for regular linear layers to be swapped for 1D Convs
# Useful for channel operations in FNO/Transformers
if conv_layers:
fc_layer = Conv1dFCLayer
else:
fc_layer = FCLayer
if adaptive_activations:
activation_par = nn.Parameter(torch.ones(1))
else:
activation_par = None
if not isinstance(activation_fn, list):
activation_fn = [activation_fn] * nr_layers
if len(activation_fn) < nr_layers:
activation_fn = activation_fn + [activation_fn[-1]] * (
nr_layers - len(activation_fn)
)
self.layers = nn.ModuleList()
layer_in_features = in_features
for i in range(nr_layers):
self.layers.append(
fc_layer(
layer_in_features,
layer_size,
activation_fn[i],
weight_norm,
activation_par,
)
)
layer_in_features = layer_size
self.final_layer = fc_layer(
in_features=layer_size,
out_features=out_features,
activation_fn=Activation.IDENTITY,
weight_norm=False,
activation_par=None,
)
def forward(self, x: Tensor) -> Tensor:
x_skip: Optional[Tensor] = None
for i, layer in enumerate(self.layers):
x = layer(x)
if self.skip_connections and i % 2 == 0:
if x_skip is not None:
x, x_skip = x + x_skip, x
else:
x_skip = x
x = self.final_layer(x)
return x
def get_weight_list(self):
weights = [layer.conv.weight for layer in self.layers] + [
self.final_layer.conv.weight
]
biases = [layer.conv.bias for layer in self.layers] + [
self.final_layer.conv.bias
]
return weights, biases
class FullyConnectedArch(Arch):
"""Fully Connected Neural Network.
Parameters
----------
input_keys : List[Key]
Input key list.
output_keys : List[Key]
Output key list.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int, optional
Layer size for every hidden layer of the model, by default 512
nr_layers : int, optional
Number of hidden layers of the model, by default 6
activation_fn : Activation, optional
Activation function used by network, by default :obj:`Activation.SILU`
periodicity : Union[Dict[str, Tuple[float, float]], None], optional
Dictionary of tuples that allows making model give periodic predictions on
the given bounds in tuple.
skip_connections : bool, optional
Apply skip connections every 2 hidden layers, by default False
weight_norm : bool, optional
Use weight norm on fully connected layers, by default True
adaptive_activations : bool, optional
Use an adaptive activation functions, by default False
Variable Shape
--------------
- Input variable tensor shape: :math:`[N, size]`
- Output variable tensor shape: :math:`[N, size]`
Example
-------
Fully-connected model (2 -> 64 -> 64 -> 2)
    >>> arch = FullyConnectedArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> layer_size = 64,
>>> nr_layers = 2)
>>> model = arch.make_node()
>>> input = {"x": torch.randn(64, 2)}
>>> output = model.evaluate(input)
Fully-connected model with periodic outputs between (0,1)
    >>> arch = FullyConnectedArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> periodicity={'x': (0, 1)})
Note
----
For information regarding adaptive activations please refer to
https://arxiv.org/abs/1906.01170.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
layer_size: int = 512,
nr_layers: int = 6,
activation_fn=Activation.SILU,
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
skip_connections: bool = False,
adaptive_activations: bool = False,
weight_norm: bool = True,
) -> None:
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
detach_keys=detach_keys,
periodicity=periodicity,
)
if self.periodicity is not None:
in_features = sum(
[
x.size
for x in self.input_keys
if x.name not in list(periodicity.keys())
]
            ) + sum(
[
2 * x.size
for x in self.input_keys
if x.name in list(periodicity.keys())
]
)
else:
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
self._impl = FullyConnectedArchCore(
in_features,
layer_size,
out_features,
nr_layers,
skip_connections,
activation_fn,
adaptive_activations,
weight_norm,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self.process_input(
x,
self.input_scales_tensor,
periodicity=self.periodicity,
input_dict=self.input_key_dict,
dim=-1,
)
x = self._impl(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
periodicity=self.periodicity,
)
y = self._impl(x)
return self.prepare_output(
y, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
class ConvFullyConnectedArch(Arch):
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
layer_size: int = 512,
nr_layers: int = 6,
activation_fn=Activation.SILU,
skip_connections: bool = False,
adaptive_activations: bool = False,
) -> None:
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
detach_keys=detach_keys,
)
self.var_dim = 1
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
self._impl = FullyConnectedArchCore(
in_features,
layer_size,
out_features,
nr_layers,
skip_connections,
activation_fn,
adaptive_activations,
weight_norm=False,
conv_layers=True,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
periodicity=self.periodicity,
)
x_shape = list(x.size())
x = x.view(x.shape[0], x.shape[1], -1)
y = self._impl(x)
x_shape[1] = y.shape[1]
y = y.view(x_shape)
return self.prepare_output(
y, self.output_key_dict, dim=1, output_scales=self.output_scales
)
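# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library) of why ConvFullyConnectedArch
# flattens the spatial dimensions: a Conv1d with kernel_size=1 applies the
# same fully connected layer to every point of the flattened grid, acting
# only on the channel dimension. Conv1dFCLayer is assumed to wrap such a
# kernel-size-1 convolution.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    x = torch.randn(2, 3, 16, 16)  # [batch, channels, H, W]
    x_flat = x.view(x.shape[0], x.shape[1], -1)  # [batch, channels, H*W]

    pointwise = torch.nn.Conv1d(3, 5, kernel_size=1)
    y_flat = pointwise(x_flat)  # [batch, 5, H*W]
    y = y_flat.view(x.shape[0], 5, *x.shape[2:])
    print(y.shape)  # torch.Size([2, 5, 16, 16])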
| modulus-sym-main | modulus/sym/models/fully_connected.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import enum
import inspect
import importlib
from omegaconf import OmegaConf
from inspect import signature
from modulus.sym import Key
from modulus.sym.models.layers import Activation
from modulus.sym.models.arch import Arch
logger = logging.getLogger(__name__)
class ModulusModels(object):
_model_classes = {
"afno": "AFNOArch",
"distributed_afno": "DistributedAFNOArch",
"deeponet": "DeepONetArch",
"fno": "FNOArch",
"fourier": "FourierNetArch",
"fully_connected": "FullyConnectedArch",
"conv_fully_connected": "ConvFullyConnectedArch",
"fused_fully_connected": "FusedMLPArch",
"fused_fourier": "FusedFourierNetArch",
"fused_hash_encoding": "FusedGridEncodingNetArch",
"hash_encoding": "MultiresolutionHashNetArch",
"highway_fourier": "HighwayFourierNetArch",
"modified_fourier": "ModifiedFourierNetArch",
"multiplicative_fourier": "MultiplicativeFilterNetArch",
"multiscale_fourier": "MultiscaleFourierNetArch",
"pix2pix": "Pix2PixArch",
"siren": "SirenArch",
"super_res": "SRResNetArch",
}
# Dynamic imports (prevents dep warnings of unused models)
_model_imports = {
"afno": "modulus.sym.models.afno",
"distributed_afno": "modulus.sym.models.afno.distributed",
"deeponet": "modulus.sym.models.deeponet",
"fno": "modulus.sym.models.fno",
"fourier": "modulus.sym.models.fourier_net",
"fully_connected": "modulus.sym.models.fully_connected",
"conv_fully_connected": "modulus.sym.models.fully_connected",
"fused_fully_connected": "modulus.sym.models.fused_mlp",
"fused_fourier": "modulus.sym.models.fused_mlp",
"fused_hash_encoding": "modulus.sym.models.fused_mlp",
"hash_encoding": "modulus.sym.models.hash_encoding_net",
"highway_fourier": "modulus.sym.models.highway_fourier_net",
"modified_fourier": "modulus.sym.models.modified_fourier_net",
"multiplicative_fourier": "modulus.sym.models.multiplicative_filter_net",
"multiscale_fourier": "modulus.sym.models.multiscale_fourier_net",
"pix2pix": "modulus.sym.models.pix2pix",
"siren": "modulus.sym.models.siren",
"super_res": "modulus.sym.models.super_res_net",
}
_registered_archs = {}
def __new__(cls):
obj = super(ModulusModels, cls).__new__(cls)
obj._registered_archs = cls._registered_archs
return obj
def __contains__(self, k):
return k.lower() in self._model_classes or k.lower() in self._registered_archs
def __len__(self):
return len(self._model_classes) + len(self._registered_archs)
def __getitem__(self, k: str):
assert isinstance(k, str), "Model type key should be a string"
# Case invariant
k = k.lower()
# Import built-in archs
if k in self._model_classes:
return getattr(
importlib.import_module(self._model_imports[k]), self._model_classes[k]
)
# Return user registered arch
elif k in self._registered_archs:
return self._registered_archs[k]
else:
raise ValueError("Invalid model type key")
def keys(self):
return list(self._model_classes.keys()) + list(self._registered_archs.keys())
@classmethod
def add_model(cls, key: str, value):
key = key.lower()
assert (
not key in cls._model_classes
), f"Model type name {key} already registered! Must be unique."
cls._registered_archs[key] = value
def register_arch(model: Arch, type_name: str):
"""Add a custom architecture to the Modulus model zoo
Parameters
----------
model : Arch
Model class
type_name : str
        Unique name to identify the model in the configs
"""
ModulusModels.add_model(type_name, model)
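# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library) for register_arch.
# `MyArch` is a hypothetical user-defined Arch subclass; after registration
# the string "my_arch" can be used as a model type exactly like the built-in
# keys listed in ModulusModels._model_classes.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class MyArch(Arch):  # hypothetical architecture, for illustration only
        pass

    register_arch(MyArch, "my_arch")
    assert "my_arch" in ModulusModels()
    assert ModulusModels()["my_arch"] is MyArch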
| modulus-sym-main | modulus/sym/models/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import numpy as np
from torch import Tensor
from typing import Dict, List, Tuple
import itertools
import modulus.sym.models.fully_connected as fully_connected
import modulus.sym.models.layers as layers
from modulus.sym.models.interpolation import (
_grid_knn_idx,
_hyper_cube_weighting,
smooth_step_2,
linear_step,
)
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
from modulus.sym.distributed import DistributedManager
class MultiresolutionHashNetArch(Arch):
"""Hash encoding network as seen in,
Müller, Thomas, et al. "Instant Neural Graphics Primitives with a Multiresolution Hash Encoding." arXiv preprint arXiv:2201.05989 (2022).
A reference pytorch implementation can be found, https://github.com/yashbhalgat/HashNeRF-pytorch
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
activation_fn : layers.Activation = layers.Activation.SILU
Activation function used by network.
layer_size : int = 64
Layer size for every hidden layer of the model.
nr_layers : int = 3
Number of hidden layers of the model.
skip_connections : bool = False
If true then apply skip connections every 2 hidden layers.
    weight_norm : bool = True
Use weight norm on fully connected layers.
adaptive_activations : bool = False
If True then use an adaptive activation function as described here
https://arxiv.org/abs/1906.01170.
bounds : List[Tuple[float, float]] = [(-1.0, 1.0), (-1.0, 1.0)]
List of bounds for hash grid. Each element is a tuple
of the upper and lower bounds.
    nr_levels : int = 16
Number of levels in the hash grid.
nr_features_per_level : int = 2
Number of features from each hash grid.
log2_hashmap_size : int = 19
Hash map size will be `2**log2_hashmap_size`.
base_resolution : int = 2
        Base resolution of hash grids.
finest_resolution : int = 32
Highest resolution of hash grids.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
activation_fn=layers.Activation.SILU,
layer_size: int = 64,
nr_layers: int = 3,
skip_connections: bool = False,
weight_norm: bool = True,
adaptive_activations: bool = False,
bounds: List[Tuple[float, float]] = [(-1.0, 1.0), (-1.0, 1.0)],
nr_levels: int = 16,
nr_features_per_level: int = 2,
log2_hashmap_size: int = 19,
base_resolution: int = 2,
finest_resolution: int = 32,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
# get needed input output information
self.xyzt_var = [x for x in self.input_key_dict if x in ["x", "y", "z", "t"]]
self.params_var = [
x for x in self.input_key_dict if x not in ["x", "y", "z", "t"]
]
in_features_xyzt = sum(
(v for k, v in self.input_key_dict.items() if k in self.xyzt_var)
)
in_features_params = sum(
(v for k, v in self.input_key_dict.items() if k in self.params_var)
)
in_features = in_features_xyzt + in_features_params
out_features = sum(self.output_key_dict.values())
if len(self.params_var) == 0:
self.params_var = None
# get device for torch constants used in inference
self.device = DistributedManager().device
# store hash grid parameters
self.bounds = bounds
self.log2_hashmap_size = log2_hashmap_size
self.base_resolution = Tensor([base_resolution])
self.finest_resolution = Tensor([finest_resolution])
self.nr_levels = nr_levels
self.nr_features_per_level = nr_features_per_level
# make embeddings
self.embedding = nn.Embedding(
self.nr_levels * 2**self.log2_hashmap_size, self.nr_features_per_level
)
nn.init.uniform_(self.embedding.weight, a=-0.001, b=0.001)
self.b = np.exp(
(np.log(self.finest_resolution) - np.log(self.base_resolution))
/ (nr_levels - 1)
)
# make grid dx and start tensors
list_dx = []
list_start = []
list_resolution = []
for i in range(self.nr_levels):
# calculate resolution
resolution = int(np.floor(self.base_resolution * self.b**i))
list_resolution.append(
torch.tensor([resolution]).to(self.device).view(1, 1)
)
# make adjust factor
adjust_factor = ((8253729**i + 2396403) % 32767) / 32767.0
# compute grid and adjust it
not_adjusted_dx = [(x[1] - x[0]) / (resolution - 1) for x in self.bounds]
grid = [
(
b[0] + (-2.0 + adjust_factor) * x,
b[1] + (2.0 + adjust_factor) * x,
resolution,
)
for b, x in zip(self.bounds, not_adjusted_dx)
]
# make grid spacing size tensor
dx = torch.tensor([(x[1] - x[0]) / (x[2] - 1) for x in grid]).to(
self.device
)
dx = dx.view(1, len(grid))
list_dx.append(dx)
# make start tensor of grid
start = torch.tensor([val[0] for val in grid]).to(self.device)
start = start.view(1, len(grid))
list_start.append(start)
# stack values
self.resolutions = torch.stack(list_resolution, dim=1)
self.dx = torch.stack(list_dx, dim=1)
self.start = torch.stack(list_start, dim=1)
# hyper cube for adding to lower point index
self.hyper_cube = (
torch.tensor(list(itertools.product(*(len(self.bounds) * [[0, 1]]))))
.to(self.device)
.view(1, 1, -1, len(bounds))
)
# multiply factor for hash encoding to order layers
list_mul_factor = []
mul_factor = torch.tensor([1], dtype=torch.int).to(self.device)
for r in range(self.nr_levels):
for d in range(len(self.bounds)):
list_mul_factor.append(mul_factor.clone())
mul_factor *= self.resolutions[0, r, 0]
mul_factor %= 20731370 # prevent overflow
self.mul_factor = torch.stack(list_mul_factor).view(
1, self.nr_levels, 1, len(self.bounds)
)
# make fully connected decoding network
self.fc = fully_connected.FullyConnectedArchCore(
in_features=(self.nr_features_per_level * nr_levels) + in_features_params,
layer_size=layer_size,
out_features=out_features,
nr_layers=nr_layers,
skip_connections=skip_connections,
activation_fn=activation_fn,
adaptive_activations=adaptive_activations,
weight_norm=weight_norm,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
# get spacial inputs and hash encode
in_xyzt_var = self.prepare_input(
in_vars,
self.xyzt_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
# unsqueeze input to operate on all grids at once
unsqueezed_xyzt = torch.unsqueeze(in_xyzt_var, 1)
# get lower and upper bounds cells
lower_indice = torch.floor((unsqueezed_xyzt - self.start) / self.dx).int()
all_indice = torch.unsqueeze(lower_indice, -2) + self.hyper_cube
lower_point = lower_indice * self.dx + self.start
upper_point = lower_point + self.dx
# get hash from indices and resolutions
key = torch.sum(all_indice * self.mul_factor, dim=-1)
key = 10000003 * key + 124777 * torch.bitwise_xor(
key, torch.tensor(3563504501)
) # shuffle it
key = (
torch.tensor(self.nr_levels * (1 << self.log2_hashmap_size) - 1).to(
key.device
)
& key
)
# compute embedding
embed = self.embedding(key)
# compute smooth step interpolation of embeddings
smoothed_lower_point = smooth_step_2((unsqueezed_xyzt - lower_point) / self.dx)
smoother_upper_point = smooth_step_2(-(unsqueezed_xyzt - upper_point) / self.dx)
weights = _hyper_cube_weighting(smoothed_lower_point, smoother_upper_point)
# add embedding to list
hash_xyzt = torch.sum(embed * weights, dim=-2)
x = torch.reshape(hash_xyzt, [hash_xyzt.shape[0], -1])
# add other features
if self.params_var is not None:
in_params_var = self.prepare_input(
in_vars,
self.params_var,
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
x = torch.cat((x, in_params_var), dim=-1)
x = self.fc(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
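# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library) of the spatial-hash lookup
# used in MultiresolutionHashNetArch.forward: integer grid indices are folded
# into a single key, shuffled, and masked into the range of a fixed-size
# embedding table. The constants mirror the ones used above, but the sketch
# is for illustration only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    log2_hashmap_size = 19
    nr_levels = 16
    table_size = nr_levels * (1 << log2_hashmap_size)
    embedding = torch.nn.Embedding(table_size, 2)

    # integer (i, j) corner indices of a few 2D grid cells
    indices = torch.tensor([[0, 0], [1, 0], [7, 3]])
    strides = torch.tensor([1, 128])  # per-axis multiply factors
    key = torch.sum(indices * strides, dim=-1)
    key = 10000003 * key + 124777 * torch.bitwise_xor(key, torch.tensor(3563504501))
    key = key & torch.tensor(table_size - 1)  # mask into the table
    features = embedding(key)
    print(features.shape)  # torch.Size([3, 2])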
| modulus-sym-main | modulus/sym/models/hash_encoding_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Dict, Tuple, Union
from modulus.sym.key import Key
import torch
import torch.nn as nn
from torch import Tensor
import logging
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from typing import List
logger = logging.getLogger(__name__)
if torch.cuda.is_available():
major, minor = torch.cuda.get_device_capability()
compute_capability = major * 10 + minor
if compute_capability < 80:
logger.warning(
"Detected GPU architecture older than Ampere. Please check documentation for instructions to recompile and run tinycudann for your GPU"
)
import tinycudann as tcnn
else:
raise ImportError("Tiny CUDA NN only supported on CUDA enabled GPUs")
class TinyCudaNNArchCore(Arch):
"""
Fully Fused Multi Layer Perceptron (MLP) architecture.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
periodicity : Union[Dict[str, Tuple[float, float]], None] = None
Dictionary of tuples that allows making model
give periodic predictions on the given bounds in
tuple. For example, `periodicity={'x': (0, 1)}` would
make the network give periodic results for `x` on the
interval `(0, 1)`.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 64
Layer size for every hidden layer of the model.
nr_layers : int = 2
Number of hidden layers of the model.
activation_fn : layers.Activation = layers.Activation.SIGMOID
Activation function used by network.
fully_fused : bool = True
Whether to use a fully fused MLP kernel implementation
This option is only respected if the number of neurons per layer
is one of [16, 32, 64, 128] and is supported only on Turing+
architectures
encoding_config : Optional[Dict] = None
Optional encoding configuration dictionary
See here for specifics: https://github.com/NVlabs/tiny-cuda-nn/blob/master/DOCUMENTATION.md#encodings
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
detach_keys: List[Key] = [],
layer_size: int = 64,
nr_layers: int = 2,
activation_fn=layers.Activation.SIGMOID,
fully_fused: bool = True,
encoding_config: Optional[Dict] = None,
) -> None:
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
detach_keys=detach_keys,
periodicity=periodicity,
)
# supported activations
supported_activations = {
layers.Activation.RELU: "ReLU",
# layers.Activation.EXP : "Exponential",
# layers.Activation.SIN : "Sine",
layers.Activation.SIGMOID: "Sigmoid",
layers.Activation.SQUAREPLUS: "Squareplus",
layers.Activation.SOFTPLUS: "Softplus",
layers.Activation.IDENTITY: "None",
}
if activation_fn not in supported_activations.keys():
raise ValueError(
f"{activation_fn} activation is not supported for fused architectures."
)
if self.periodicity is not None:
in_features = sum(
[
x.size
for x in self.input_keys
if x.name not in list(periodicity.keys())
]
            ) + sum(
[
2 * x.size
for x in self.input_keys
if x.name in list(periodicity.keys())
]
)
else:
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
if fully_fused and (layer_size not in set([16, 32, 64, 128])):
fully_fused = False
logger.warning(
f"Unsupported layer_size {layer_size} for FullyFusedMLP. Using CutlassMLP instead."
)
network_config = {
"otype": "FullyFusedMLP" if fully_fused else "CutlassMLP",
"activation": supported_activations[activation_fn],
"output_activation": "None",
"n_neurons": layer_size,
"n_hidden_layers": nr_layers,
}
if encoding_config is not None:
self._impl = tcnn.NetworkWithInputEncoding(
in_features, out_features, encoding_config, network_config
)
else:
self._impl = tcnn.Network(in_features, out_features, network_config)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
periodicity=self.periodicity,
)
y = self._impl(x)
return self.prepare_output(
y, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
def make_node(self, name: str, jit: bool = False, optimize: bool = True):
if jit:
logger.warning(
"JIT compilation not supported for TinyCudaNNArchCore. Creating node with JIT turned off"
)
return super().make_node(name, False, optimize)
class FusedMLPArch(TinyCudaNNArchCore):
"""
Fully Fused Multi Layer Perceptron (MLP) architecture.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
periodicity : Union[Dict[str, Tuple[float, float]], None] = None
Dictionary of tuples that allows making model
give periodic predictions on the given bounds in
tuple. For example, `periodicity={'x': (0, 1)}` would
make the network give periodic results for `x` on the
interval `(0, 1)`.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 64
Layer size for every hidden layer of the model.
nr_layers : int = 2
Number of hidden layers of the model.
activation_fn : layers.Activation = layers.Activation.SIGMOID
Activation function used by network.
fully_fused : bool = True
Whether to use a fully fused MLP kernel implementation
This option is only respected if the number of neurons per layer
is one of [16, 32, 64, 128] and is supported only on Turing+
architectures
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
detach_keys: List[Key] = [],
layer_size: int = 64,
nr_layers: int = 2,
activation_fn=layers.Activation.SIGMOID,
fully_fused: bool = True,
) -> None:
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
periodicity=periodicity,
detach_keys=detach_keys,
layer_size=layer_size,
nr_layers=nr_layers,
activation_fn=activation_fn,
fully_fused=fully_fused,
encoding_config=None,
)
class FusedFourierNetArch(TinyCudaNNArchCore):
"""
Fused Fourier Net architecture.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
periodicity : Union[Dict[str, Tuple[float, float]], None] = None
Dictionary of tuples that allows making model
give periodic predictions on the given bounds in
tuple. For example, `periodicity={'x': (0, 1)}` would
make the network give periodic results for `x` on the
interval `(0, 1)`.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 64
Layer size for every hidden layer of the model.
nr_layers : int = 2
Number of hidden layers of the model.
    activation_fn : layers.Activation = layers.Activation.SIGMOID
Activation function used by network.
fully_fused : bool = True
Whether to use a fully fused MLP kernel implementation
This option is only respected if the number of neurons per layer
is one of [16, 32, 64, 128] and is supported only on Turing+
architectures
n_frequencies : int = 12
number of frequencies to use in the encoding
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
detach_keys: List[Key] = [],
layer_size: int = 64,
nr_layers: int = 2,
activation_fn=layers.Activation.SIGMOID,
fully_fused: bool = True,
n_frequencies: int = 12,
) -> None:
encoding_config = {
"otype": "Frequency",
"n_frequencies": n_frequencies,
}
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
periodicity=periodicity,
detach_keys=detach_keys,
layer_size=layer_size,
nr_layers=nr_layers,
activation_fn=activation_fn,
fully_fused=fully_fused,
encoding_config=encoding_config,
)
class FusedGridEncodingNetArch(TinyCudaNNArchCore):
"""
    Fused Grid Encoding Net architecture.
Parameters
----------
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
periodicity : Union[Dict[str, Tuple[float, float]], None] = None
        Dictionary of tuples that allows making the model
        give periodic predictions within the bounds given in
        the tuple. For example, `periodicity={'x': (0, 1)}` would
        make the network give periodic results for `x` on the
        interval `(0, 1)`.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int = 64
Layer size for every hidden layer of the model.
nr_layers : int = 2
Number of hidden layers of the model.
    activation_fn : layers.Activation = layers.Activation.SIGMOID
Activation function used by network.
fully_fused : bool = True
Whether to use a fully fused MLP kernel implementation
This option is only respected if the number of neurons per layer
is one of [16, 32, 64, 128] and is supported only on Turing+
architectures
indexing : str = "Hash"
Type of backing storage of the grids. Can be "Hash", "Tiled"
or "Dense".
n_levels : int = 16
Number of levels (resolutions)
n_features_per_level : int = 2
Dimensionality of feature vector stored in each level's entries.
log2_hashmap_size : int = 19
If type is "Hash", is the base-2 logarithm of the number of
elements in each backing hash table.
base_resolution : int = 16
The resolution of the coarsest level is base_resolution^input_dims.
per_level_scale : float = 2.0
The geometric growth factor, i.e. the factor by which the resolution
        of each grid is larger (per axis) than that of the preceding level.
interpolation : str = "Smoothstep"
How to interpolate nearby grid lookups.
Can be "Nearest", "Linear", or "Smoothstep" (for smooth derivatives).
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
periodicity: Union[Dict[str, Tuple[float, float]], None] = None,
detach_keys: List[Key] = [],
layer_size: int = 64,
nr_layers: int = 2,
activation_fn=layers.Activation.SIGMOID,
fully_fused: bool = True,
indexing: str = "Hash",
n_levels: int = 16,
n_features_per_level: int = 2,
log2_hashmap_size: int = 19,
base_resolution: int = 16,
per_level_scale: float = 2.0,
interpolation: str = "Smoothstep",
) -> None:
if indexing not in ["Hash", "Tiled", "Dense"]:
raise ValueError(f"indexing type {indexing} not supported")
if interpolation not in ["Nearest", "Linear", "Smoothstep"]:
raise ValueError(f"interpolation type {interpolation} not supported")
encoding_config = {
"otype": "Grid",
"type": indexing,
"n_levels": n_levels,
"n_features_per_level": 2,
"log2_hashmap_size": 19,
"base_resolution": 16,
"per_level_scale": 2.0,
"interpolation": interpolation,
}
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
periodicity=periodicity,
detach_keys=detach_keys,
layer_size=layer_size,
nr_layers=nr_layers,
activation_fn=activation_fn,
fully_fused=fully_fused,
encoding_config=encoding_config,
)
| modulus-sym-main | modulus/sym/models/fused_mlp.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Dict, Tuple
from modulus.sym.key import Key
import copy
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from .interpolation import smooth_step_1, smooth_step_2
from modulus.sym.models.arch import Arch
from typing import List
class MovingTimeWindowArch(Arch):
"""
    Moving time window model that keeps track of the
    current time window and the previous window.
Parameters
----------
arch : Arch
Modulus architecture to use for moving time window.
window_size : float
Size of the time window. This will be used to slide
the window forward every iteration.
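    Example
    -------
    Illustrative sketch only; `FullyConnectedArch` is assumed to be imported
    from `modulus.sym.models.fully_connected`.
    >>> net = FullyConnectedArch([Key("x"), Key("t")], [Key("u")])
    >>> arch = MovingTimeWindowArch(net, window_size=1.0)
    >>> model = arch.make_node("time_window_net")
    >>> # after training on the current window, slide it forward
    >>> arch.move_window()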
"""
def __init__(
self,
arch: Arch,
window_size: float,
) -> None:
output_keys = (
arch.output_keys
+ [Key(x.name + "_prev_step") for x in arch.output_keys]
+ [Key(x.name + "_prev_step_diff") for x in arch.output_keys]
)
super().__init__(
input_keys=arch.input_keys,
output_keys=output_keys,
periodicity=arch.periodicity,
)
# set networks for current and prev time window
self.arch_prev_step = arch
self.arch = copy.deepcopy(arch)
# store time window parameters
self.window_size = window_size
self.window_location = nn.Parameter(torch.empty(1), requires_grad=False)
self.reset_parameters()
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
with torch.no_grad():
in_vars["t"] += self.window_location
y_prev_step = self.arch_prev_step.forward(in_vars)
y = self.arch.forward(in_vars)
y_keys = list(y.keys())
for key in y_keys:
y_prev = y_prev_step[key]
y[key + "_prev_step"] = y_prev
y[key + "_prev_step_diff"] = y[key] - y_prev
return y
def move_window(self):
self.window_location.data += self.window_size
for param, param_prev_step in zip(
self.arch.parameters(), self.arch_prev_step.parameters()
):
param_prev_step.data = param.detach().clone().data
param_prev_step.requires_grad = False
def reset_parameters(self) -> None:
nn.init.constant_(self.window_location, 0)
| modulus-sym-main | modulus/sym/models/moving_time_window.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Tuple, Optional, Union
import torch
import torch.nn as nn
from torch import Tensor
import modulus.sym.models.layers as layers
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
from modulus.sym.constants import NO_OP_NORM
class SirenArch(Arch):
"""Sinusoidal Representation Network (SIREN).
Parameters
----------
input_keys : List[Key]
Input key list.
output_keys : List[Key]
Output key list.
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
layer_size : int, optional
Layer size for every hidden layer of the model, by default 512
nr_layers : int, optional
Number of hidden layers of the model, by default 6
first_omega : float, optional
Scales first weight matrix by this factor, by default 30
omega : float, optional
Scales the weight matrix of all hidden layers by this factor, by default 30
normalization : Dict[str, Tuple[float, float]], optional
Normalization of input to network, by default None
Variable Shape
--------------
- Input variable tensor shape: :math:`[N, size]`
- Output variable tensor shape: :math:`[N, size]`
Example
-------
Siren model (2 -> 64 -> 64 -> 2)
    >>> arch = SirenArch(
>>> [Key("x", size=2)],
>>> [Key("y", size=2)],
>>> layer_size = 64,
>>> nr_layers = 2)
>>> model = arch.make_node()
>>> input = {"x": torch.randn(64, 2)}
>>> output = model.evaluate(input)
Note
----
Reference: Sitzmann, Vincent, et al.
Implicit Neural Representations with Periodic Activation Functions.
https://arxiv.org/abs/2006.09661.
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
detach_keys: List[Key] = [],
layer_size: int = 512,
nr_layers: int = 6,
first_omega: float = 30.0,
omega: float = 30.0,
normalization: Dict[str, Tuple[float, float]] = None,
) -> None:
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
in_features = sum(self.input_key_dict.values())
out_features = sum(self.output_key_dict.values())
layers_list = []
layers_list.append(
layers.SirenLayer(
in_features,
layer_size,
layers.SirenLayerType.FIRST,
first_omega,
)
)
for _ in range(nr_layers - 1):
layers_list.append(
layers.SirenLayer(
layer_size, layer_size, layers.SirenLayerType.HIDDEN, omega
)
)
layers_list.append(
layers.SirenLayer(
layer_size, out_features, layers.SirenLayerType.LAST, omega
)
)
self.layers = nn.Sequential(*layers_list)
self.normalization: Optional[Dict[str, Tuple[float, float]]] = normalization
# iterate input keys and add NO_OP_NORM if it is not specified
if self.normalization is not None:
for key in self.input_key_dict:
if key not in self.normalization:
self.normalization[key] = NO_OP_NORM
self.register_buffer(
"normalization_tensor",
self._get_normalization_tensor(self.input_key_dict, self.normalization),
persistent=False,
)
def _tensor_forward(self, x: Tensor) -> Tensor:
x = self._tensor_normalize(x, self.normalization_tensor)
x = self.process_input(
x, self.input_scales_tensor, input_dict=self.input_key_dict, dim=-1
)
x = self.layers(x)
x = self.process_output(x, self.output_scales_tensor)
return x
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.concat_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
)
y = self._tensor_forward(x)
return self.split_output(y, self.output_key_dict, dim=-1)
def _dict_forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
"""
This is the original forward function, left here for the correctness test.
"""
x = self.prepare_input(
self._normalize(in_vars, self.normalization),
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=-1,
input_scales=self.input_scales,
)
x = self.layers(x)
return self.prepare_output(
x, self.output_key_dict, dim=-1, output_scales=self.output_scales
)
def _normalize(
self,
in_vars: Dict[str, Tensor],
norms: Optional[Dict[str, Tuple[float, float]]],
) -> Dict[str, Tensor]:
if norms is None:
return in_vars
normalized_in_vars = {}
for k, v in in_vars.items():
if k in norms:
v = (v - norms[k][0]) / (norms[k][1] - norms[k][0])
v = 2 * v - 1
normalized_in_vars[k] = v
return normalized_in_vars
| modulus-sym-main | modulus/sym/models/siren.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Union, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
import numpy as np
import logging
import modulus.sym.models.layers as layers
from modulus.sym.models.layers import Activation
from modulus.sym.models.layers.spectral_layers import (
calc_latent_derivatives,
first_order_pino_grads,
second_order_pino_grads,
)
from modulus.sym.models.arch import Arch
from modulus.sym.models.fully_connected import ConvFullyConnectedArch
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.constants import JIT_PYTORCH_VERSION
logger = logging.getLogger(__name__)
class FNO1DEncoder(nn.Module):
def __init__(
self,
in_channels: int = 1,
nr_fno_layers: int = 4,
fno_layer_size: int = 32,
fno_modes: Union[int, List[int]] = 16,
padding: Union[int, List[int]] = 8,
padding_type: str = "constant",
activation_fn: Activation = Activation.GELU,
coord_features: bool = True,
) -> None:
super().__init__()
self.in_channels = in_channels
self.nr_fno_layers = nr_fno_layers
self.fno_width = fno_layer_size
self.coord_features = coord_features
# Spectral modes to have weights
if isinstance(fno_modes, int):
fno_modes = [fno_modes]
# Add relative coordinate feature
if self.coord_features:
self.in_channels = self.in_channels + 1
self.activation_fn = layers.get_activation_fn(activation_fn)
self.spconv_layers = nn.ModuleList()
self.conv_layers = nn.ModuleList()
# Initial lift layer
self.lift_layer = layers.Conv1dFCLayer(self.in_channels, self.fno_width)
# Build Neural Fourier Operators
for _ in range(self.nr_fno_layers):
self.spconv_layers.append(
layers.SpectralConv1d(self.fno_width, self.fno_width, fno_modes[0])
)
self.conv_layers.append(nn.Conv1d(self.fno_width, self.fno_width, 1))
# Padding values for spectral conv
if isinstance(padding, int):
padding = [padding]
self.pad = padding[:1]
self.ipad = [-pad if pad > 0 else None for pad in self.pad]
self.padding_type = padding_type
def forward(self, x: Tensor) -> Tensor:
if self.coord_features:
coord_feat = self.meshgrid(list(x.shape), x.device)
x = torch.cat((x, coord_feat), dim=1)
x = self.lift_layer(x)
# (left, right)
x = F.pad(x, (0, self.pad[0]), mode=self.padding_type)
# Spectral layers
for k, conv_w in enumerate(zip(self.conv_layers, self.spconv_layers)):
conv, w = conv_w
if k < len(self.conv_layers) - 1:
x = self.activation_fn(
conv(x) + w(x)
) # Spectral Conv + GELU causes JIT issue!
else:
x = conv(x) + w(x)
x = x[..., : self.ipad[0]]
return x
def meshgrid(self, shape: List[int], device: torch.device):
bsize, size_x = shape[0], shape[2]
grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1)
return grid_x
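# Illustrative shape sketch for FNO1DEncoder above (sizes are assumptions, not
# requirements): the encoder keeps the spatial size and lifts the channels to
# `fno_layer_size` latent features.
#   enc = FNO1DEncoder(in_channels=2, fno_layer_size=32)
#   z = enc(torch.randn(4, 2, 64))  # -> torch.Size([4, 32, 64])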
class FNO2DEncoder(nn.Module):
def __init__(
self,
in_channels: int = 1,
nr_fno_layers: int = 4,
fno_layer_size: int = 32,
fno_modes: Union[int, List[int]] = 16,
padding: Union[int, List[int]] = 8,
padding_type: str = "constant",
activation_fn: Activation = Activation.GELU,
coord_features: bool = True,
) -> None:
super().__init__()
self.in_channels = in_channels
self.nr_fno_layers = nr_fno_layers
self.fno_width = fno_layer_size
self.coord_features = coord_features
# Spectral modes to have weights
if isinstance(fno_modes, int):
fno_modes = [fno_modes, fno_modes]
# Add relative coordinate feature
if self.coord_features:
self.in_channels = self.in_channels + 2
self.activation_fn = layers.get_activation_fn(activation_fn)
self.spconv_layers = nn.ModuleList()
self.conv_layers = nn.ModuleList()
# Initial lift layer
self.lift_layer = layers.Conv2dFCLayer(self.in_channels, self.fno_width)
# Build Neural Fourier Operators
for _ in range(self.nr_fno_layers):
self.spconv_layers.append(
layers.SpectralConv2d(
self.fno_width, self.fno_width, fno_modes[0], fno_modes[1]
)
)
self.conv_layers.append(nn.Conv2d(self.fno_width, self.fno_width, 1))
# Padding values for spectral conv
if isinstance(padding, int):
padding = [padding, padding]
padding = padding + [0, 0] # Pad with zeros for smaller lists
self.pad = padding[:2]
self.ipad = [-pad if pad > 0 else None for pad in self.pad]
self.padding_type = padding_type
def forward(self, x: Tensor) -> Tensor:
assert (
x.dim() == 4
), "Only 4D tensors [batch, in_channels, grid_x, grid_y] accepted for 2D FNO"
if self.coord_features:
coord_feat = self.meshgrid(list(x.shape), x.device)
x = torch.cat((x, coord_feat), dim=1)
x = self.lift_layer(x)
# (left, right, top, bottom)
x = F.pad(x, (0, self.pad[0], 0, self.pad[1]), mode=self.padding_type)
# Spectral layers
for k, conv_w in enumerate(zip(self.conv_layers, self.spconv_layers)):
conv, w = conv_w
if k < len(self.conv_layers) - 1:
x = self.activation_fn(
conv(x) + w(x)
) # Spectral Conv + GELU causes JIT issue!
else:
x = conv(x) + w(x)
# remove padding
x = x[..., : self.ipad[1], : self.ipad[0]]
return x
def meshgrid(self, shape: List[int], device: torch.device):
bsize, size_x, size_y = shape[0], shape[2], shape[3]
grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
grid_y = torch.linspace(0, 1, size_y, dtype=torch.float32, device=device)
grid_x, grid_y = torch.meshgrid(grid_x, grid_y, indexing="ij")
grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1)
grid_y = grid_y.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1)
return torch.cat((grid_x, grid_y), dim=1)
class FNO3DEncoder(nn.Module):
def __init__(
self,
in_channels: int = 1,
nr_fno_layers: int = 4,
fno_layer_size: int = 32,
fno_modes: Union[int, List[int]] = 16,
padding: Union[int, List[int]] = 8,
padding_type: str = "constant",
activation_fn: Activation = Activation.GELU,
coord_features: bool = True,
) -> None:
super().__init__()
self.in_channels = in_channels
self.nr_fno_layers = nr_fno_layers
self.fno_width = fno_layer_size
self.coord_features = coord_features
# Spectral modes to have weights
if isinstance(fno_modes, int):
fno_modes = [fno_modes, fno_modes, fno_modes]
# Add relative coordinate feature
if self.coord_features:
self.in_channels = self.in_channels + 3
self.activation_fn = layers.get_activation_fn(activation_fn)
self.spconv_layers = nn.ModuleList()
self.conv_layers = nn.ModuleList()
# Initial lift layer
self.lift_layer = layers.Conv3dFCLayer(self.in_channels, self.fno_width)
# Build Neural Fourier Operators
for _ in range(self.nr_fno_layers):
self.spconv_layers.append(
layers.SpectralConv3d(
self.fno_width,
self.fno_width,
fno_modes[0],
fno_modes[1],
fno_modes[2],
)
)
self.conv_layers.append(nn.Conv3d(self.fno_width, self.fno_width, 1))
# Padding values for spectral conv
if isinstance(padding, int):
padding = [padding, padding, padding]
padding = padding + [0, 0, 0] # Pad with zeros for smaller lists
self.pad = padding[:3]
self.ipad = [-pad if pad > 0 else None for pad in self.pad]
self.padding_type = padding_type
def forward(self, x: Tensor) -> Tensor:
if self.coord_features:
coord_feat = self.meshgrid(list(x.shape), x.device)
x = torch.cat((x, coord_feat), dim=1)
x = self.lift_layer(x)
# (left, right, top, bottom, front, back)
x = F.pad(
x,
(0, self.pad[0], 0, self.pad[1], 0, self.pad[2]),
mode=self.padding_type,
)
# Spectral layers
for k, conv_w in enumerate(zip(self.conv_layers, self.spconv_layers)):
conv, w = conv_w
if k < len(self.conv_layers) - 1:
x = self.activation_fn(
conv(x) + w(x)
) # Spectral Conv + GELU causes JIT issue!
else:
x = conv(x) + w(x)
x = x[..., : self.ipad[2], : self.ipad[1], : self.ipad[0]]
return x
def meshgrid(self, shape: List[int], device: torch.device):
bsize, size_x, size_y, size_z = shape[0], shape[2], shape[3], shape[4]
grid_x = torch.linspace(0, 1, size_x, dtype=torch.float32, device=device)
grid_y = torch.linspace(0, 1, size_y, dtype=torch.float32, device=device)
grid_z = torch.linspace(0, 1, size_z, dtype=torch.float32, device=device)
grid_x, grid_y, grid_z = torch.meshgrid(grid_x, grid_y, grid_z, indexing="ij")
grid_x = grid_x.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
grid_y = grid_y.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
grid_z = grid_z.unsqueeze(0).unsqueeze(0).repeat(bsize, 1, 1, 1, 1)
return torch.cat((grid_x, grid_y, grid_z), dim=1)
def grid_to_points1d(vars_dict: Dict[str, Tensor]):
for var, value in vars_dict.items():
value = torch.permute(value, (0, 2, 1))
vars_dict[var] = value.reshape(-1, value.size(-1))
return vars_dict
def points_to_grid1d(vars_dict: Dict[str, Tensor], shape: List[int]):
for var, value in vars_dict.items():
value = value.reshape(shape[0], shape[2], value.size(-1))
vars_dict[var] = torch.permute(value, (0, 2, 1))
return vars_dict
def grid_to_points2d(vars_dict: Dict[str, Tensor]):
for var, value in vars_dict.items():
value = torch.permute(value, (0, 2, 3, 1))
vars_dict[var] = value.reshape(-1, value.size(-1))
return vars_dict
def points_to_grid2d(vars_dict: Dict[str, Tensor], shape: List[int]):
for var, value in vars_dict.items():
value = value.reshape(shape[0], shape[2], shape[3], value.size(-1))
vars_dict[var] = torch.permute(value, (0, 3, 1, 2))
return vars_dict
def grid_to_points3d(vars_dict: Dict[str, Tensor]):
for var, value in vars_dict.items():
value = torch.permute(value, (0, 2, 3, 4, 1))
vars_dict[var] = value.reshape(-1, value.size(-1))
return vars_dict
def points_to_grid3d(vars_dict: Dict[str, Tensor], shape: List[int]):
for var, value in vars_dict.items():
value = value.reshape(shape[0], shape[2], shape[3], shape[4], value.size(-1))
vars_dict[var] = torch.permute(value, (0, 4, 1, 2, 3))
return vars_dict
class FNOArch(Arch):
"""Fourier neural operator (FNO) model.
Note
----
The FNO architecture supports options for 1D, 2D and 3D fields which can
be controlled using the `dimension` parameter.
Parameters
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
dimension : int
Model dimensionality (supports 1, 2, 3).
decoder_net : Arch
Pointwise decoder network, input key should be the latent variable
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
nr_fno_layers : int, optional
Number of spectral convolution layers, by default 4
fno_modes : Union[int, List[int]], optional
Number of Fourier modes with learnable weights, by default 16
padding : int, optional
Padding size for FFT calculations, by default 8
padding_type : str, optional
Padding type for FFT calculations ('constant', 'reflect', 'replicate'
or 'circular'), by default "constant"
activation_fn : Activation, optional
Activation function, by default Activation.GELU
coord_features : bool, optional
Use coordinate meshgrid as additional input feature, by default True
Variable Shape
--------------
Input variable tensor shape:
- 1D: :math:`[N, size, W]`
- 2D: :math:`[N, size, H, W]`
- 3D: :math:`[N, size, D, H, W]`
Output variable tensor shape:
- 1D: :math:`[N, size, W]`
- 2D: :math:`[N, size, H, W]`
- 3D: :math:`[N, size, D, H, W]`
Example
-------
1D FNO model
>>> decoder = FullyConnectedArch([Key("z", size=32)], [Key("y", size=2)])
>>> fno_1d = FNOArch([Key("x", size=2)], dimension=1, decoder_net=decoder)
>>> model = fno_1d.make_node()
>>> input = {"x": torch.randn(20, 2, 64)}
>>> output = model.evaluate(input)
2D FNO model
>>> decoder = ConvFullyConnectedArch([Key("z", size=32)], [Key("y", size=2)])
>>> fno_2d = FNOArch([Key("x", size=2)], dimension=2, decoder_net=decoder)
>>> model = fno_2d.make_node()
>>> input = {"x": torch.randn(20, 2, 64, 64)}
>>> output = model.evaluate(input)
3D FNO model
    >>> decoder = SirenArch([Key("z", size=32)], [Key("y", size=2)])
>>> fno_3d = FNOArch([Key("x", size=2)], dimension=3, decoder_net=decoder)
>>> model = fno_3d.make_node()
>>> input = {"x": torch.randn(20, 2, 64, 64, 64)}
>>> output = model.evaluate(input)
"""
def __init__(
self,
input_keys: List[Key],
dimension: int,
decoder_net: Arch,
detach_keys: List[Key] = [],
nr_fno_layers: int = 4,
fno_modes: Union[int, List[int]] = 16,
padding: int = 8,
padding_type: str = "constant",
activation_fn: Activation = Activation.GELU,
coord_features: bool = True,
) -> None:
super().__init__(input_keys=input_keys, output_keys=[], detach_keys=detach_keys)
self.dimension = dimension
self.nr_fno_layers = nr_fno_layers
self.fno_modes = fno_modes
self.padding = padding
self.padding_type = padding_type
self.activation_fn = activation_fn
self.coord_features = coord_features
# decoder net
self.decoder_net = decoder_net
self.calc_pino_gradients = False
self.output_keys = self.decoder_net.output_keys
self.output_key_dict = {str(var): var.size for var in self.output_keys}
self.output_scales = {str(k): k.scale for k in self.output_keys}
self.latent_key = self.decoder_net.input_keys
self.latent_key_dict = {str(var): var.size for var in self.latent_key}
assert (
len(self.latent_key) == 1
), "FNO decoder network should only have a single input key"
self.latent_key = str(self.latent_key[0])
in_channels = sum(self.input_key_dict.values())
self.fno_layer_size = sum(self.latent_key_dict.values())
if self.dimension == 1:
FNOModel = FNO1DEncoder
self.grid_to_points = grid_to_points1d # For JIT
self.points_to_grid = points_to_grid1d # For JIT
elif self.dimension == 2:
FNOModel = FNO2DEncoder
self.grid_to_points = grid_to_points2d # For JIT
self.points_to_grid = points_to_grid2d # For JIT
elif self.dimension == 3:
FNOModel = FNO3DEncoder
self.grid_to_points = grid_to_points3d # For JIT
self.points_to_grid = points_to_grid3d # For JIT
else:
raise NotImplementedError(
"Invalid dimensionality. Only 1D, 2D and 3D FNO implemented"
)
self.spec_encoder = FNOModel(
in_channels,
nr_fno_layers=self.nr_fno_layers,
fno_layer_size=self.fno_layer_size,
fno_modes=self.fno_modes,
padding=self.padding,
padding_type=self.padding_type,
activation_fn=self.activation_fn,
coord_features=self.coord_features,
)
def add_pino_gradients(
self, derivatives: List[Key], domain_length: List[float] = [1.0, 1.0]
) -> None:
"""Adds PINO "exact" gradient calculations model outputs.
Note
----
        This will constrain the FNO decoder to a two-layer fully-connected model with
        Tanh activation functions. This is done for computational efficiency since
        gradient calculations are explicit. Auto-diff is far too slow for this method.
Parameters
----------
derivatives : List[Key]
List of derivative keys
domain_length : List[float], optional
Domain size of input grid. Needed for calculating the gradients of the latent
variables. By default [1.0, 1.0]
Raises
------
ValueError
            If the domain length list is not the same size as the FNO model dimension
Note
----
For details on the "exact" gradient calculation refer to section 3.3 in:
https://arxiv.org/pdf/2111.03794.pdf
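        Example
        -------
        A hedged sketch requesting du/dx and d2u/dx2 from a 2D model (the key
        names and the `fno_2d` instance are illustrative assumptions)
        >>> fno_2d.add_pino_gradients(
        >>>     derivatives=[Key("u__x"), Key("u__x__x")],
        >>>     domain_length=[1.0, 1.0])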
"""
assert (
len(domain_length) == self.dimension
), "Domain length must be same length as the dimension of the model"
self.domain_length = domain_length
logger.warning(
"Switching decoder to two layer FC model with Tanh activations for PINO"
)
self.decoder_net = ConvFullyConnectedArch(
input_keys=self.decoder_net.input_keys,
output_keys=self.decoder_net.output_keys,
layer_size=self.fno_layer_size,
nr_layers=1,
activation_fn=Activation.TANH,
skip_connections=False,
adaptive_activations=False,
)
self.calc_pino_gradients = True
self.first_order_pino = False
self.second_order_pino = False
self.derivative_keys = []
for var in derivatives:
dx_name = str(var).split("__") # Split name to get original var names
if len(dx_name) == 2: # First order
assert (
dx_name[1] in ["x", "y", "z"][: self.dimension]
), f"Invalid first-order derivative {str(var)} for {self.dimension}d FNO"
self.derivative_keys.append(var)
self.first_order_pino = True
elif len(dx_name) == 3:
assert (
dx_name[1] in ["x", "y", "z"][: self.dimension]
and dx_name[1] == dx_name[2]
), f"Invalid second-order derivative {str(var)} for {self.dimension}d FNO"
self.derivative_keys.append(var)
self.second_order_pino = True
elif len(dx_name) > 3:
raise ValueError(
"FNO only supports first order and laplacian second order derivatives"
)
# Add derivative keys into output keys
self.output_keys_fno = self.output_keys.copy()
self.output_key_fno_dict = {str(var): var.size for var in self.output_keys_fno}
self.output_keys = self.output_keys + self.derivative_keys
self.output_key_dict = {str(var): var.size for var in self.output_keys}
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
y_latent = self.spec_encoder(x)
y_shape = list(y_latent.size())
y_input = {self.latent_key: y_latent}
# Reshape to pointwise inputs if not a conv FC model
if self.decoder_net.var_dim == -1:
y_input = self.grid_to_points(y_input)
y = self.decoder_net(y_input)
# Convert back into grid
if self.decoder_net.var_dim == -1:
y = self.points_to_grid(y, y_shape)
if self.calc_pino_gradients:
output_grads = self.calc_pino_derivatives(y_latent)
y.update(output_grads)
return y
@torch.jit.ignore
def calc_pino_derivatives(self, latent: Tensor) -> Dict[str, Tensor]:
# Calculate the gradients of latent variables
# This is done using FFT and is the reason we need a domain size
lat_dx, lat_ddx = calc_latent_derivatives(latent, self.domain_length)
# Get weight matrices from decoder
weights, biases = self.decoder_net._impl.get_weight_list()
outputs = {}
# calc first order derivatives
if self.first_order_pino:
output_dx = first_order_pino_grads(
u=latent,
ux=lat_dx,
weights_1=weights[0],
weights_2=weights[1],
bias_1=biases[0],
)
# Build output dictionary manually (would normally use prepare_output)
dims = ["x", "y", "z"]
for d in range(len(output_dx)): # Loop through dimensions
for k, v in zip(
self.output_keys_fno,
torch.split(
output_dx[d], list(self.output_key_fno_dict.values()), dim=1
),
): # Loop through variables
if f"{k}__{dims[d]}__{dims[d]}" in self.output_key_dict.keys():
out_scale = self.decoder_net.output_scales[str(k)][
1
] # Apply out scaling to grads
outputs[f"{k}__{dims[d]}"] = v * out_scale
        # calc second order derivatives
if self.second_order_pino:
output_dxx = second_order_pino_grads(
u=latent,
ux=lat_dx,
uxx=lat_ddx,
weights_1=weights[0],
weights_2=weights[1],
bias_1=biases[0],
)
# Build output dictionary manually (would normally use prepare_output)
dims = ["x", "y", "z"]
for d in range(len(output_dxx)): # Loop through dimensions
for k, v in zip(
self.output_keys_fno,
torch.split(
output_dxx[d], list(self.output_key_fno_dict.values()), dim=1
),
): # Loop through variables
if f"{k}__{dims[d]}__{dims[d]}" in self.output_key_dict.keys():
out_scale = self.decoder_net.output_scales[str(k)][
1
] # Apply out scaling to grads
outputs[f"{k}__{dims[d]}__{dims[d]}"] = v * out_scale
return outputs
| modulus-sym-main | modulus/sym/models/fno.py |
# ignore_header_test
""""""
"""
Pix2Pix model. This code was modified from, https://github.com/NVIDIA/pix2pixHD
The following license is provided from their source,
Copyright (C) 2019 NVIDIA Corporation. Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu.
BSD License. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
--------------------------- LICENSE FOR pytorch-CycleGAN-and-pix2pix ----------------
Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import torch.nn as nn
import functools
from typing import List, Dict
from torch.autograd import Variable
import numpy as np
from modulus.sym.key import Key
import modulus.sym.models.layers as layers
from modulus.sym.models.layers import Activation
from modulus.sym.models.arch import Arch
Tensor = torch.Tensor
class Pix2PixModelCore(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
dimension: int,
conv_layer_size: int = 64,
n_downsampling: int = 3,
n_upsampling: int = 3,
n_blocks: int = 3,
batch_norm: bool = False,
padding_type: str = "reflect",
activation_fn: Activation = Activation.RELU,
):
assert (
n_blocks >= 0 and n_downsampling >= 0 and n_upsampling >= 0
), "Invalid arch params"
assert padding_type in ["reflect", "zero", "replicate"], "Invalid padding type"
super().__init__()
activation = layers.get_activation_fn(activation_fn, module=True, inplace=True)
# set padding and convolutions
if dimension == 1:
padding = nn.ReflectionPad1d(3)
conv = nn.Conv1d
trans_conv = nn.ConvTranspose1d
norm = nn.BatchNorm1d
elif dimension == 2:
padding = nn.ReflectionPad2d(3)
conv = nn.Conv2d
trans_conv = nn.ConvTranspose2d
norm = nn.BatchNorm2d
elif dimension == 3:
padding = nn.ReflectionPad3d(3)
conv = nn.Conv3d
trans_conv = nn.ConvTranspose3d
norm = nn.BatchNorm3d
else:
raise ValueError(
f"Pix2Pix only supported dimensions 1, 2, 3. Got {dimension}"
)
model = [
padding,
conv(in_channels, conv_layer_size, kernel_size=7, padding=0),
]
if batch_norm:
model.append(norm(conv_layer_size))
model.append(activation)
### downsample
for i in range(n_downsampling):
mult = 2**i
model.append(
conv(
conv_layer_size * mult,
conv_layer_size * mult * 2,
kernel_size=3,
stride=2,
padding=1,
)
)
if batch_norm:
model.append(norm(conv_layer_size * mult * 2))
model.append(activation)
### resnet blocks
mult = 2**n_downsampling
for i in range(n_blocks):
model += [
ResnetBlock(
dimension,
conv_layer_size * mult,
padding_type=padding_type,
activation=activation,
use_batch_norm=batch_norm,
)
]
### upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model.append(
trans_conv(
int(conv_layer_size * mult),
int(conv_layer_size * mult / 2),
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
)
)
if batch_norm:
model.append(norm(int(conv_layer_size * mult / 2)))
model.append(activation)
# super-resolution layers
for i in range(max([0, n_upsampling - n_downsampling])):
model.append(
trans_conv(
int(conv_layer_size),
int(conv_layer_size),
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
)
)
if batch_norm:
model.append(norm(conv_layer_size))
model.append(activation)
model += [
padding,
conv(conv_layer_size, out_channels, kernel_size=7, padding=0),
]
self.model = nn.Sequential(*model)
def forward(self, input: Tensor) -> Tensor:
y = self.model(input)
return y
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(
self,
dimension: int,
channels: int,
padding_type: str = "zero",
activation: nn.Module = nn.ReLU(True),
use_batch_norm: bool = False,
use_dropout: bool = False,
):
super().__init__()
if dimension == 1:
conv = nn.Conv1d
if padding_type == "reflect":
padding = nn.ReflectionPad1d(1)
elif padding_type == "replicate":
padding = nn.ReplicationPad1d(1)
elif padding_type == "zero":
padding = 1
norm = nn.BatchNorm1d
elif dimension == 2:
conv = nn.Conv2d
if padding_type == "reflect":
padding = nn.ReflectionPad2d(1)
elif padding_type == "replicate":
padding = nn.ReplicationPad2d(1)
elif padding_type == "zero":
padding = 1
norm = nn.BatchNorm2d
elif dimension == 3:
conv = nn.Conv3d
if padding_type == "reflect":
padding = nn.ReflectionPad3d(1)
elif padding_type == "replicate":
padding = nn.ReplicationPad3d(1)
elif padding_type == "zero":
padding = 1
norm = nn.BatchNorm3d
conv_block = []
p = 0
if padding_type != "zero":
conv_block += [padding]
conv_block.append(conv(channels, channels, kernel_size=3, padding=p))
if use_batch_norm:
conv_block.append(norm(channels))
conv_block.append(activation)
if use_dropout:
conv_block += [nn.Dropout(0.5)]
if padding_type != "zero":
conv_block += [padding]
conv_block += [
conv(channels, channels, kernel_size=3, padding=p),
]
if use_batch_norm:
conv_block.append(norm(channels))
self.conv_block = nn.Sequential(*conv_block)
def forward(self, x: Tensor) -> Tensor:
out = x + self.conv_block(x)
return out
class Pix2PixArch(Arch):
"""Convolutional encoder-decoder based on pix2pix generator models.
Note
----
The pix2pix architecture supports options for 1D, 2D and 3D fields which can
    be controlled using the `dimension` parameter.
Parameters
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
output_keys : List[Key]
Output key list. The key dimension size should equal the variables channel dim.
dimension : int
Model dimensionality (supports 1, 2, 3).
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
conv_layer_size : int, optional
Latent channel size after first convolution, by default 64
n_downsampling : int, optional
Number of downsampling/upsampling blocks, by default 3
n_blocks : int, optional
Number of residual blocks in middle of model, by default 3
scaling_factor : int, optional
Scaling factor to increase the output feature size compared to the input
(1, 2, 4, or 8), by default 1
activation_fn : Activation, optional
Activation function, by default :obj:`Activation.RELU`
batch_norm : bool, optional
Batch normalization, by default False
padding_type : str, optional
        Padding type ('reflect', 'zero' or 'replicate'),
by default "reflect"
Variable Shape
--------------
Input variable tensor shape:
- 1D: :math:`[N, size, W]`
- 2D: :math:`[N, size, H, W]`
- 3D: :math:`[N, size, D, H, W]`
Output variable tensor shape:
- 1D: :math:`[N, size, W]`
- 2D: :math:`[N, size, H, W]`
- 3D: :math:`[N, size, D, H, W]`
Note
----
Reference: Isola, Phillip, et al. “Image-To-Image translation with conditional
adversarial networks” Conference on Computer Vision and Pattern Recognition, 2017.
https://arxiv.org/abs/1611.07004
Reference: Wang, Ting-Chun, et al. “High-Resolution image synthesis and semantic
manipulation with conditional GANs” Conference on Computer Vision and Pattern
Recognition, 2018. https://arxiv.org/abs/1711.11585
Note
----
Based on the implementation: https://github.com/NVIDIA/pix2pixHD
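    Example
    -------
    2D encoder-decoder model (channel and grid sizes below are illustrative
    assumptions, not requirements)
    >>> arch = Pix2PixArch(
    >>>     [Key("x", size=1)],
    >>>     [Key("y", size=1)],
    >>>     dimension=2)
    >>> model = arch.make_node("pix2pix")
    >>> input = {"x": torch.randn(4, 1, 64, 64)}
    >>> output = model.evaluate(input)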
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
dimension: int,
detach_keys: List[Key] = [],
conv_layer_size: int = 64,
n_downsampling: int = 3,
n_blocks: int = 3,
scaling_factor: int = 1,
activation_fn: Activation = Activation.RELU,
batch_norm: bool = False,
padding_type="reflect",
):
super().__init__(
input_keys=input_keys, output_keys=output_keys, detach_keys=detach_keys
)
in_channels = sum(self.input_key_dict.values())
out_channels = sum(self.output_key_dict.values())
self.var_dim = 1
# Scaling factor must be 1, 2, 4, or 8
scaling_factor = int(scaling_factor)
assert scaling_factor in {
1,
2,
4,
8,
}, "The scaling factor must be 1, 2, 4, or 8!"
n_upsampling = n_downsampling + int(np.log2(scaling_factor))
self._impl = Pix2PixModelCore(
in_channels,
out_channels,
dimension,
conv_layer_size,
n_downsampling,
n_upsampling,
n_blocks,
batch_norm,
padding_type,
activation_fn,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
input = self.prepare_input(
in_vars,
self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
output = self._impl(input)
return self.prepare_output(
output, self.output_key_dict, dim=1, output_scales=self.output_scales
)
| modulus-sym-main | modulus/sym/models/pix2pix.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from typing import Optional
from typing import Union
import torch
import torch.nn as nn
from torch import Tensor
from .weight_norm import WeightNormLinear
from .activation import Activation, get_activation_fn
class DGMLayer(nn.Module):
def __init__(
self,
in_features_1: int,
in_features_2: int,
out_features: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
weight_norm: bool = False,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(activation_fn)
self.weight_norm = weight_norm
self.activation_par = activation_par
if weight_norm:
self.linear_1 = WeightNormLinear(in_features_1, out_features, bias=False)
self.linear_2 = WeightNormLinear(in_features_2, out_features, bias=False)
else:
self.linear_1 = nn.Linear(in_features_1, out_features, bias=False)
self.linear_2 = nn.Linear(in_features_2, out_features, bias=False)
self.bias = nn.Parameter(torch.empty(out_features))
self.reset_parameters()
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.linear_1.weight)
nn.init.xavier_uniform_(self.linear_2.weight)
nn.init.constant_(self.bias, 0)
if self.weight_norm:
nn.init.constant_(self.linear_1.weight_g, 1.0)
nn.init.constant_(self.linear_2.weight_g, 1.0)
def forward(self, input_1: Tensor, input_2: Tensor) -> Tensor:
x = self.linear_1(input_1) + self.linear_2(input_2) + self.bias
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
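# Usage sketch for DGMLayer above (sizes are illustrative assumptions): the
# layer combines two inputs through separate linear maps and a shared bias.
#   layer = DGMLayer(in_features_1=32, in_features_2=16, out_features=32,
#                    activation_fn=Activation.TANH)
#   out = layer(torch.randn(8, 32), torch.randn(8, 16))  # -> torch.Size([8, 32])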
| modulus-sym-main | modulus/sym/models/layers/dgm_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class SpectralConv1d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, modes1: int):
super().__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, 2)
)
self.reset_parameters()
# Complex multiplication
def compl_mul1d(
self,
input: Tensor,
weights: Tensor,
) -> Tensor:
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
cweights = torch.view_as_complex(weights)
return torch.einsum("bix,iox->box", input, cweights)
def forward(self, x: Tensor) -> Tensor:
bsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant factor)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
bsize,
self.out_channels,
x.size(-1) // 2 + 1,
device=x.device,
dtype=torch.cfloat,
)
out_ft[:, :, : self.modes1] = self.compl_mul1d(
x_ft[:, :, : self.modes1],
self.weights1,
)
# Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
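# Illustrative shape sketch for SpectralConv1d above (sizes are assumptions):
# the layer keeps the spatial size and learns weights for `modes1` Fourier modes.
#   conv = SpectralConv1d(in_channels=4, out_channels=8, modes1=16)
#   y = conv(torch.randn(2, 4, 64))  # -> torch.Size([2, 8, 64])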
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super().__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
)
self.weights2 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
)
self.reset_parameters()
# Complex multiplication
def compl_mul2d(self, input: Tensor, weights: Tensor) -> Tensor:
# (batch, in_channel, x, y), (in_channel, out_channel, x, y) -> (batch, out_channel, x, y)
cweights = torch.view_as_complex(weights)
return torch.einsum("bixy,ioxy->boxy", input, cweights)
def forward(self, x: Tensor) -> Tensor:
batchsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant factor)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2] = self.compl_mul2d(
x_ft[:, :, : self.modes1, : self.modes2],
self.weights1,
)
out_ft[:, :, -self.modes1 :, : self.modes2] = self.compl_mul2d(
x_ft[:, :, -self.modes1 :, : self.modes2],
self.weights2,
)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
self.weights2.data = self.scale * torch.rand(self.weights2.data.shape)
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super().__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.modes3 = modes3
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights2 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights3 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights4 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.reset_parameters()
# Complex multiplication
def compl_mul3d(
self,
input: Tensor,
weights: Tensor,
) -> Tensor:
# (batch, in_channel, x, y, z), (in_channel, out_channel, x, y, z) -> (batch, out_channel, x, y, z)
cweights = torch.view_as_complex(weights)
return torch.einsum("bixyz,ioxyz->boxyz", input, cweights)
def forward(self, x: Tensor) -> Tensor:
batchsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant factor)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-3),
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, : self.modes2, : self.modes3], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3], self.weights2
)
out_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3], self.weights3
)
out_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3], self.weights4
)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
self.weights2.data = self.scale * torch.rand(self.weights2.data.shape)
self.weights3.data = self.scale * torch.rand(self.weights3.data.shape)
self.weights4.data = self.scale * torch.rand(self.weights4.data.shape)
# ==========================================
# Utils for PINO exact gradients
# ==========================================
def fourier_derivatives(x: Tensor, l: List[float]) -> Tuple[Tensor, Tensor]:
    # check that input shape matches domain length
assert len(x.shape) - 2 == len(l), "input shape doesn't match domain dims"
# set pi from numpy
pi = float(np.pi)
# get needed dims
batchsize = x.size(0)
n = x.shape[2:]
dim = len(l)
# get device
device = x.device
# compute fourier transform
x_h = torch.fft.fftn(x, dim=list(range(2, dim + 2)))
# make wavenumbers
k_x = []
for i, nx in enumerate(n):
k_x.append(
torch.cat(
(
torch.arange(start=0, end=nx // 2, step=1, device=device),
torch.arange(start=-nx // 2, end=0, step=1, device=device),
),
0,
).reshape((i + 2) * [1] + [nx] + (dim - i - 1) * [1])
)
# compute laplacian in fourier space
j = torch.complex(
torch.tensor([0.0], device=device), torch.tensor([1.0], device=device)
) # Cuda graphs does not work here
wx_h = [j * k_x_i * x_h * (2 * pi / l[i]) for i, k_x_i in enumerate(k_x)]
wxx_h = [
j * k_x_i * wx_h_i * (2 * pi / l[i])
for i, (wx_h_i, k_x_i) in enumerate(zip(wx_h, k_x))
]
# inverse fourier transform out
wx = torch.cat(
[torch.fft.ifftn(wx_h_i, dim=list(range(2, dim + 2))).real for wx_h_i in wx_h],
dim=1,
)
wxx = torch.cat(
[
torch.fft.ifftn(wxx_h_i, dim=list(range(2, dim + 2))).real
for wxx_h_i in wxx_h
],
dim=1,
)
return (wx, wxx)
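# Hedged usage sketch for fourier_derivatives above (shapes are assumptions):
# for a batch of 3-channel 2D fields on a periodic 1x1 domain, the first and
# second spectral derivatives are returned stacked along the channel dimension
# (derivatives along the first spatial axis come first, then the second).
#   u = torch.randn(2, 3, 32, 32)
#   du, ddu = fourier_derivatives(u, l=[1.0, 1.0])
#   # du.shape == ddu.shape == torch.Size([2, 6, 32, 32])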
@torch.jit.ignore
def calc_latent_derivatives(
x: Tensor, domain_length: List[int] = 2
) -> Tuple[List[Tensor], List[Tensor]]:
dim = len(x.shape) - 2
# Compute derivatives of latent variables via fourier methods
    # Pad domain by a factor of 2 for non-periodic domains
padd = [(i - 1) // 2 for i in list(x.shape[2:])]
# Scale domain length by padding amount
domain_length = [
domain_length[i] * (2 * padd[i] + x.shape[i + 2]) / x.shape[i + 2]
for i in range(dim)
]
padding = padd + padd
x_p = F.pad(x, padding, mode="replicate")
dx, ddx = fourier_derivatives(x_p, domain_length)
# Trim padded domain
if len(x.shape) == 3:
dx = dx[..., padd[0] : -padd[0]]
ddx = ddx[..., padd[0] : -padd[0]]
dx_list = torch.split(dx, x.shape[1], dim=1)
ddx_list = torch.split(ddx, x.shape[1], dim=1)
elif len(x.shape) == 4:
dx = dx[..., padd[0] : -padd[0], padd[1] : -padd[1]]
ddx = ddx[..., padd[0] : -padd[0], padd[1] : -padd[1]]
dx_list = torch.split(dx, x.shape[1], dim=1)
ddx_list = torch.split(ddx, x.shape[1], dim=1)
else:
dx = dx[..., padd[0] : -padd[0], padd[1] : -padd[1], padd[2] : -padd[2]]
ddx = ddx[..., padd[0] : -padd[0], padd[1] : -padd[1], padd[2] : -padd[2]]
dx_list = torch.split(dx, x.shape[1], dim=1)
ddx_list = torch.split(ddx, x.shape[1], dim=1)
return dx_list, ddx_list
def first_order_pino_grads(
u: Tensor,
ux: List[Tensor],
weights_1: Tensor,
weights_2: Tensor,
bias_1: Tensor,
) -> Tuple[Tensor]:
# dim for einsum
dim = len(u.shape) - 2
dim_str = "xyz"[:dim]
# compute first order derivatives of input
# compute first layer
if dim == 1:
u_hidden = F.conv1d(u, weights_1, bias_1)
elif dim == 2:
weights_1 = weights_1.unsqueeze(-1)
weights_2 = weights_2.unsqueeze(-1)
u_hidden = F.conv2d(u, weights_1, bias_1)
elif dim == 3:
weights_1 = weights_1.unsqueeze(-1).unsqueeze(-1)
weights_2 = weights_2.unsqueeze(-1).unsqueeze(-1)
u_hidden = F.conv3d(u, weights_1, bias_1)
# compute derivative hidden layer
diff_tanh = 1 / torch.cosh(u_hidden) ** 2
# compute diff(f(g))
diff_fg = torch.einsum(
"mi" + dim_str + ",bm" + dim_str + ",km" + dim_str + "->bi" + dim_str,
weights_1,
diff_tanh,
weights_2,
)
# compute diff(f(g)) * diff(g)
vx = [
torch.einsum("bi" + dim_str + ",bi" + dim_str + "->b" + dim_str, diff_fg, w)
for w in ux
]
vx = [torch.unsqueeze(w, dim=1) for w in vx]
return vx
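# Hedged note on first_order_pino_grads above: for the constrained two-layer
# tanh decoder, output gradients are assembled analytically via the chain rule
# (decoder Jacobian applied to the spectral derivatives of the latent
# variables) instead of autograd; see section 3.3 of
# https://arxiv.org/pdf/2111.03794.pdf.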
def second_order_pino_grads(
u: Tensor,
ux: Tensor,
uxx: Tensor,
weights_1: Tensor,
weights_2: Tensor,
bias_1: Tensor,
) -> Tuple[Tensor]:
# dim for einsum
dim = len(u.shape) - 2
dim_str = "xyz"[:dim]
# compute first order derivatives of input
# compute first layer
if dim == 1:
u_hidden = F.conv1d(u, weights_1, bias_1)
elif dim == 2:
weights_1 = weights_1.unsqueeze(-1)
weights_2 = weights_2.unsqueeze(-1)
u_hidden = F.conv2d(u, weights_1, bias_1)
elif dim == 3:
weights_1 = weights_1.unsqueeze(-1).unsqueeze(-1)
weights_2 = weights_2.unsqueeze(-1).unsqueeze(-1)
u_hidden = F.conv3d(u, weights_1, bias_1)
# compute derivative hidden layer
diff_tanh = 1 / torch.cosh(u_hidden) ** 2
# compute diff(f(g))
diff_fg = torch.einsum(
"mi" + dim_str + ",bm" + dim_str + ",km" + dim_str + "->bi" + dim_str,
weights_1,
diff_tanh,
weights_2,
)
# compute diagonal of hessian
# double derivative of hidden layer
diff_diff_tanh = -2 * diff_tanh * torch.tanh(u_hidden)
# compute diff(g) * hessian(f) * diff(g)
vxx1 = [
torch.einsum(
"bi"
+ dim_str
+ ",mi"
+ dim_str
+ ",bm"
+ dim_str
+ ",mj"
+ dim_str
+ ",bj"
+ dim_str
+ "->b"
+ dim_str,
w,
weights_1,
weights_2 * diff_diff_tanh,
weights_1,
w,
)
for w in ux
] # (b,x,y,t)
# compute diff(f) * hessian(g)
vxx2 = [
torch.einsum("bi" + dim_str + ",bi" + dim_str + "->b" + dim_str, diff_fg, w)
for w in uxx
]
vxx = [torch.unsqueeze(a + b, dim=1) for a, b in zip(vxx1, vxx2)]
return vxx
# @torch.jit.ignore
# def calc_derivatives(
# self,
# x: Tensor, # Latent variables
# y: Dict[str, Tensor], # Output vars
# x_list: List[Tensor],
# dx_list: List[Tensor],
# ddx_list: List[Tensor],
# dim: int = 2,
# ) -> Dict[str, Tensor]:
# # Loop through output variables independently
# y_out: Dict[str, Tensor] = {}
# for key in self.output_key_dict.keys():
# # First-order grads with back-prop
# outputs: List[torch.Tensor] = [y[key]]
# inputs: List[torch.Tensor] = [x]
# grad_outputs: List[Optional[torch.Tensor]] = [
# torch.ones_like(y[key], device=y[key].device)
# ]
# dydzeta = torch.autograd.grad(
# outputs,
# inputs,
# grad_outputs=grad_outputs,
# create_graph=True,
# retain_graph=True,
# )[0]
# for i, axis in enumerate(["x", "y", "z"]):
# if f"{key}__{axis}" in self.derivative_key_dict:
# # Chain rule: g'(x)*f'(g(x))
# y_out[f"{key}__{axis}"] = torch.sum(
# dx_list[i] * dydzeta, dim=1, keepdim=True
# )
# # Calc second order if needed
# if self.calc_ddx:
# y_ddx = self.calc_second_order_derivatives(
# x, key, x_list, dx_list, ddx_list, dydzeta, dim
# )
# y_out.update(y_ddx)
# return y_out
# @torch.jit.ignore
# def calc_second_order_derivatives(
# self,
# x: Tensor,
# key: str,
# x_list: List[Tensor],
# dx_list: List[Tensor],
# ddx_list: List[Tensor],
# dydzeta: Tensor,
# dim: int = 2,
# ) -> Dict[str, Tensor]:
# # Brute force Hessian calc with auto-diff
# hessian = torch.zeros(
# dydzeta.shape[0],
# dydzeta.shape[1],
# dydzeta.shape[1],
# dydzeta.shape[2],
# dydzeta.shape[3],
# ).to(x.device)
# grad_outputs: List[Optional[torch.Tensor]] = [
# torch.ones_like(dydzeta[:, :1], device=dydzeta.device)
# ]
# for i in range(dydzeta.shape[1]):
# for j in range(i, dydzeta.shape[1]):
# dyydzeta = torch.autograd.grad(
# dydzeta[:, j : j + 1],
# x_list[i],
# grad_outputs=grad_outputs,
# retain_graph=True,
# allow_unused=True,
# )[0]
# if dyydzeta is not None:
# hessian[:, i, j] = dyydzeta.squeeze(1)
# hessian[:, j, i] = dyydzeta.squeeze(1)
# # Loop through output variables independently
# y_out: Dict[str, Tensor] = {}
# # Add needed derivatives
# for i, axis in enumerate(["x", "y", "z"]):
# if f"{key}__{axis}__{axis}" in self.derivative_key_dict:
# dim_str = "ijk"[:dim]
# # Chain rule: g''(x)*f'(g(x)) + g'(x)*f''(g(x))*g'(x)
# y_out[f"{key}__{axis}__{axis}"] = torch.sum(
# ddx_list[i] * dydzeta, dim=1, keepdim=True
# ) + torch.einsum(
# f"bm{dim_str},bmn{dim_str},bn{dim_str}->b{dim_str}",
# dx_list[i],
# hessian,
# dx_list[i],
# ).unsqueeze(
# 1
# )
# return y_out
| modulus-sym-main | modulus/sym/models/layers/spectral_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .activation import Activation, get_activation_fn
from .dgm_layers import DGMLayer
from .fourier_layers import FourierLayer, FourierFilter, GaborFilter
from .fully_connected_layers import FCLayer, Conv1dFCLayer, Conv2dFCLayer, Conv3dFCLayer
from .siren_layers import SirenLayer, SirenLayerType
from .spectral_layers import SpectralConv1d, SpectralConv2d, SpectralConv3d
from .weight_norm import WeightNormLinear
| modulus-sym-main | modulus/sym/models/layers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class WeightNormLinear(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.empty((out_features, in_features)))
self.weight_g = nn.Parameter(torch.empty((out_features, 1)))
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.weight)
nn.init.constant_(self.weight_g, 1.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, input: Tensor) -> Tensor:
norm = self.weight.norm(dim=1, p=2, keepdim=True)
weight = self.weight_g * self.weight / norm
return F.linear(input, weight, self.bias)
def extra_repr(self) -> str:
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None
)
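# Minimal usage sketch (illustrative comment, not part of the original module):
# WeightNormLinear behaves like nn.Linear but re-parameterizes the weight as
# weight_g * weight / ||weight||_2 (row-wise), so magnitude and direction are
# learned separately.
#
#   layer = WeightNormLinear(in_features=8, out_features=4)
#   y = layer(torch.randn(16, 8))  # -> shape (16, 4)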
| modulus-sym-main | modulus/sym/models/layers/weight_norm.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Callable
from typing import Optional
from typing import Union
import torch.nn as nn
from torch import Tensor
from .weight_norm import WeightNormLinear
from .activation import Activation, get_activation_fn
logger = logging.getLogger(__name__)
class FCLayer(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
weight_norm: bool = False,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(
activation_fn, out_features=out_features
)
self.weight_norm = weight_norm
self.activation_par = activation_par
if weight_norm:
self.linear = WeightNormLinear(in_features, out_features, bias=True)
else:
self.linear = nn.Linear(in_features, out_features, bias=True)
self.reset_parameters()
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def reset_parameters(self) -> None:
nn.init.constant_(self.linear.bias, 0)
nn.init.xavier_uniform_(self.linear.weight)
if self.weight_norm:
nn.init.constant_(self.linear.weight_g, 1.0)
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
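# Minimal usage sketch (illustrative comment): FCLayer is a linear layer with optional
# weight normalization and a configurable activation, optionally scaled by a learnable
# activation parameter.
#
#   fc = FCLayer(in_features=32, out_features=64, activation_fn=Activation.SILU)
#   y = fc(torch.randn(10, 32))  # -> shape (10, 64)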
# FC like layer for image channels
class ConvFCLayer(nn.Module):
def __init__(
self,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(activation_fn)
self.activation_par = activation_par
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def apply_activation(self, x: Tensor) -> Tensor:
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
class Conv1dFCLayer(ConvFCLayer):
def __init__(
self,
in_features: int,
out_features: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
weight_norm: bool = False,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_features
self.out_channels = out_features
self.conv = nn.Conv1d(in_features, out_features, kernel_size=1, bias=True)
self.reset_parameters()
if weight_norm:
            logger.warning("Weight norm not supported for Conv FC layers")
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
class Conv2dFCLayer(ConvFCLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
self.conv.bias.requires_grad = False
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
class Conv3dFCLayer(ConvFCLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=True)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
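# Minimal usage sketch (illustrative comment): the Conv*dFCLayer classes apply a
# kernel-size-1 convolution, i.e. a per-point fully connected layer over the channel
# dimension of grid data.
#
#   conv_fc = Conv2dFCLayer(in_channels=3, out_channels=8, activation_fn=Activation.RELU)
#   y = conv_fc(torch.randn(4, 3, 32, 32))  # -> shape (4, 8, 32, 32)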
| modulus-sym-main | modulus/sym/models/layers/fully_connected_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
from typing import Callable
from typing import Union
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from modulus.sym.manager import JitManager, JitArchMode
class ActivationMeta(enum.EnumMeta):
def __getitem__(self, name):
try:
return super().__getitem__(name.upper())
        except KeyError as error:
            raise KeyError(f"Invalid activation function {name}") from error
class Activation(enum.Enum, metaclass=ActivationMeta):
ELU = enum.auto()
LEAKY_RELU = enum.auto()
MISH = enum.auto()
RELU = enum.auto()
GELU = enum.auto()
SELU = enum.auto()
PRELU = enum.auto()
SIGMOID = enum.auto()
SILU = enum.auto()
SIN = enum.auto()
SQUAREPLUS = enum.auto()
SOFTPLUS = enum.auto()
TANH = enum.auto()
STAN = enum.auto()
IDENTITY = enum.auto()
def identity(x: Tensor) -> Tensor:
return x
def squareplus(x: Tensor) -> Tensor:
b = 4
return 0.5 * (x + torch.sqrt(x * x + b))
def gelu(x: Tensor) -> Tensor:
    # Tanh-based GELU approximation: slower than the sigmoid approximation but more
    # accurate. See: https://github.com/hendrycks/GELUs
    # The standard GELU that ships with PyTorch does not JIT compile!
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
# return 0.5 * x * (1 + torch.tanh(torch.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))
class Stan(nn.Module):
"""
Self-scalable Tanh (Stan)
References: Gnanasambandam, Raghav and Shen, Bo and Chung, Jihoon and Yue, Xubo and others.
Self-scalable Tanh (Stan): Faster Convergence and Better Generalization
in Physics-informed Neural Networks. arXiv preprint arXiv:2204.12589, 2022.
"""
def __init__(self, out_features=1):
super().__init__()
self.beta = nn.Parameter(torch.ones(out_features))
def forward(self, x):
if x.shape[-1] != self.beta.shape[-1]:
raise ValueError(
f"The last dimension of the input must be equal to the dimension of Stan parameters. Got inputs: {x.shape}, params: {self.beta.shape}"
)
return torch.tanh(x) * (1.0 + self.beta * x)
def get_activation_fn(
activation: Union[Activation, Callable[[Tensor], Tensor]],
module: bool = False,
**kwargs, # Optional parameters
) -> Callable[[Tensor], Tensor]:
activation_mapping = {
Activation.ELU: F.elu,
Activation.LEAKY_RELU: F.leaky_relu,
Activation.MISH: F.mish,
Activation.RELU: F.relu,
Activation.GELU: F.gelu,
Activation.SELU: F.selu,
Activation.SIGMOID: torch.sigmoid,
Activation.SILU: F.silu,
Activation.SIN: torch.sin,
Activation.SQUAREPLUS: squareplus,
Activation.SOFTPLUS: F.softplus,
Activation.TANH: torch.tanh,
Activation.IDENTITY: identity,
}
# Some activations have parameters in them thus must
# be in a Module before forward call
module_activation_mapping = {
Activation.ELU: nn.ELU,
Activation.LEAKY_RELU: nn.LeakyReLU,
Activation.MISH: nn.Mish,
Activation.RELU: nn.ReLU,
        Activation.GELU: nn.GELU,
Activation.SELU: nn.SELU,
Activation.PRELU: nn.PReLU,
Activation.SIGMOID: nn.Sigmoid,
Activation.SILU: nn.SiLU,
Activation.TANH: nn.Tanh,
Activation.STAN: Stan,
}
if activation in activation_mapping and not module:
activation_fn_ = activation_mapping[activation]
        # Wrap the function because torch.sin and F.gelu cannot be scripted directly
def activation_fn(x: Tensor) -> Tensor:
return activation_fn_(x)
elif activation in module_activation_mapping:
activation_fn = module_activation_mapping[activation](**kwargs)
else:
activation_fn = activation
if JitManager().enabled and JitManager().arch_mode == JitArchMode.ONLY_ACTIVATION:
activation_fn = torch.jit.script(activation_fn)
return activation_fn
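# Minimal usage sketch (illustrative comment): get_activation_fn returns either a plain
# callable or an nn.Module for parameterized activations such as PRELU or STAN, and may
# JIT-script the result depending on the JitManager settings.
#
#   silu = get_activation_fn(Activation.SILU)
#   stan = get_activation_fn(Activation.STAN, out_features=64)  # module with parameters
#   y = stan(silu(torch.randn(2, 64)))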
| modulus-sym-main | modulus/sym/models/layers/activation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
class FourierLayer(nn.Module):
def __init__(
self,
in_features: int,
frequencies,
) -> None:
super().__init__()
# To do: Need more robust way for these params
if isinstance(frequencies[0], str):
if "gaussian" in frequencies[0]:
nr_freq = frequencies[2]
np_f = (
np.random.normal(0, 1, size=(nr_freq, in_features)) * frequencies[1]
)
else:
nr_freq = len(frequencies[1])
np_f = []
if "full" in frequencies[0]:
np_f_i = np.meshgrid(
*[np.array(frequencies[1]) for _ in range(in_features)],
indexing="ij",
)
np_f.append(
np.reshape(
np.stack(np_f_i, axis=-1),
(nr_freq**in_features, in_features),
)
)
if "axis" in frequencies[0]:
np_f_i = np.zeros((nr_freq, in_features, in_features))
for i in range(in_features):
np_f_i[:, i, i] = np.reshape(
np.array(frequencies[1]), (nr_freq)
)
np_f.append(
np.reshape(np_f_i, (nr_freq * in_features, in_features))
)
if "diagonal" in frequencies[0]:
np_f_i = np.reshape(np.array(frequencies[1]), (nr_freq, 1, 1))
np_f_i = np.tile(np_f_i, (1, in_features, in_features))
np_f_i = np.reshape(np_f_i, (nr_freq * in_features, in_features))
np_f.append(np_f_i)
np_f = np.concatenate(np_f, axis=-2)
else:
np_f = frequencies # [nr_freq, in_features]
frequencies = torch.tensor(np_f, dtype=torch.get_default_dtype())
frequencies = frequencies.t().contiguous()
self.register_buffer("frequencies", frequencies)
def out_features(self) -> int:
return int(self.frequencies.size(1) * 2)
def forward(self, x: Tensor) -> Tensor:
x_hat = torch.matmul(x, self.frequencies)
x_sin = torch.sin(2.0 * math.pi * x_hat)
x_cos = torch.cos(2.0 * math.pi * x_hat)
x_i = torch.cat([x_sin, x_cos], dim=-1)
return x_i
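# Minimal usage sketch (illustrative comment): FourierLayer encodes inputs as
# [sin(2*pi*x*f), cos(2*pi*x*f)] features for a set of frequencies f. Frequencies can be
# passed directly as an array of shape [nr_freq, in_features] or via a string spec such as
# ("axis", [1.0, 2.0, 4.0]) or ("gaussian", scale, nr_freq).
#
#   layer = FourierLayer(in_features=2, frequencies=("axis", [1.0, 2.0, 4.0]))
#   feats = layer(torch.randn(8, 2))  # -> shape (8, layer.out_features())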
class FourierFilter(nn.Module):
def __init__(
self,
in_features: int,
layer_size: int,
nr_layers: int,
input_scale: float,
) -> None:
super().__init__()
self.weight_scale = input_scale / math.sqrt(nr_layers + 1)
self.frequency = nn.Parameter(torch.empty(in_features, layer_size))
        # The shape of the phase tensor was supposed to be [1, layer_size], but that
        # causes issues with batched tensors in FuncArch.
        # We can simply rely on broadcasting here.
self.phase = nn.Parameter(torch.empty(layer_size))
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.frequency)
nn.init.uniform_(self.phase, -math.pi, math.pi)
def forward(self, x: Tensor) -> Tensor:
frequency = self.weight_scale * self.frequency
x_i = torch.sin(torch.matmul(x, 2.0 * math.pi * frequency) + self.phase)
return x_i
class GaborFilter(nn.Module):
def __init__(
self,
in_features: int,
layer_size: int,
nr_layers: int,
input_scale: float,
alpha: float,
beta: float,
) -> None:
super().__init__()
self.layer_size = layer_size
self.alpha = alpha
self.beta = beta
self.weight_scale = input_scale / math.sqrt(nr_layers + 1)
self.frequency = nn.Parameter(torch.empty(in_features, layer_size))
self.phase = nn.Parameter(torch.empty(layer_size))
self.mu = nn.Parameter(torch.empty(in_features, layer_size))
self.gamma = nn.Parameter(torch.empty(layer_size))
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.frequency)
nn.init.uniform_(self.phase, -math.pi, math.pi)
nn.init.uniform_(self.mu, -1.0, 1.0)
with torch.no_grad():
self.gamma.copy_(
torch.from_numpy(
np.random.gamma(self.alpha, 1.0 / self.beta, (self.layer_size)),
)
)
def forward(self, x: Tensor) -> Tensor:
frequency = self.weight_scale * (self.frequency * self.gamma.sqrt())
x_c = x.unsqueeze(-1)
x_c = x_c - self.mu
# The norm dim changed from 1 to -2 to be compatible with BatchedTensor
x_c = torch.square(x_c.norm(p=2, dim=-2))
x_c = torch.exp(-0.5 * x_c * self.gamma)
x_i = x_c * torch.sin(torch.matmul(x, 2.0 * math.pi * frequency) + self.phase)
return x_i
| modulus-sym-main | modulus/sym/models/layers/fourier_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
import torch
import torch.nn as nn
from torch import Tensor
class SirenLayerType(enum.Enum):
FIRST = enum.auto()
HIDDEN = enum.auto()
LAST = enum.auto()
class SirenLayer(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
layer_type: SirenLayerType = SirenLayerType.HIDDEN,
omega_0: float = 30.0,
) -> None:
super().__init__()
self.in_features = in_features
self.layer_type = layer_type
self.omega_0 = omega_0
self.linear = nn.Linear(in_features, out_features, bias=True)
self.apply_activation = layer_type in {
SirenLayerType.FIRST,
SirenLayerType.HIDDEN,
}
self.reset_parameters()
def reset_parameters(self) -> None:
weight_ranges = {
SirenLayerType.FIRST: 1.0 / self.in_features,
SirenLayerType.HIDDEN: math.sqrt(6.0 / self.in_features) / self.omega_0,
SirenLayerType.LAST: math.sqrt(6.0 / self.in_features),
}
weight_range = weight_ranges[self.layer_type]
nn.init.uniform_(self.linear.weight, -weight_range, weight_range)
k_sqrt = math.sqrt(1.0 / self.in_features)
nn.init.uniform_(self.linear.bias, -k_sqrt, k_sqrt)
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.apply_activation:
x = torch.sin(self.omega_0 * x)
return x
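# Minimal usage sketch (illustrative comment): SIREN layers apply sin(omega_0 * (W x + b))
# with layer-type dependent weight initialization; the LAST layer skips the sine activation.
#
#   first = SirenLayer(2, 64, SirenLayerType.FIRST)
#   hidden = SirenLayer(64, 64, SirenLayerType.HIDDEN)
#   last = SirenLayer(64, 1, SirenLayerType.LAST)
#   y = last(hidden(first(torch.randn(128, 2))))  # -> shape (128, 1)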
| modulus-sym-main | modulus/sym/models/layers/siren_layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Dict, List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
from torch import Tensor
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class AFNO2D(nn.Module):
def __init__(
self,
hidden_size,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1,
hidden_size_factor=1,
):
super().__init__()
assert (
hidden_size % num_blocks == 0
), f"hidden_size {hidden_size} should be divisble by num_blocks {num_blocks}"
self.hidden_size = hidden_size
self.sparsity_threshold = sparsity_threshold
self.num_blocks = num_blocks
self.block_size = self.hidden_size // self.num_blocks
self.hard_thresholding_fraction = hard_thresholding_fraction
self.hidden_size_factor = hidden_size_factor
self.scale = 0.02
self.w1 = nn.Parameter(
self.scale
* torch.randn(
2,
self.num_blocks,
self.block_size,
self.block_size * self.hidden_size_factor,
)
)
self.b1 = nn.Parameter(
self.scale
* torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor)
)
self.w2 = nn.Parameter(
self.scale
* torch.randn(
2,
self.num_blocks,
self.block_size * self.hidden_size_factor,
self.block_size,
)
)
self.b2 = nn.Parameter(
self.scale * torch.randn(2, self.num_blocks, self.block_size)
)
def forward(self, x):
bias = x
dtype = x.dtype
x = x.float()
B, H, W, C = x.shape
x = torch.fft.rfft2(x, dim=(1, 2), norm="ortho")
x = x.reshape(B, H, W // 2 + 1, self.num_blocks, self.block_size)
o1_real = torch.zeros(
[
B,
H,
W // 2 + 1,
self.num_blocks,
self.block_size * self.hidden_size_factor,
],
device=x.device,
)
o1_imag = torch.zeros(
[
B,
H,
W // 2 + 1,
self.num_blocks,
self.block_size * self.hidden_size_factor,
],
device=x.device,
)
o2_real = torch.zeros(x.shape, device=x.device)
o2_imag = torch.zeros(x.shape, device=x.device)
total_modes = H // 2 + 1
kept_modes = int(total_modes * self.hard_thresholding_fraction)
o1_real[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
] = F.relu(
torch.einsum(
"...bi,bio->...bo",
x[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
].real,
self.w1[0],
)
- torch.einsum(
"...bi,bio->...bo",
x[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
].imag,
self.w1[1],
)
+ self.b1[0]
)
o1_imag[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
] = F.relu(
torch.einsum(
"...bi,bio->...bo",
x[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
].imag,
self.w1[0],
)
+ torch.einsum(
"...bi,bio->...bo",
x[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
].real,
self.w1[1],
)
+ self.b1[1]
)
o2_real[:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes] = (
torch.einsum(
"...bi,bio->...bo",
o1_real[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
],
self.w2[0],
)
- torch.einsum(
"...bi,bio->...bo",
o1_imag[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
],
self.w2[1],
)
+ self.b2[0]
)
o2_imag[:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes] = (
torch.einsum(
"...bi,bio->...bo",
o1_imag[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
],
self.w2[0],
)
+ torch.einsum(
"...bi,bio->...bo",
o1_real[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes
],
self.w2[1],
)
+ self.b2[1]
)
x = torch.stack([o2_real, o2_imag], dim=-1)
x = F.softshrink(x, lambd=self.sparsity_threshold)
x = torch.view_as_complex(x)
x = x.reshape(B, H, W // 2 + 1, C)
x = torch.fft.irfft2(x, s=(H, W), dim=(1, 2), norm="ortho")
x = x.type(dtype)
return x + bias
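# Minimal usage sketch (illustrative comment): AFNO2D mixes tokens in the 2D Fourier domain
# with a block-diagonal complex MLP, keeps only a hard-thresholded fraction of the modes,
# applies soft-shrinkage, and transforms back; input and output are channels-last.
#
#   mixer = AFNO2D(hidden_size=64, num_blocks=8)
#   y = mixer(torch.randn(2, 32, 32, 64))  # [B, H, W, C] -> same shape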
class Block(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.0,
drop=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
double_skip=True,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.filter = AFNO2D(
dim, num_blocks, sparsity_threshold, hard_thresholding_fraction
)
# self.drop_path = nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
self.double_skip = double_skip
def forward(self, x):
residual = x
x = self.norm1(x)
x = self.filter(x)
if self.double_skip:
x = x + residual
residual = x
x = self.norm2(x)
x = self.mlp(x)
x = x + residual
return x
class AFNONet(nn.Module):
def __init__(
self,
img_size=(720, 1440),
patch_size=(16, 16),
in_channels=1,
out_channels=1,
embed_dim=768,
depth=12,
mlp_ratio=4.0,
drop_rate=0.0,
num_blocks=16,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
) -> None:
super().__init__()
assert (
img_size[0] % patch_size[0] == 0 and img_size[1] % patch_size[1] == 0
), f"img_size {img_size} should be divisible by patch_size {patch_size}"
self.in_chans = in_channels
self.out_chans = out_channels
self.img_size = img_size
self.patch_size = patch_size
self.num_features = self.embed_dim = embed_dim
self.num_blocks = num_blocks
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=self.patch_size,
in_chans=self.in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
self.h = img_size[0] // self.patch_size[0]
self.w = img_size[1] // self.patch_size[1]
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
mlp_ratio=mlp_ratio,
drop=drop_rate,
norm_layer=norm_layer,
num_blocks=self.num_blocks,
sparsity_threshold=sparsity_threshold,
hard_thresholding_fraction=hard_thresholding_fraction,
)
for i in range(depth)
]
)
self.head = nn.Linear(
embed_dim,
self.out_chans * self.patch_size[0] * self.patch_size[1],
bias=False,
)
torch.nn.init.trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
x = x + self.pos_embed
x = self.pos_drop(x)
x = x.reshape(B, self.h, self.w, self.embed_dim)
for blk in self.blocks:
x = blk(x)
return x
def forward(self, x: Tensor) -> Tensor:
x = self.forward_features(x)
x = self.head(x)
# Correct tensor shape back into [B, C, H, W]
# [b h w (p1 p2 c_out)]
out = x.view(list(x.shape[:-1]) + [self.patch_size[0], self.patch_size[1], -1])
# [b h w p1 p2 c_out]
out = torch.permute(out, (0, 5, 1, 3, 2, 4))
# [b c_out, h, p1, w, p2]
out = out.reshape(list(out.shape[:2]) + [self.img_size[0], self.img_size[1]])
# [b c_out, (h*p1), (w*p2)]
return out
class PatchEmbed(nn.Module):
def __init__(
self, img_size=(224, 224), patch_size=(16, 16), in_chans=3, embed_dim=768
):
super().__init__()
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
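# Minimal usage sketch (illustrative comment): PatchEmbed splits an image into
# non-overlapping patches with a strided convolution and flattens them into a token sequence.
#
#   embed = PatchEmbed(img_size=(64, 64), patch_size=(16, 16), in_chans=2, embed_dim=256)
#   tokens = embed(torch.randn(4, 2, 64, 64))  # -> shape (4, 16, 256)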
class AFNOArch(Arch):
"""Adaptive Fourier neural operator (AFNO) model.
Note
----
AFNO is a model that is designed for 2D images only.
Parameters
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
output_keys : List[Key]
Output key list. The key dimension size should equal the variables channel dim.
img_shape : Tuple[int, int]
Input image dimensions (height, width)
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
patch_size : int, optional
        Size of image patches, by default 16
embed_dim : int, optional
Embedded channel size, by default 256
depth : int, optional
Number of AFNO layers, by default 4
num_blocks : int, optional
Number of blocks in the frequency weight matrices, by default 4
Variable Shape
--------------
- Input variable tensor shape: :math:`[N, size, H, W]`
- Output variable tensor shape: :math:`[N, size, H, W]`
Example
-------
    >>> afno = AFNOArch([Key("x", size=2)], [Key("y", size=2)], (64, 64))
>>> model = afno.make_node()
>>> input = {"x": torch.randn(20, 2, 64, 64)}
>>> output = model.evaluate(input)
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
img_shape: Tuple[int, int],
detach_keys: List[Key] = [],
patch_size: int = 16,
embed_dim: int = 256,
depth: int = 4,
num_blocks: int = 4,
) -> None:
super().__init__(input_keys=input_keys, output_keys=output_keys)
self.input_keys = input_keys
self.output_keys = output_keys
self.detach_keys = detach_keys
self.input_key_dict = {var.name: var.size for var in self.input_keys}
self.output_key_dict = {var.name: var.size for var in self.output_keys}
in_channels = sum(self.input_key_dict.values())
out_channels = sum(self.output_key_dict.values())
self._impl = AFNONet(
in_channels=in_channels,
out_channels=out_channels,
patch_size=(patch_size, patch_size),
img_size=img_shape,
embed_dim=embed_dim,
depth=depth,
num_blocks=num_blocks,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
mask=self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
y = self._impl(x)
return self.prepare_output(
y, output_var=self.output_key_dict, dim=1, output_scales=self.output_scales
)
| modulus-sym-main | modulus/sym/models/afno/afno.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .afno import AFNOArch
| modulus-sym-main | modulus/sym/models/afno/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from collections import OrderedDict
from copy import Error, deepcopy
from numpy.lib.arraypad import pad
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.fft
from torch import Tensor
from torch.nn.modules.container import Sequential
from torch.utils.checkpoint import checkpoint_sequential
from typing import Optional, Dict, List, Tuple
import math
# distributed stuff
import torch.distributed as dist
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.key import Key
from modulus.sym.models.arch import Arch
from modulus.sym.models.afno.distributed.mappings import copy_to_matmul_parallel_region
from modulus.sym.models.afno.distributed.mappings import (
scatter_to_matmul_parallel_region,
gather_from_matmul_parallel_region,
)
from modulus.sym.models.afno.distributed.layers import trunc_normal_, DropPath
from modulus.sym.models.afno.distributed.layers import (
DistributedPatchEmbed,
DistributedMLP,
DistributedAFNO2D,
)
import logging
logger = logging.getLogger(__name__)
class DistributedBlock(nn.Module):
def __init__(
self,
h,
w,
dim,
mlp_ratio=4.0,
drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
double_skip=True,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
input_is_matmul_parallel=False,
output_is_matmul_parallel=False,
):
super(DistributedBlock, self).__init__()
# model parallelism
# matmul parallelism
self.input_is_matmul_parallel = input_is_matmul_parallel
self.output_is_matmul_parallel = output_is_matmul_parallel
# norm layer
self.norm1 = norm_layer((h, w))
# filter
self.filter = DistributedAFNO2D(
dim,
num_blocks,
sparsity_threshold,
hard_thresholding_fraction,
input_is_matmul_parallel=True,
output_is_matmul_parallel=True,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
# norm layer
self.norm2 = norm_layer((h, w))
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = DistributedMLP(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
input_is_matmul_parallel=True,
output_is_matmul_parallel=True,
)
self.double_skip = double_skip
def forward(self, x):
if not self.input_is_matmul_parallel:
x = scatter_to_matmul_parallel_region(x, dim=1)
residual = x
x = self.norm1(x)
x = self.filter(x)
if self.double_skip:
x = x + residual
residual = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x + residual
if not self.output_is_matmul_parallel:
x = gather_from_matmul_parallel_region(x, dim=1)
return x
class DistributedAFNONet(nn.Module):
def __init__(
self,
img_size=(720, 1440),
patch_size=(16, 16),
in_chans=2,
out_chans=2,
embed_dim=768,
depth=12,
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
num_blocks=16,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
input_is_matmul_parallel=False,
output_is_matmul_parallel=False,
):
super().__init__()
# comm sizes
matmul_comm_size = DistributedManager().group_size("model_parallel")
self.img_size = img_size
self.patch_size = patch_size
self.in_chans = in_chans
self.out_chans = out_chans
self.num_features = self.embed_dim = embed_dim
self.num_blocks = num_blocks
self.input_is_matmul_parallel = input_is_matmul_parallel
self.output_is_matmul_parallel = output_is_matmul_parallel
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = DistributedPatchEmbed(
img_size=img_size,
patch_size=self.patch_size,
in_chans=self.in_chans,
embed_dim=embed_dim,
input_is_matmul_parallel=self.input_is_matmul_parallel,
output_is_matmul_parallel=True,
)
num_patches = self.patch_embed.num_patches
# original: x = B, H*W, C
# self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
# new: x = B, C, H*W
self.embed_dim_local = self.embed_dim // matmul_comm_size
self.pos_embed = nn.Parameter(torch.zeros(1, self.embed_dim_local, num_patches))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.h = img_size[0] // self.patch_size[0]
self.w = img_size[1] // self.patch_size[1]
# add blocks
blks = []
for i in range(0, depth):
input_is_matmul_parallel = True # if i > 0 else False
output_is_matmul_parallel = True if i < (depth - 1) else False
blks.append(
DistributedBlock(
h=self.h,
w=self.w,
dim=embed_dim,
mlp_ratio=mlp_ratio,
drop=drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
num_blocks=self.num_blocks,
sparsity_threshold=sparsity_threshold,
hard_thresholding_fraction=hard_thresholding_fraction,
input_is_matmul_parallel=input_is_matmul_parallel,
output_is_matmul_parallel=output_is_matmul_parallel,
)
)
self.blocks = nn.ModuleList(blks)
# head
if self.output_is_matmul_parallel:
self.out_chans_local = (
self.out_chans + matmul_comm_size - 1
) // matmul_comm_size
else:
self.out_chans_local = self.out_chans
self.head = nn.Conv2d(
self.embed_dim,
self.out_chans_local * self.patch_size[0] * self.patch_size[1],
1,
bias=False,
)
self.synchronized_head = False
# init weights
trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
x = x + self.pos_embed
x = self.pos_drop(x)
# reshape
x = x.reshape(B, self.embed_dim_local, self.h, self.w)
for blk in self.blocks:
x = blk(x)
return x
def forward(self, x):
# fw pass on features
x = self.forward_features(x)
# be careful if head is distributed
if self.output_is_matmul_parallel:
x = copy_to_matmul_parallel_region(x)
else:
if not self.synchronized_head:
# If output is not model parallel, synchronize all GPUs params for head
for param in self.head.parameters():
dist.broadcast(
param, 0, group=DistributedManager().group("model_parallel")
)
self.synchronized_head = True
x = self.head(x)
# new: B, C, H, W
b = x.shape[0]
xv = x.view(b, self.patch_size[0], self.patch_size[1], -1, self.h, self.w)
xvt = torch.permute(xv, (0, 3, 4, 1, 5, 2)).contiguous()
x = xvt.view(
b, -1, (self.h * self.patch_size[0]), (self.w * self.patch_size[1])
)
return x
class DistributedAFNOArch(Arch):
"""Distributed Adaptive Fourier neural operator (AFNO) model.
Note
----
AFNO is a model that is designed for 2D images only.
Parameters
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
output_keys : List[Key]
Output key list. The key dimension size should equal the variables channel dim.
img_shape : Tuple[int, int]
Input image dimensions (height, width)
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
patch_size : int, optional
        Size of image patches, by default 16
embed_dim : int, optional
Embedded channel size, by default 256
depth : int, optional
Number of AFNO layers, by default 4
num_blocks : int, optional
Number of blocks in the frequency weight matrices, by default 4
channel_parallel_inputs : bool, optional
Are the inputs sharded along the channel dimension, by default False
channel_parallel_outputs : bool, optional
Should the outputs be sharded along the channel dimension, by default False
Variable Shape
--------------
- Input variable tensor shape: :math:`[N, size, H, W]`
- Output variable tensor shape: :math:`[N, size, H, W]`
Example
-------
    >>> afno = DistributedAFNOArch([Key("x", size=2)], [Key("y", size=2)], (64, 64))
>>> model = afno.make_node()
>>> input = {"x": torch.randn(20, 2, 64, 64)}
>>> output = model(input)
"""
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
img_shape: Tuple[int, int],
detach_keys: List[Key] = [],
patch_size: int = 16,
embed_dim: int = 256,
depth: int = 4,
num_blocks: int = 4,
channel_parallel_inputs: bool = False,
channel_parallel_outputs: bool = False,
) -> None:
super().__init__(input_keys=input_keys, output_keys=output_keys)
self.input_keys = input_keys
self.output_keys = output_keys
self.detach_keys = detach_keys
self.input_key_dict = {var.name: var.size for var in self.input_keys}
self.output_key_dict = {var.name: var.size for var in self.output_keys}
in_channels = sum(self.input_key_dict.values())
out_channels = sum(self.output_key_dict.values())
if DistributedManager().group("model_parallel") is None:
raise RuntimeError(
"Distributed AFNO needs to have model parallel group created first. Check the MODEL_PARALLEL_SIZE environment variable"
)
comm_size = DistributedManager().group_size("model_parallel")
if channel_parallel_inputs:
assert (
in_channels % comm_size == 0
), "Error, in_channels needs to be divisible by model_parallel size"
self._impl = DistributedAFNONet(
img_size=img_shape,
patch_size=(patch_size, patch_size),
in_chans=in_channels,
out_chans=out_channels,
embed_dim=embed_dim,
depth=depth,
num_blocks=num_blocks,
input_is_matmul_parallel=False,
output_is_matmul_parallel=False,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
x = self.prepare_input(
in_vars,
mask=self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
y = self._impl(x)
return self.prepare_output(
y, output_var=self.output_key_dict, dim=1, output_scales=self.output_scales
)
def make_node(self, name: str, jit: bool = False, optimize: bool = True):
"""
Override make_node method to automatically turn JIT off
"""
if jit:
logger.warning(
"JIT compilation not supported for DistributedAFNOArch. Creating node with JIT turned off"
)
return super().make_node(name, False, optimize)
| modulus-sym-main | modulus/sym/models/afno/distributed/afno.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .afno import DistributedAFNONet, DistributedAFNOArch
| modulus-sym-main | modulus/sym/models/afno/distributed/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.models.afno.distributed.mappings import copy_to_matmul_parallel_region
from modulus.sym.models.afno.distributed.mappings import (
reduce_from_matmul_parallel_region,
)
from modulus.sym.models.afno.distributed.mappings import (
scatter_to_matmul_parallel_region,
)
from modulus.sym.models.afno.distributed.mappings import (
gather_from_matmul_parallel_region,
)
from modulus.sym.distributed.helpers import _transpose
from modulus.sym.distributed.helpers import pad_helper
from modulus.sym.distributed.helpers import truncate_helper
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn(
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2,
)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
@torch.jit.script
def drop_path(
x: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1.0 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
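# Minimal usage sketch (illustrative comment): DropPath zeroes whole residual branches per
# sample during training (stochastic depth) and rescales the kept samples by 1 / keep_prob;
# in eval mode it is a no-op.
#
#   dp = DropPath(drop_prob=0.1)
#   y = residual + dp(branch_output)  # hypothetical residual / branch tensors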
class DistributedMLP(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
input_is_matmul_parallel=False,
output_is_matmul_parallel=False,
):
super(DistributedMLP, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.input_is_matmul_parallel = input_is_matmul_parallel
self.output_is_matmul_parallel = output_is_matmul_parallel
# get effective embedding size:
comm_size = DistributedManager().group_size("model_parallel")
assert (
hidden_features % comm_size == 0
), "Error, hidden_features needs to be divisible by matmul_parallel_size"
hidden_features_local = hidden_features // comm_size
# first set of hp
self.w1 = nn.Parameter(torch.ones(hidden_features_local, in_features, 1, 1))
self.b1 = nn.Parameter(torch.zeros(hidden_features_local))
# second set of hp
self.w2 = nn.Parameter(torch.ones(out_features, hidden_features_local, 1, 1))
self.b2 = nn.Parameter(torch.zeros(out_features))
self.act = act_layer()
self.drop = nn.Dropout(drop) if drop > 0.0 else nn.Identity()
# init weights
self._init_weights()
def _init_weights(self):
trunc_normal_(self.w1, std=0.02)
nn.init.constant_(self.b1, 0.0)
trunc_normal_(self.w2, std=0.02)
nn.init.constant_(self.b2, 0.0)
def forward(self, x):
# gather if input is MP
if self.input_is_matmul_parallel:
x = gather_from_matmul_parallel_region(x, dim=1)
x = copy_to_matmul_parallel_region(x)
x = F.conv2d(x, self.w1, bias=self.b1)
x = self.act(x)
x = self.drop(x)
x = F.conv2d(x, self.w2, bias=None)
x = reduce_from_matmul_parallel_region(x)
x = x + torch.reshape(self.b2, (1, -1, 1, 1))
x = self.drop(x)
# scatter if output is MP
if self.output_is_matmul_parallel:
x = scatter_to_matmul_parallel_region(x, dim=1)
return x
class DistributedPatchEmbed(nn.Module):
def __init__(
self,
img_size=(224, 224),
patch_size=(16, 16),
in_chans=3,
embed_dim=768,
input_is_matmul_parallel=False,
output_is_matmul_parallel=True,
):
super(DistributedPatchEmbed, self).__init__()
# store params
self.input_parallel = input_is_matmul_parallel
self.output_parallel = output_is_matmul_parallel
# get comm sizes:
matmul_comm_size = DistributedManager().group_size("model_parallel")
# compute parameters
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = (img_size[0], img_size[1])
self.patch_size = patch_size
self.num_patches = num_patches
if self.input_parallel:
assert (
in_chans % matmul_comm_size == 0
), "Error, the in_chans needs to be divisible by matmul_parallel_size"
# get effective embedding size:
if self.output_parallel:
assert (
embed_dim % matmul_comm_size == 0
), "Error, the embed_dim needs to be divisible by matmul_parallel_size"
out_chans_local = embed_dim // matmul_comm_size
else:
out_chans_local = embed_dim
# the weights of this layer is shared across spatial parallel ranks
self.proj = nn.Conv2d(
in_chans, out_chans_local, kernel_size=patch_size, stride=patch_size
)
# make sure we reduce them across rank
self.proj.weight.is_shared_spatial = True
self.proj.bias.is_shared_spatial = True
def forward(self, x):
if self.input_parallel:
x = gather_from_matmul_parallel_region(x, dim=1)
if self.output_parallel:
x = copy_to_matmul_parallel_region(x)
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# new: B, C, H*W
x = self.proj(x).flatten(2)
return x
@torch.jit.script
def compl_mul_add_fwd(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmp = torch.einsum("bkixys,kiot->stbkoxy", a, b)
res = (
torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
+ c
)
return res
@torch.jit.script
def compl_mul_add_fwd_c(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
ac = torch.view_as_complex(a)
bc = torch.view_as_complex(b)
cc = torch.view_as_complex(c)
tmp = torch.einsum("bkixy,kio->bkoxy", ac, bc)
res = tmp + cc
return torch.view_as_real(res)
class DistributedAFNO2D(nn.Module):
def __init__(
self,
hidden_size,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1,
hidden_size_factor=1,
input_is_matmul_parallel=False,
output_is_matmul_parallel=False,
):
super(DistributedAFNO2D, self).__init__()
assert (
hidden_size % num_blocks == 0
), f"hidden_size {hidden_size} should be divisible by num_blocks {num_blocks}"
# get comm sizes:
matmul_comm_size = DistributedManager().group_size("model_parallel")
self.fft_handle = torch.fft.rfft2
self.ifft_handle = torch.fft.irfft2
self.hidden_size = hidden_size
self.sparsity_threshold = sparsity_threshold
self.num_blocks = num_blocks
assert (
self.num_blocks % matmul_comm_size == 0
), "Error, num_blocks needs to be divisible by matmul_parallel_size"
self.num_blocks_local = self.num_blocks // matmul_comm_size
self.block_size = self.hidden_size // self.num_blocks
self.hard_thresholding_fraction = hard_thresholding_fraction
self.hidden_size_factor = hidden_size_factor
self.scale = 0.02
use_complex_mult = False
self.mult_handle = (
compl_mul_add_fwd_c if use_complex_mult else compl_mul_add_fwd
)
# model parallelism
self.input_is_matmul_parallel = input_is_matmul_parallel
self.output_is_matmul_parallel = output_is_matmul_parallel
# new
# these weights need to be synced across all spatial ranks!
self.w1 = nn.Parameter(
self.scale
* torch.randn(
self.num_blocks_local,
self.block_size,
self.block_size * self.hidden_size_factor,
2,
)
)
self.b1 = nn.Parameter(
self.scale
* torch.randn(
self.num_blocks_local,
self.block_size * self.hidden_size_factor,
1,
1,
2,
)
)
self.w2 = nn.Parameter(
self.scale
* torch.randn(
self.num_blocks_local,
self.block_size * self.hidden_size_factor,
self.block_size,
2,
)
)
self.b2 = nn.Parameter(
self.scale * torch.randn(self.num_blocks_local, self.block_size, 1, 1, 2)
)
# make sure we reduce them across rank
self.w1.is_shared_spatial = True
self.b1.is_shared_spatial = True
self.w2.is_shared_spatial = True
self.b2.is_shared_spatial = True
def forward(self, x):
if not self.input_is_matmul_parallel:
# distribute data
x = scatter_to_matmul_parallel_region(x, dim=1)
# bias
bias = x
dtype = x.dtype
x = x.float()
B, C, H, W = x.shape
total_modes = H // 2 + 1
kept_modes = int(total_modes * self.hard_thresholding_fraction)
x = self.fft_handle(x, (H, W), (-2, -1), "ortho")
x = x.view(B, self.num_blocks_local, self.block_size, H, W // 2 + 1)
# new
x = torch.view_as_real(x)
o2 = torch.zeros(x.shape, device=x.device)
o1 = F.relu(
self.mult_handle(
x[
:,
:,
:,
total_modes - kept_modes : total_modes + kept_modes,
:kept_modes,
:,
],
self.w1,
self.b1,
)
)
o2[
:, :, :, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, :
] = self.mult_handle(o1, self.w2, self.b2)
# finalize
x = F.softshrink(o2, lambd=self.sparsity_threshold)
x = torch.view_as_complex(x)
x = x.reshape(B, C, H, W // 2 + 1)
x = self.ifft_handle(x, (H, W), (-2, -1), "ortho")
x = x.type(dtype) + bias
# gather
if not self.output_is_matmul_parallel:
x = gather_from_matmul_parallel_region(x, dim=1)
return x
| modulus-sym-main | modulus/sym/models/afno/distributed/layers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import torch
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.distributed.helpers import split_tensor_along_dim
from modulus.sym.distributed.helpers import _reduce
from modulus.sym.distributed.helpers import _split
from modulus.sym.distributed.helpers import _gather
# matmul parallel
class _CopyToMatmulParallelRegion(torch.autograd.Function):
"""Pass the input to the matmul parallel region."""
@staticmethod
def symbolic(graph, input_):
return input_
@staticmethod
def forward(ctx, input_):
return input_
@staticmethod
def backward(ctx, grad_output):
return _reduce(grad_output, group=DistributedManager().group("model_parallel"))
class _ReduceFromMatmulParallelRegion(torch.autograd.Function):
"""All-reduce the input from the matmul parallel region."""
@staticmethod
def symbolic(graph, input_):
return _reduce(input_, group=DistributedManager().group("model_parallel"))
@staticmethod
def forward(ctx, input_):
return _reduce(input_, group=DistributedManager().group("model_parallel"))
@staticmethod
def backward(ctx, grad_output):
return grad_output
class _ScatterToMatmulParallelRegion(torch.autograd.Function):
"""Split the input and keep only the corresponding chuck to the rank."""
@staticmethod
def symbolic(graph, input_, dim_):
return _split(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def forward(ctx, input_, dim_):
ctx.dim = dim_
return _split(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def backward(ctx, grad_output):
return (
_gather(
grad_output, ctx.dim, group=DistributedManager().group("model_parallel")
),
None,
)
class _GatherFromMatmulParallelRegion(torch.autograd.Function):
"""Gather the input from matmul parallel region and concatinate."""
@staticmethod
def symbolic(graph, input_, dim_):
return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def forward(ctx, input_, dim_):
ctx.dim = dim_
return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def backward(ctx, grad_output):
return (
_split(
grad_output, ctx.dim, group=DistributedManager().group("model_parallel")
),
None,
)
class _GatherWithinMatmulParallelRegion(torch.autograd.Function):
"""Gather the input from matmul parallel region and concatinate."""
@staticmethod
def symbolic(graph, input_, dim_):
return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def forward(ctx, input_, dim_):
ctx.dim = dim_
return _gather(input_, dim_, group=DistributedManager().group("model_parallel"))
@staticmethod
def backward(ctx, grad_output):
red = _reduce(grad_output, group=DistributedManager().group("model_parallel"))
return (
_split(red, ctx.dim, group=DistributedManager().group("model_parallel")),
None,
)
# -----------------
# Helper functions.
# -----------------
# matmul parallel
def copy_to_matmul_parallel_region(input_):
return _CopyToMatmulParallelRegion.apply(input_)
def reduce_from_matmul_parallel_region(input_):
return _ReduceFromMatmulParallelRegion.apply(input_)
def scatter_to_matmul_parallel_region(input_, dim):
return _ScatterToMatmulParallelRegion.apply(input_, dim)
def gather_from_matmul_parallel_region(input_, dim):
return _GatherFromMatmulParallelRegion.apply(input_, dim)
def gather_within_matmul_parallel_region(input_, dim):
return _GatherWithinMatmulParallelRegion.apply(input_, dim)
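# Minimal usage sketch (illustrative comment): these wrappers expose autograd-aware
# collectives over the "model_parallel" group for the distributed AFNO layers. A typical
# pattern, assuming DistributedManager has been initialized with a model-parallel group:
#
#   x = scatter_to_matmul_parallel_region(x, dim=1)    # shard channels across ranks
#   x = local_block(x)                                 # hypothetical per-rank computation
#   x = gather_from_matmul_parallel_region(x, dim=1)   # reassemble the full tensor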
| modulus-sym-main | modulus/sym/models/afno/distributed/mappings.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Modulus Neural Differential Equation Solver
"""
import os
import numpy as np
from typing import List, Union, Tuple, Callable
from omegaconf import DictConfig
import warnings
from modulus.sym.trainer import Trainer
from modulus.sym.domain import Domain
from modulus.sym.loss.aggregator import NTK
# base class for solver
class Solver(Trainer):
"""
Base solver class for solving single domain.
Parameters
----------
cfg : DictConfig
Hydra dictionary of configs.
domain : Domain
Domain to solve for.
"""
def __init__(self, cfg: DictConfig, domain: Domain):
# set domain
self.domain = domain
super(Solver, self).__init__(cfg)
# NTK setup:
if cfg.training.ntk.use_ntk:
ntk = NTK(
run_per_step=cfg.training.ntk.run_freq,
save_name=cfg.training.ntk.save_name,
)
self.domain.add_ntk(ntk)
@property
def network_dir(self):
return self._network_dir
@property
def initialization_network_dir(self):
return self._initialization_network_dir
def compute_losses(self, step: int):
return self.domain.compute_losses(step)
def get_saveable_models(self):
return self.domain.get_saveable_models()
def create_global_optimizer_model(self):
return self.domain.create_global_optimizer_model()
def load_data(self, static: bool = False):
self.domain.load_data(static)
def load_network(self):
return Trainer._load_network(
self.initialization_network_dir,
self.network_dir,
self.saveable_models,
self.optimizer,
self.aggregator,
self.scheduler,
self.scaler,
self.log,
self.manager,
self.device,
)
def load_optimizer(self):
return Trainer._load_optimizer(
self.network_dir,
self.optimizer,
self.aggregator,
self.scheduler,
self.scaler,
self.log,
self.device,
)
def load_model(self):
return Trainer._load_model(
self.initialization_network_dir,
self.network_dir,
self.saveable_models,
self.step,
self.log,
self.device,
)
def load_step(self):
return Trainer._load_step(
self.network_dir,
self.device,
)
def save_checkpoint(self, step: int):
Trainer._save_checkpoint(
self.network_dir,
self.saveable_models,
self.optimizer,
self.aggregator,
self.scheduler,
self.scaler,
step,
)
def record_constraints(self):
self.domain.rec_constraints(self.network_dir)
def record_validators(self, step: int):
return self.domain.rec_validators(
self.network_dir, self.writer, self.save_filetypes, step
)
@property
def has_validators(self):
return bool(self.domain.validators)
def record_inferencers(self, step: int):
self.domain.rec_inferencers(
self.network_dir, self.writer, self.save_filetypes, step
)
def record_stream(self, inferencer, name):
return self.domain.rec_stream(
inferencer,
name,
self.network_dir,
self.step,
self.save_results,
self.save_filetypes,
self.to_cpu,
)
@property
def has_inferencers(self):
return bool(self.domain.inferencers)
def record_monitors(self, step: int):
return self.domain.rec_monitors(self.network_dir, self.writer, step)
@property
def has_monitors(self):
return bool(self.domain.monitors)
def get_num_losses(self):
return self.domain.get_num_losses()
def solve(self, sigterm_handler=None):
if self.cfg.run_mode == "train":
self._train_loop(sigterm_handler)
elif self.cfg.run_mode == "eval":
self._eval()
else:
raise RuntimeError("Invalid run mode")
def train(self, sigterm_handler=None):
self._train_loop(sigterm_handler)
def eval(self):
self._eval()
def stream(self, save_results=False, to_cpu=True):
self.save_results = save_results
self.to_cpu = to_cpu
return self._stream()
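# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original API): the typical lifecycle of
# a Solver. The helper name `_example_run` is hypothetical; `cfg` is the Hydra
# config that a Modulus Sym entry-point script would normally provide and
# `domain` is a Domain already populated with constraints.
# ---------------------------------------------------------------------------
def _example_run(cfg: DictConfig, domain: Domain) -> None:
    """Construct a solver for a populated domain and run it."""
    slv = Solver(cfg, domain)
    # cfg.run_mode selects between the training loop ("train") and "eval"
    slv.solve()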
| modulus-sym-main | modulus/sym/solver/solver.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .solver import Solver
from .sequential import SequentialSolver
from .multidomain import MultiDomainSolver
| modulus-sym-main | modulus/sym/solver/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from typing import List, Union, Tuple, Callable
from omegaconf import DictConfig
import warnings
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.trainer import Trainer
from modulus.sym.domain import Domain
from modulus.sym.loss.aggregator import NTK
from .solver import Solver
class SequentialSolver(Solver):
"""
Solver class for solving a sequence of domains.
This solver can be used to set up iterative methods
like the hFTB conjugate heat transfer method or
the moving time window method for transient problems.
Parameters
----------
cfg : DictConfig
Hydra dictionary of configs.
domains : List[Tuple[int, Domain]]
List of Domains to sequentially solve.
Each domain is given as a tuple where the first
element is an int for how many times to solve
the domain and the second element is the domain.
For example, `domains=[(1, domain_a), (4, domain_b)]`
would solve `domain_a` once and then solve `domain_b`
4 times in a row.
custom_update_operation : Union[Callable, None] = None
A callable function to update any weights in models.
This function will be called at the end of every
iteration.
"""
def __init__(
self,
cfg: DictConfig,
domains: List[Tuple[int, Domain]],
custom_update_operation: Union[Callable, None] = None,
):
# check that domains have different names
assert len(set([d.name for _, d in domains])) == len(
domains
), "domains need to have unique names, " + str([d.name for _, d in domains])
# check not using ntk with seq solver
assert (
not cfg.training.ntk.use_ntk
), "ntk is not supported with SequentialSolver"
# set domains
self.domains = domains
# set update operation after solving each domain
self.custom_update_operation = custom_update_operation
# load rest of initializations
Trainer.__init__(self, cfg)
# load current index
self.load_iteration_step()
    def load_iteration_step(self):
        try:
            with open(self._network_dir + "/current_step.txt", "r") as f:
                contents = f.readlines()[0]
            domain_index = int(contents.split(" ")[0])
            iteration_index = int(contents.split(" ")[1])
        except (OSError, IndexError, ValueError):
            # no saved step yet (or the file is unreadable): start from scratch
            domain_index = 0
            iteration_index = 0
        self.domain_index = domain_index
        self.iteration_index = iteration_index
    def save_iteration_step(self):
        # context manager ensures the step file is flushed and closed
        with open(self._network_dir + "/current_step.txt", "w") as f:
            f.write(str(self.domain_index) + " " + str(self.iteration_index))
@property
def domain(self):
return self.domains[self.domain_index][1]
@property
def network_dir(self):
dir_name = self._network_dir + "/" + self.domain.name
if self.domains[self.domain_index][0] > 1:
dir_name += "_" + str(self.iteration_index).zfill(4)
return dir_name
def solve(self, sigterm_handler=None):
if self.cfg.run_mode == "train":
# make directory if doesn't exist
if DistributedManager().rank == 0:
os.makedirs(self.network_dir, exist_ok=True)
# run train loop for each domain and each index
            # solve each domain in the given sequence
for domain_index in range(self.domain_index, len(self.domains)):
# solve for number of iterations in train_domain
for iteration_index in range(
self.iteration_index, self.domains[domain_index][0]
):
# set internal domain index and iteration index
self.domain_index = domain_index
self.iteration_index = iteration_index
# save current iteration step
self.save_iteration_step()
# solve for domain
self.log.info(
"Solving for Domain "
+ str(self.domain.name)
+ ", iteration "
+ str(self.iteration_index)
)
self._train_loop(sigterm_handler)
# run user defined custom update operation
if self.custom_update_operation is not None:
self.custom_update_operation()
elif self.cfg.run_mode == "eval":
raise NotImplementedError(
"eval mode not implemented for sequential training"
)
else:
raise RuntimeError("Invalid run mode")
| modulus-sym-main | modulus/sym/solver/sequential.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from typing import List, Union, Tuple, Callable
from omegaconf import DictConfig
import warnings
from modulus.sym.trainer import Trainer
from modulus.sym.domain import Domain
from modulus.sym.loss.aggregator import NTK
from .solver import Solver
class MultiDomainSolver(Solver):
"""
Solver class for solving multiple domains.
NOTE this Solver is currently experimental and not fully supported.
"""
def __init__(self, cfg: DictConfig, domains: List[Domain]):
# warning message for experimental
warnings.warn(
"This solver is currently experimental and unforeseen errors may occur."
)
# check that domains have different names
        assert len(set([d.name for d in domains])) == len(
            domains
        ), "domains need to have unique names, " + str([d.name for d in domains])
        # check not using ntk with the multi-domain solver
assert (
not cfg.training.ntk.use_ntk
), "ntk is not supported with MultiDomainSolver"
# set number of domains per iteration
self.domain_batch_size = cfg["domain_batch_size"]
# set domains
self.domains = domains
# load rest of initializations
Trainer.__init__(self, cfg)
def compute_losses(self, step: int):
batch_index = np.random.choice(len(self.domains), self.domain_batch_size)
losses = {}
for i in batch_index:
# compute losses
constraint_losses = self.domains[i].compute_losses(step)
# add together losses of like kind
for loss_key, value in constraint_losses.items():
if loss_key not in list(losses.keys()):
losses[loss_key] = value
else:
losses[loss_key] += value
return losses
def get_saveable_models(self):
return self.domains[0].get_saveable_models()
def create_global_optimizer_model(self):
return self.domains[0].create_global_optimizer_model()
def get_num_losses(self):
return self.domains[0].get_num_losses()
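# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original API): constructing the
# experimental multi-domain solver. The helper name `_example_multidomain_run`
# is hypothetical; note that `domain_batch_size` must be present in the config
# since it controls how many domains are sampled per step (see __init__).
# ---------------------------------------------------------------------------
def _example_multidomain_run(cfg: DictConfig, domains: List[Domain]) -> None:
    """Sample `domain_batch_size` domains per step and sum their losses."""
    slv = MultiDomainSolver(cfg, domains)
    slv.solve()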
| modulus-sym-main | modulus/sym/solver/multidomain.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Abs
import numpy as np
from .geometry import Geometry, csg_curve_naming
from .curve import SympyCurve
from .parameterization import Parameterization, Parameter, Bounds
from .helper import _sympy_sdf_to_sdf
class Point1D(Geometry):
"""
1D Point along x-axis
Parameters
----------
point : int or float
x coordinate of the point
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point, parameterization=Parameterization()):
# make sympy symbols to use
x = Symbol("x")
# curves for each side
curve_parameterization = Parameterization({Symbol(csg_curve_naming(0)): (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
pt_1 = SympyCurve(
functions={"x": point, "normal_x": 1.0},
area=1.0,
parameterization=curve_parameterization,
)
curves = [pt_1]
# calculate SDF
sdf = x - point
# calculate bounds
bounds = Bounds(
{Parameter("x"): (point, point)}, parameterization=parameterization
)
# initialize
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=1,
bounds=bounds,
parameterization=parameterization,
)
class Line1D(Geometry):
"""
1D Line along x-axis
Parameters
----------
point_1 : int or float
lower bound point of line
point_2 : int or float
upper bound point of line
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, parameterization=Parameterization()):
# make sympy symbols to use
x = Symbol("x")
# curves for each side
curve_parameterization = Parameterization({Symbol(csg_curve_naming(0)): (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
pt1 = SympyCurve(
functions={"x": point_1, "normal_x": -1},
area=1.0,
parameterization=curve_parameterization,
)
pt2 = SympyCurve(
functions={"x": point_2, "normal_x": 1},
area=1.0,
parameterization=curve_parameterization,
)
curves = [pt1, pt2]
# calculate SDF
dist = point_2 - point_1
center_x = point_1 + dist / 2
sdf = dist / 2 - Abs(x - center_x)
# calculate bounds
bounds = Bounds(
{Parameter("x"): (point_1, point_2)}, parameterization=parameterization
)
# initialize
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=1,
bounds=bounds,
parameterization=parameterization,
)
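# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a plain NumPy mirror
# of the symbolic Line1D SDF above, handy for sanity-checking the sign
# convention (positive inside the segment, zero at the end points, negative
# outside). The helper name `_line1d_sdf_sketch` is hypothetical.
# ---------------------------------------------------------------------------
def _line1d_sdf_sketch(x, point_1, point_2):
    """NumPy version of the Line1D signed distance: dist/2 - |x - center|."""
    # e.g. _line1d_sdf_sketch(np.array([0.25, 1.5]), 0.0, 1.0) -> [0.25, -0.5]
    dist = point_2 - point_1
    center_x = point_1 + dist / 2
    return dist / 2 - np.abs(x - center_x)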
| modulus-sym-main | modulus/sym/geometry/primitives_1d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .geometry import Geometry
from .parameterization import Bounds, Parameterization, Parameter
| modulus-sym-main | modulus/sym/geometry/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primitives for 2D geometries
see https://www.iquilezles.org/www/articles/distfunctions/distfunctions.html
"""
import sys
from operator import mul
from sympy import Symbol, Abs, Max, Min, sqrt, sin, cos, acos, atan2, pi, Heaviside
from functools import reduce
pi = float(pi)
from sympy.vector import CoordSys3D
from .curve import SympyCurve
from .helper import _sympy_sdf_to_sdf
from .geometry import Geometry, csg_curve_naming
from .parameterization import Parameterization, Parameter, Bounds
class Line(Geometry):
"""
2D Line parallel to y-axis
Parameters
----------
point_1 : tuple with 2 ints or floats
lower bound point of line segment
point_2 : tuple with 2 ints or floats
upper bound point of line segment
normal : int or float
normal direction of line (+1 or -1)
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, normal=1, parameterization=Parameterization()):
assert point_1[0] == point_2[0], "Points must have same x-coordinate"
# make sympy symbols to use
l = Symbol(csg_curve_naming(0))
x = Symbol("x")
# curves for each side
curve_parameterization = Parameterization({l: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
dist_y = point_2[1] - point_1[1]
line_1 = SympyCurve(
functions={
"x": point_1[0],
"y": point_1[1] + l * dist_y,
"normal_x": 1e-10 + normal, # TODO rm 1e-10
"normal_y": 0,
},
parameterization=curve_parameterization,
area=dist_y,
)
curves = [line_1]
# calculate SDF
sdf = normal * (point_1[0] - x)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
},
parameterization=parameterization,
)
# initialize Line
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Channel2D(Geometry):
"""
2D Channel (no bounding curves in x-direction)
Parameters
----------
point_1 : tuple with 2 ints or floats
lower bound point of channel
point_2 : tuple with 2 ints or floats
upper bound point of channel
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, parameterization=Parameterization()):
# make sympy symbols to use
l = Symbol(csg_curve_naming(0))
y = Symbol("y")
# curves for each side
curve_parameterization = Parameterization({l: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
dist_x = point_2[0] - point_1[0]
dist_y = point_2[1] - point_1[1]
line_1 = SympyCurve(
functions={
"x": l * dist_x + point_1[0],
"y": point_1[1],
"normal_x": 0,
"normal_y": -1,
},
parameterization=curve_parameterization,
area=dist_x,
)
line_2 = SympyCurve(
functions={
"x": l * dist_x + point_1[0],
"y": point_2[1],
"normal_x": 0,
"normal_y": 1,
},
parameterization=curve_parameterization,
area=dist_x,
)
curves = [line_1, line_2]
# calculate SDF
center_y = point_1[1] + (dist_y) / 2
y_diff = Abs(y - center_y) - (point_2[1] - center_y)
outside_distance = sqrt(Max(y_diff, 0) ** 2)
inside_distance = Min(y_diff, 0)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
},
parameterization=parameterization,
)
# initialize Channel2D
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Rectangle(Geometry):
"""
2D Rectangle
Parameters
----------
point_1 : tuple with 2 ints or floats
lower bound point of rectangle
point_2 : tuple with 2 ints or floats
upper bound point of rectangle
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, parameterization=Parameterization()):
# make sympy symbols to use
l = Symbol(csg_curve_naming(0))
x, y = Symbol("x"), Symbol("y")
# curves for each side
curve_parameterization = Parameterization({l: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
dist_x = point_2[0] - point_1[0]
dist_y = point_2[1] - point_1[1]
line_1 = SympyCurve(
functions={
"x": l * dist_x + point_1[0],
"y": point_1[1],
"normal_x": 0,
"normal_y": -1,
},
parameterization=curve_parameterization,
area=dist_x,
)
line_2 = SympyCurve(
functions={
"x": point_2[0],
"y": l * dist_y + point_1[1],
"normal_x": 1,
"normal_y": 0,
},
parameterization=curve_parameterization,
area=dist_y,
)
line_3 = SympyCurve(
functions={
"x": l * dist_x + point_1[0],
"y": point_2[1],
"normal_x": 0,
"normal_y": 1,
},
parameterization=curve_parameterization,
area=dist_x,
)
line_4 = SympyCurve(
functions={
"x": point_1[0],
"y": -l * dist_y + point_2[1],
"normal_x": -1,
"normal_y": 0,
},
parameterization=curve_parameterization,
area=dist_y,
)
curves = [line_1, line_2, line_3, line_4]
# calculate SDF
center_x = point_1[0] + (dist_x) / 2
center_y = point_1[1] + (dist_y) / 2
x_diff = Abs(x - center_x) - (point_2[0] - center_x)
y_diff = Abs(y - center_y) - (point_2[1] - center_y)
outside_distance = sqrt(Max(x_diff, 0) ** 2 + Max(y_diff, 0) ** 2)
inside_distance = Min(Max(x_diff, y_diff), 0)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
},
parameterization=parameterization,
)
# initialize Rectangle
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Circle(Geometry):
"""
2D Circle
Parameters
----------
center : tuple with 2 ints or floats
center point of circle
radius : int or float
radius of circle
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, radius, parameterization=Parameterization()):
# make sympy symbols to use
theta = Symbol(csg_curve_naming(0))
x, y = Symbol("x"), Symbol("y")
# curve for perimeter of the circle
curve_parameterization = Parameterization({theta: (0, 2 * pi)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve = SympyCurve(
functions={
"x": center[0] + radius * cos(theta),
"y": center[1] + radius * sin(theta),
"normal_x": 1 * cos(theta),
"normal_y": 1 * sin(theta),
},
parameterization=curve_parameterization,
area=2 * pi * radius,
)
curves = [curve]
# calculate SDF
sdf = radius - sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - radius, center[0] + radius),
Parameter("y"): (center[1] - radius, center[1] + radius),
},
parameterization=parameterization,
)
# initialize Circle
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Triangle(Geometry):
"""
    2D Isosceles Triangle
    Axis of symmetry parallel to the y-axis
Parameters
----------
center : tuple with 2 ints or floats
center of base of triangle
base : int or float
base of triangle
height : int or float
height of triangle
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, base, height, parameterization=Parameterization()):
# make sympy symbols to use
x, y = Symbol("x"), Symbol("y")
t, h = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
N = CoordSys3D("N")
P = x * N.i + y * N.j
O = center[0] * N.i + center[1] * N.j
H = center[0] * N.i + (center[1] + height) * N.j
B = (center[0] + base / 2) * N.i + center[1] * N.j
OP = P - O
OH = H - O
PH = OH - OP
angle = acos(PH.dot(OH) / sqrt(PH.dot(PH)) / sqrt(OH.dot(OH)))
apex_angle = atan2(base / 2, height)
hypo_sin = sqrt(height**2 + (base / 2) ** 2) * sin(apex_angle)
hypo_cos = sqrt(height**2 + (base / 2) ** 2) * cos(apex_angle)
dist = sqrt(PH.dot(PH)) * sin(Min(angle - apex_angle, pi / 2))
# curve for each side
curve_parameterization = Parameterization({t: (-1, 1), h: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + t * base / 2,
"y": center[1] + t * 0,
"normal_x": 0,
"normal_y": -1,
},
parameterization=curve_parameterization,
area=base,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + h * hypo_sin,
"y": center[1] + height - h * hypo_cos,
"normal_x": 1 * cos(apex_angle),
"normal_y": 1 * sin(apex_angle),
},
parameterization=curve_parameterization,
area=sqrt(height**2 + (base / 2) ** 2),
)
curve_3 = SympyCurve(
functions={
"x": center[0] - h * hypo_sin,
"y": center[1] + height - h * hypo_cos,
"normal_x": -1 * cos(apex_angle),
"normal_y": 1 * sin(apex_angle),
},
parameterization=curve_parameterization,
area=sqrt(height**2 + (base / 2) ** 2),
)
curves = [curve_1, curve_2, curve_3]
# calculate SDF
outside_distance = 1 * sqrt(Max(0, dist) ** 2 + Max(0, center[1] - y) ** 2)
inside_distance = -1 * Min(Abs(Min(0, dist)), Abs(Min(0, center[1] - y)))
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - base / 2, center[0] + base / 2),
Parameter("y"): (center[1], center[1] + height),
},
parameterization=parameterization,
)
# initialize Triangle
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Ellipse(Geometry):
"""
2D Ellipse
Parameters
----------
    center : tuple with 2 ints or floats
        center point of ellipse
    major : int or float
        semi-axis length along the x-direction
    minor : int or float
        semi-axis length along the y-direction
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, major, minor, parameterization=Parameterization()):
# make sympy symbols to use
theta = Symbol(csg_curve_naming(0))
x, y = Symbol("x"), Symbol("y")
mag = sqrt((minor * cos(theta)) ** 2 + (major * sin(theta)) ** 2)
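        # Ramanujan's approximation of the ellipse perimeter (for a curve, the
        # `area` argument holds its 1D measure, i.e. arc length)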
area = pi * (
3 * (major + minor) - sqrt((3 * minor + major) * (3 * major + minor))
)
        try:
            area = float(area)
        except (TypeError, ValueError):
            # keep the symbolic expression if major/minor are sympy symbols
            pass
# curve for perimeter of the circle
curve_parameterization = Parameterization({theta: (0, 2 * pi)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve = SympyCurve(
functions={
"x": center[0] + major * cos(theta),
"y": center[1] + minor * sin(theta),
"normal_x": minor * cos(theta) / mag,
"normal_y": major * sin(theta) / mag,
},
parameterization=curve_parameterization,
area=area,
)
curves = [curve]
# calculate SDF
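        # note: this is a smooth level set (positive inside, zero on the
        # boundary, negative outside), not a true Euclidean signed distance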
sdf = 1 - (((x - center[0]) / major) ** 2 + ((y - center[1]) / minor) ** 2)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - major, center[0] + major),
Parameter("y"): (center[1] - minor, center[1] + minor),
},
parameterization=parameterization,
)
# initialize Ellipse
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
class Polygon(Geometry):
"""
2D Polygon
Parameters
----------
points : list of tuple with 2 ints or floats
        ordered vertices of the polygon (the closing edge back to the first
        vertex is added automatically)
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, points, parameterization=Parameterization()):
# make sympy symbols to use
s = Symbol(csg_curve_naming(0))
x = Symbol("x")
y = Symbol("y")
# wrap points
        wrapped_points = points + [points[0]]
# curves for each side
curve_parameterization = Parameterization({s: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curves = []
        for v1, v2 in zip(wrapped_points[:-1], wrapped_points[1:]):
# area
dx = v2[0] - v1[0]
dy = v2[1] - v1[1]
area = (dx**2 + dy**2) ** 0.5
# generate normals
normal_x = dy / area
normal_y = -dx / area
line = SympyCurve(
functions={
"x": dx * s + v1[0],
"y": dy * s + v1[1],
"normal_x": dy / area,
"normal_y": -dx / area,
},
parameterization=curve_parameterization,
area=area,
)
curves.append(line)
# calculate SDF
        sdfs = [(x - wrapped_points[0][0]) ** 2 + (y - wrapped_points[0][1]) ** 2]
conds = []
        for v1, v2 in zip(wrapped_points[:-1], wrapped_points[1:]):
# sdf calculation
dx = v1[0] - v2[0]
dy = v1[1] - v2[1]
px = x - v2[0]
py = y - v2[1]
d_dot_d = dx**2 + dy**2
p_dot_d = px * dx + py * dy
max_min = Max(Min(p_dot_d / d_dot_d, 1.0), 0.0)
vx = px - dx * max_min
vy = py - dy * max_min
sdf = vx**2 + vy**2
sdfs.append(sdf)
# winding calculation
cond_1 = Heaviside(y - v2[1])
cond_2 = Heaviside(v1[1] - y)
cond_3 = Heaviside((dx * py) - (dy * px))
all_cond = cond_1 * cond_2 * cond_3
none_cond = (1.0 - cond_1) * (1.0 - cond_2) * (1.0 - cond_3)
cond = 1.0 - 2.0 * Min(all_cond + none_cond, 1.0)
conds.append(cond)
# set inside outside
sdf = Min(*sdfs)
cond = reduce(mul, conds)
sdf = sqrt(sdf) * -cond
# calculate bounds
min_x = Min(*[p[0] for p in points])
if min_x.is_number:
min_x = float(min_x)
max_x = Max(*[p[0] for p in points])
if max_x.is_number:
max_x = float(max_x)
min_y = Min(*[p[1] for p in points])
if min_y.is_number:
min_y = float(min_y)
max_y = Max(*[p[1] for p in points])
if max_y.is_number:
max_y = float(max_y)
bounds = Bounds(
{
Parameter("x"): (min_x, max_x),
Parameter("y"): (min_y, max_y),
},
parameterization=parameterization,
)
# initialize Polygon
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=2,
bounds=bounds,
parameterization=parameterization,
)
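# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a plain NumPy mirror
# of the symbolic Rectangle SDF above, useful for quickly checking values and
# sign conventions without building a Geometry. The helper name
# `_rectangle_sdf_sketch` is hypothetical; numpy is imported locally because
# this module otherwise only uses sympy.
# ---------------------------------------------------------------------------
def _rectangle_sdf_sketch(x, y, point_1, point_2):
    """NumPy version of the Rectangle SDF (positive inside, negative outside)."""
    import numpy as np

    center_x = point_1[0] + (point_2[0] - point_1[0]) / 2
    center_y = point_1[1] + (point_2[1] - point_1[1]) / 2
    x_diff = np.abs(x - center_x) - (point_2[0] - center_x)
    y_diff = np.abs(y - center_y) - (point_2[1] - center_y)
    outside_distance = np.sqrt(np.maximum(x_diff, 0) ** 2 + np.maximum(y_diff, 0) ** 2)
    inside_distance = np.minimum(np.maximum(x_diff, y_diff), 0)
    return -(outside_distance + inside_distance)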
| modulus-sym-main | modulus/sym/geometry/primitives_2d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Primitives for 3D geometries
see https://www.iquilezles.org/www/articles/distfunctions/distfunctions.html
"""
from sympy import (
Symbol,
Function,
Abs,
Max,
Min,
sqrt,
pi,
sin,
cos,
atan,
atan2,
acos,
asin,
sign,
)
from sympy.vector import CoordSys3D
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from .geometry import Geometry, csg_curve_naming
from .helper import _sympy_sdf_to_sdf
from .curve import SympyCurve, Curve
from .parameterization import Parameterization, Parameter, Bounds
from ..constants import diff_str
class Plane(Geometry):
"""
3D Plane perpendicular to x-axis
Parameters
----------
point_1 : tuple with 3 ints or floats
lower bound point of plane
point_2 : tuple with 3 ints or floats
        upper bound point of plane
    normal : int or float
        normal direction of plane (+1 or -1)
    parameterization : Parameterization
        Parameterization of geometry.
"""
def __init__(self, point_1, point_2, normal=1, parameterization=Parameterization()):
assert (
point_1[0] == point_2[0]
), "Points must have same coordinate on normal dim"
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
s_1, s_2 = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
center = (
point_1[0] + (point_2[0] - point_1[0]) / 2,
point_1[1] + (point_2[1] - point_1[1]) / 2,
point_1[2] + (point_2[2] - point_1[2]) / 2,
)
side_y = point_2[1] - point_1[1]
side_z = point_2[2] - point_1[2]
# surface of the plane
curve_parameterization = Parameterization({s_1: (-1, 1), s_2: (-1, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0],
"y": center[1] + 0.5 * s_1 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 1e-10 + normal, # TODO rm 1e-10
"normal_y": 0,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_y * side_z,
)
curves = [curve_1]
# calculate SDF
sdf = normal * (center[0] - x)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
Parameter("z"): (point_1[2], point_2[2]),
},
parameterization=parameterization,
)
# initialize Plane
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Channel(Geometry):
"""
3D Channel (no bounding surfaces in x-direction)
Parameters
----------
point_1 : tuple with 3 ints or floats
lower bound point of channel
point_2 : tuple with 3 ints or floats
upper bound point of channel
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
s_1, s_2 = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
center = (
point_1[0] + (point_2[0] - point_1[0]) / 2,
point_1[1] + (point_2[1] - point_1[1]) / 2,
point_1[2] + (point_2[2] - point_1[2]) / 2,
)
side_x = point_2[0] - point_1[0]
side_y = point_2[1] - point_1[1]
side_z = point_2[2] - point_1[2]
# surface of the channel
curve_parameterization = Parameterization({s_1: (-1, 1), s_2: (-1, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * s_2 * side_y,
"z": center[2] + 0.5 * side_z,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=side_x * side_y,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * s_2 * side_y,
"z": center[2] - 0.5 * side_z,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=side_x * side_y,
)
curve_3 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 0,
"normal_y": 1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_x * side_z,
)
curve_4 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] - 0.5 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 0,
"normal_y": -1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_x * side_z,
)
curves = [curve_1, curve_2, curve_3, curve_4]
# calculate SDF
y_dist = Abs(y - center[1]) - 0.5 * side_y
z_dist = Abs(z - center[2]) - 0.5 * side_z
outside_distance = sqrt(Max(y_dist, 0) ** 2 + Max(z_dist, 0) ** 2)
inside_distance = Min(Max(y_dist, z_dist), 0)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
Parameter("z"): (point_1[2], point_2[2]),
},
parameterization=parameterization,
)
# initialize Channel
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Box(Geometry):
"""
3D Box/Cuboid
Parameters
----------
point_1 : tuple with 3 ints or floats
lower bound point of box
point_2 : tuple with 3 ints or floats
upper bound point of box
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, point_1, point_2, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
s_1, s_2 = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
center = (
point_1[0] + (point_2[0] - point_1[0]) / 2,
point_1[1] + (point_2[1] - point_1[1]) / 2,
point_1[2] + (point_2[2] - point_1[2]) / 2,
)
side_x = point_2[0] - point_1[0]
side_y = point_2[1] - point_1[1]
side_z = point_2[2] - point_1[2]
# surface of the box
curve_parameterization = Parameterization({s_1: (-1, 1), s_2: (-1, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * s_2 * side_y,
"z": center[2] + 0.5 * side_z,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=side_x * side_y,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * s_2 * side_y,
"z": center[2] - 0.5 * side_z,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=side_x * side_y,
)
curve_3 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] + 0.5 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 0,
"normal_y": 1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_x * side_z,
)
curve_4 = SympyCurve(
functions={
"x": center[0] + 0.5 * s_1 * side_x,
"y": center[1] - 0.5 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 0,
"normal_y": -1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_x * side_z,
)
curve_5 = SympyCurve(
functions={
"x": center[0] + 0.5 * side_x,
"y": center[1] + 0.5 * s_1 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": 1,
"normal_y": 0,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_y * side_z,
)
curve_6 = SympyCurve(
functions={
"x": center[0] - 0.5 * side_x,
"y": center[1] + 0.5 * s_1 * side_y,
"z": center[2] + 0.5 * s_2 * side_z,
"normal_x": -1,
"normal_y": 0,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side_y * side_z,
)
curves = [curve_1, curve_2, curve_3, curve_4, curve_5, curve_6]
# calculate SDF
x_dist = Abs(x - center[0]) - 0.5 * side_x
y_dist = Abs(y - center[1]) - 0.5 * side_y
z_dist = Abs(z - center[2]) - 0.5 * side_z
outside_distance = sqrt(
Max(x_dist, 0) ** 2 + Max(y_dist, 0) ** 2 + Max(z_dist, 0) ** 2
)
inside_distance = Min(Max(x_dist, y_dist, z_dist), 0)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (point_1[0], point_2[0]),
Parameter("y"): (point_1[1], point_2[1]),
Parameter("z"): (point_1[2], point_2[2]),
},
parameterization=parameterization,
)
# initialize Box
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class VectorizedBoxes(Geometry):
"""
Vectorized 3D Box/Cuboid for faster surface and interior sampling.
This primitive can be used if many boxes are required and is
    significantly faster than combining many boxes together with Boolean
operations.
Parameters
----------
box_bounds : np.ndarray
An array specifying the bounds of boxes. Shape of array is
`[nr_boxes, 3, 2]` where the last dim stores the lower and
upper bounds respectively.
dx : float
delta x used for SDF derivative calculations.
"""
def __init__(self, box_bounds, dx=0.0001):
# compute box centers and sides once for optimization
box_centers = (
box_bounds[:, :, 0] + (box_bounds[:, :, 1] - box_bounds[:, :, 0]) / 2
)
side = box_bounds[:, :, 1] - box_bounds[:, :, 0]
# create curves
def _sample(box_bounds, box_centers, side):
def sample(nr_points, parameterization, quasirandom):
# area of all faces
face_area = np.concatenate(
2
* [
side[:, 0] * side[:, 1],
side[:, 0] * side[:, 2],
side[:, 1] * side[:, 2],
]
) # [6 * nr_boxes]
# calculate number or points per face
face_probabilities = face_area / np.linalg.norm(face_area, ord=1)
face_index = np.arange(face_area.shape[0])
points_per_face = np.random.choice(
face_index, nr_points, p=face_probabilities
)
points_per_face, _ = np.histogram(
points_per_face, np.arange(face_area.shape[0] + 1) - 0.5
)
# generate random values to use when sampling faces
s_1 = 2.0 * (np.random.rand(nr_points) - 0.5)
s_2 = 2.0 * (np.random.rand(nr_points) - 0.5)
# repeat side and center for each point
repeat_side = np.repeat(
np.concatenate(6 * [side], axis=0), points_per_face, axis=0
)
repeat_centers = np.repeat(
np.concatenate(6 * [box_centers], axis=0), points_per_face, axis=0
)
repeat_face_area = np.repeat(
face_area / points_per_face, points_per_face, axis=0
)
# sample face 1
nr_face_1 = np.sum(points_per_face[0 : box_bounds.shape[0]])
face_1_x = (
repeat_centers[:nr_face_1, 0]
+ 0.5 * s_1[:nr_face_1] * repeat_side[:nr_face_1, 0]
)
face_1_y = (
repeat_centers[:nr_face_1, 1]
+ 0.5 * s_2[:nr_face_1] * repeat_side[:nr_face_1, 1]
)
face_1_z = (
repeat_centers[:nr_face_1, 2] + 0.5 * repeat_side[:nr_face_1, 2]
)
face_1_normal_x = np.zeros_like(face_1_x)
face_1_normal_y = np.zeros_like(face_1_x)
face_1_normal_z = np.ones_like(face_1_x)
area_1 = repeat_face_area[:nr_face_1]
# sample face 2
nr_face_2 = (
np.sum(
points_per_face[box_bounds.shape[0] : 2 * box_bounds.shape[0]]
)
+ nr_face_1
)
face_2_x = (
repeat_centers[nr_face_1:nr_face_2, 0]
+ 0.5
* s_1[nr_face_1:nr_face_2]
* repeat_side[nr_face_1:nr_face_2, 0]
)
face_2_y = (
repeat_centers[nr_face_1:nr_face_2, 1]
+ 0.5 * repeat_side[nr_face_1:nr_face_2, 1]
)
face_2_z = (
repeat_centers[nr_face_1:nr_face_2, 2]
+ 0.5
* s_2[nr_face_1:nr_face_2]
* repeat_side[nr_face_1:nr_face_2, 2]
)
face_2_normal_x = np.zeros_like(face_2_x)
face_2_normal_y = np.ones_like(face_2_x)
face_2_normal_z = np.zeros_like(face_2_x)
area_2 = repeat_face_area[nr_face_1:nr_face_2]
# sample face 3
nr_face_3 = (
np.sum(
points_per_face[
2 * box_bounds.shape[0] : 3 * box_bounds.shape[0]
]
)
+ nr_face_2
)
face_3_x = (
repeat_centers[nr_face_2:nr_face_3, 0]
+ 0.5 * repeat_side[nr_face_2:nr_face_3, 0]
)
face_3_y = (
repeat_centers[nr_face_2:nr_face_3, 1]
+ 0.5
* s_1[nr_face_2:nr_face_3]
* repeat_side[nr_face_2:nr_face_3, 1]
)
face_3_z = (
repeat_centers[nr_face_2:nr_face_3, 2]
+ 0.5
* s_2[nr_face_2:nr_face_3]
* repeat_side[nr_face_2:nr_face_3, 2]
)
face_3_normal_x = np.ones_like(face_3_x)
face_3_normal_y = np.zeros_like(face_3_x)
face_3_normal_z = np.zeros_like(face_3_x)
area_3 = repeat_face_area[nr_face_2:nr_face_3]
# sample face 4
nr_face_4 = (
np.sum(
points_per_face[
3 * box_bounds.shape[0] : 4 * box_bounds.shape[0]
]
)
+ nr_face_3
)
face_4_x = (
repeat_centers[nr_face_3:nr_face_4, 0]
+ 0.5
* s_1[nr_face_3:nr_face_4]
* repeat_side[nr_face_3:nr_face_4, 0]
)
face_4_y = (
repeat_centers[nr_face_3:nr_face_4, 1]
+ 0.5
* s_2[nr_face_3:nr_face_4]
* repeat_side[nr_face_3:nr_face_4, 1]
)
face_4_z = (
repeat_centers[nr_face_3:nr_face_4, 2]
- 0.5 * repeat_side[nr_face_3:nr_face_4, 2]
)
face_4_normal_x = np.zeros_like(face_4_x)
face_4_normal_y = np.zeros_like(face_4_x)
face_4_normal_z = -np.ones_like(face_4_x)
area_4 = repeat_face_area[nr_face_3:nr_face_4]
# sample face 5
nr_face_5 = (
np.sum(
points_per_face[
4 * box_bounds.shape[0] : 5 * box_bounds.shape[0]
]
)
+ nr_face_4
)
face_5_x = (
repeat_centers[nr_face_4:nr_face_5, 0]
+ 0.5
* s_1[nr_face_4:nr_face_5]
* repeat_side[nr_face_4:nr_face_5, 0]
)
face_5_y = (
repeat_centers[nr_face_4:nr_face_5, 1]
- 0.5 * repeat_side[nr_face_4:nr_face_5, 1]
)
face_5_z = (
repeat_centers[nr_face_4:nr_face_5, 2]
+ 0.5
* s_2[nr_face_4:nr_face_5]
* repeat_side[nr_face_4:nr_face_5, 2]
)
face_5_normal_x = np.zeros_like(face_5_x)
face_5_normal_y = -np.ones_like(face_5_x)
face_5_normal_z = np.zeros_like(face_5_x)
area_5 = repeat_face_area[nr_face_4:nr_face_5]
# sample face 6
nr_face_6 = (
np.sum(points_per_face[5 * box_bounds.shape[0] :]) + nr_face_5
)
face_6_x = (
repeat_centers[nr_face_5:nr_face_6, 0]
- 0.5 * repeat_side[nr_face_5:nr_face_6, 0]
)
face_6_y = (
repeat_centers[nr_face_5:nr_face_6, 1]
+ 0.5
* s_1[nr_face_5:nr_face_6]
* repeat_side[nr_face_5:nr_face_6, 1]
)
face_6_z = (
repeat_centers[nr_face_5:nr_face_6, 2]
+ 0.5
* s_2[nr_face_5:nr_face_6]
* repeat_side[nr_face_5:nr_face_6, 2]
)
face_6_normal_x = -np.ones_like(face_6_x)
face_6_normal_y = np.zeros_like(face_6_x)
face_6_normal_z = np.zeros_like(face_6_x)
area_6 = repeat_face_area[nr_face_5:nr_face_6]
# gather for invar
invar = {
"x": np.concatenate(
[face_1_x, face_2_x, face_3_x, face_4_x, face_5_x, face_6_x],
axis=0,
)[:, None],
"y": np.concatenate(
[face_1_y, face_2_y, face_3_y, face_4_y, face_5_y, face_6_y],
axis=0,
)[:, None],
"z": np.concatenate(
[face_1_z, face_2_z, face_3_z, face_4_z, face_5_z, face_6_z],
axis=0,
)[:, None],
"normal_x": np.concatenate(
[
face_1_normal_x,
face_2_normal_x,
face_3_normal_x,
face_4_normal_x,
face_5_normal_x,
face_6_normal_x,
],
axis=0,
)[:, None],
"normal_y": np.concatenate(
[
face_1_normal_y,
face_2_normal_y,
face_3_normal_y,
face_4_normal_y,
face_5_normal_y,
face_6_normal_y,
],
axis=0,
)[:, None],
"normal_z": np.concatenate(
[
face_1_normal_z,
face_2_normal_z,
face_3_normal_z,
face_4_normal_z,
face_5_normal_z,
face_6_normal_z,
],
axis=0,
)[:, None],
"area": np.concatenate(
[area_1, area_2, area_3, area_4, area_5, area_6], axis=0
)[:, None],
}
return invar, {}
return sample
curves = [Curve(_sample(box_bounds, box_centers, side), dims=3)]
# create closure for SDF function
def _sdf(box_bounds, box_centers, side, dx):
def sdf(invar, param_ranges={}, compute_sdf_derivatives=False):
# get input and tile for each box
xyz = np.stack([invar["x"], invar["y"], invar["z"]], axis=-1)
xyz = np.tile(np.expand_dims(xyz, 1), (1, box_bounds.shape[0], 1))
# compute distance
outputs = {"sdf": VectorizedBoxes._sdf_box(xyz, box_centers, side)}
# compute distance derivatives if needed
if compute_sdf_derivatives:
for i, d in enumerate(["x", "y", "z"]):
# compute sdf plus dx/2
plus_xyz = np.copy(xyz)
plus_xyz[..., i] += dx / 2
computed_sdf_plus = VectorizedBoxes._sdf_box(
plus_xyz, box_centers, side
)
# compute sdf minus dx/2
minus_xyz = np.copy(xyz)
minus_xyz[..., i] -= dx / 2
computed_sdf_minus = VectorizedBoxes._sdf_box(
minus_xyz, box_centers, side
)
# store sdf derivative
outputs["sdf" + diff_str + d] = (
computed_sdf_plus - computed_sdf_minus
) / dx
return outputs
return sdf
# create bounds
bounds = Bounds(
{
"x": (np.min(box_bounds[:, 0, 0]), np.max(box_bounds[:, 0, 1])),
"y": (np.min(box_bounds[:, 1, 0]), np.max(box_bounds[:, 1, 1])),
"z": (np.min(box_bounds[:, 2, 0]), np.max(box_bounds[:, 2, 1])),
}
)
# initialize geometry
Geometry.__init__(
self, curves, _sdf(box_bounds, box_centers, side, dx), bounds=bounds, dims=3
)
@staticmethod
def _sdf_box(xyz, box_centers, side):
xyz_dist = np.abs(xyz - np.expand_dims(box_centers, 0)) - 0.5 * np.expand_dims(
side, 0
)
outside_distance = np.sqrt(np.sum(np.maximum(xyz_dist, 0) ** 2, axis=-1))
inside_distance = np.minimum(np.max(xyz_dist, axis=-1), 0)
return np.max(-(outside_distance + inside_distance), axis=-1)
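# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): evaluating the
# vectorized box SDF for two axis-aligned boxes at a few query points. The
# helper name `_vectorized_boxes_sdf_sketch` is hypothetical; array shapes
# follow the `box_bounds` convention documented above, [nr_boxes, 3, 2] with
# lower/upper bounds in the last dimension.
# ---------------------------------------------------------------------------
def _vectorized_boxes_sdf_sketch():
    box_bounds = np.array(
        [
            [[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]],  # unit box at the origin
            [[2.0, 3.0], [0.0, 1.0], [0.0, 1.0]],  # second box shifted in x
        ]
    )
    centers = box_bounds[:, :, 0] + (box_bounds[:, :, 1] - box_bounds[:, :, 0]) / 2
    side = box_bounds[:, :, 1] - box_bounds[:, :, 0]
    # query points, tiled per box exactly as done in the sdf closure above
    xyz = np.array([[0.5, 0.5, 0.5], [1.5, 0.5, 0.5]])
    xyz = np.tile(np.expand_dims(xyz, 1), (1, box_bounds.shape[0], 1))
    # returns ~[0.5, -0.5]: positive inside any box, negative outside all boxes
    return VectorizedBoxes._sdf_box(xyz, centers, side)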
class Sphere(Geometry):
"""
3D Sphere
Parameters
----------
center : tuple with 3 ints or floats
center of sphere
radius : int or float
radius of sphere
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, radius, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
r_1, r_2, r_3 = (
Symbol(csg_curve_naming(0)),
Symbol(csg_curve_naming(1)),
Symbol(csg_curve_naming(2)),
)
# surface of the sphere
curve_parameterization = Parameterization(
{r_1: (-1, 1), r_2: (-1, 1), r_3: (-1, 1)}
)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
norm = sqrt(r_1**2 + r_2**2 + r_3**2)
curve_1 = SympyCurve(
functions={
"x": center[0] + radius * r_1 / norm, # TODO GAUSSIAN DIST
"y": center[1] + radius * r_2 / norm,
"z": center[2] + radius * r_3 / norm,
"normal_x": r_1 / norm,
"normal_y": r_2 / norm,
"normal_z": r_3 / norm,
},
parameterization=curve_parameterization,
area=4 * pi * radius**2,
)
curves = [curve_1]
# calculate SDF
sdf = radius - sqrt(
(x - center[0]) ** 2 + (y - center[1]) ** 2 + (z - center[2]) ** 2
)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - radius, center[0] + radius),
Parameter("y"): (center[1] - radius, center[1] + radius),
Parameter("z"): (center[2] - radius, center[2] + radius),
},
parameterization=parameterization,
)
# initialize Sphere
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Cylinder(Geometry):
"""
3D Cylinder
Axis parallel to z-axis
Parameters
----------
center : tuple with 3 ints or floats
center of cylinder
radius : int or float
radius of cylinder
height : int or float
height of cylinder
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, radius, height, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
h, r = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
theta = Symbol(csg_curve_naming(2))
# surface of the cylinder
curve_parameterization = Parameterization(
{h: (-1, 1), r: (0, 1), theta: (0, 2 * pi)}
)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + radius * cos(theta),
"y": center[1] + radius * sin(theta),
"z": center[2] + 0.5 * h * height,
"normal_x": 1 * cos(theta),
"normal_y": 1 * sin(theta),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=height * 2 * pi * radius,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + sqrt(r) * radius * cos(theta),
"y": center[1] + sqrt(r) * radius * sin(theta),
"z": center[2] + 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=pi * radius**2,
)
curve_3 = SympyCurve(
functions={
"x": center[0] + sqrt(r) * radius * cos(theta),
"y": center[1] + sqrt(r) * radius * sin(theta),
"z": center[2] - 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=pi * radius**2,
)
curves = [curve_1, curve_2, curve_3]
# calculate SDF
r_dist = sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)
z_dist = Abs(z - center[2])
outside_distance = sqrt(
Min(0, radius - r_dist) ** 2 + Min(0, 0.5 * height - z_dist) ** 2
)
inside_distance = -1 * Min(
Abs(Min(0, r_dist - radius)), Abs(Min(0, z_dist - 0.5 * height))
)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - radius, center[0] + radius),
Parameter("y"): (center[1] - radius, center[1] + radius),
Parameter("z"): (center[2] - height / 2, center[2] + height / 2),
},
parameterization=parameterization,
)
# initialize Cylinder
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Torus(Geometry):
"""
3D Torus
Parameters
----------
center : tuple with 3 ints or floats
center of torus
radius : int or float
distance from center to center of tube (major radius)
radius_tube : int or float
radius of tube (minor radius)
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(
self, center, radius, radius_tube, parameterization=Parameterization()
):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
r_1, r_2, r_3 = (
Symbol(csg_curve_naming(0)),
Symbol(csg_curve_naming(1)),
Symbol(csg_curve_naming(2)),
)
N = CoordSys3D("N")
P = x * N.i + y * N.j + z * N.k
O = center[0] * N.i + center[1] * N.j + center[2] * N.k
OP_xy = (x - center[0]) * N.i + (y - center[1]) * N.j + (0) * N.k
OR = radius * OP_xy / sqrt(OP_xy.dot(OP_xy))
OP = P - O
RP = OP - OR
dist = sqrt(RP.dot(RP))
# surface of the torus
curve_parameterization = Parameterization(
{r_1: (0, 1), r_2: (0, 1), r_3: (0, 1)}
)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
theta = 2 * pi * r_1
phi = 2 * pi * r_2
curve_1 = SympyCurve(
functions={
"x": center[0] + (radius + radius_tube * cos(theta)) * cos(phi),
"y": center[1] + (radius + radius_tube * cos(theta)) * sin(phi),
"z": center[2] + radius_tube * sin(theta),
"normal_x": 1 * cos(theta) * cos(phi),
"normal_y": 1 * cos(theta) * sin(phi),
"normal_z": 1 * sin(theta),
},
parameterization=curve_parameterization,
area=4 * pi * pi * radius * radius_tube,
criteria=radius_tube * Abs(radius + radius_tube * cos(theta))
>= r_3 * radius_tube * (radius + radius_tube),
)
curves = [curve_1]
# calculate SDF
sdf = radius_tube - dist
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (
center[0] - radius - radius_tube,
center[0] + radius + radius_tube,
),
Parameter("y"): (
center[1] - radius - radius_tube,
center[1] + radius + radius_tube,
),
Parameter("z"): (center[2] - radius_tube, center[2] + radius_tube),
},
parameterization=parameterization,
)
# initialize Torus
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Cone(Geometry):
"""
3D Cone
Axis parallel to z-axis
Parameters
----------
center : tuple with 3 ints or floats
base center of cone
radius : int or float
base radius of cone
height : int or float
height of cone
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, radius, height, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
r, t = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
theta = Symbol(csg_curve_naming(2))
N = CoordSys3D("N")
P = x * N.i + y * N.j + z * N.k
O = center[0] * N.i + center[1] * N.j + center[2] * N.k
H = center[0] * N.i + center[1] * N.j + (center[2] + height) * N.k
R = (
(center[0] + radius * cos(atan2(y, x))) * N.i
+ (center[1] + radius * sin(atan2(y, x))) * N.j
+ (center[2]) * N.k
)
OP_xy = (x - center[0]) * N.i + (y - center[1]) * N.j + (0) * N.k
OR = radius * OP_xy / sqrt(OP_xy.dot(OP_xy))
OP = P - O
OH = H - O
RP = OP - OR
RH = OH - OR
PH = OH - OP
cone_angle = atan2(radius, height)
angle = acos(PH.dot(OH) / sqrt(PH.dot(PH)) / sqrt(OH.dot(OH)))
dist = sqrt(PH.dot(PH)) * sin(angle - cone_angle)
# surface of the cone
curve_parameterization = Parameterization(
{r: (0, 1), t: (0, 1), theta: (0, 2 * pi)}
)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + (sqrt(t)) * radius * cos(theta),
"y": center[1] + (sqrt(t)) * radius * sin(theta),
"z": center[2] + (1 - sqrt(t)) * height,
"normal_x": 1 * cos(cone_angle) * cos(theta),
"normal_y": 1 * cos(cone_angle) * sin(theta),
"normal_z": 1 * sin(cone_angle),
},
parameterization=curve_parameterization,
area=pi * radius * (sqrt(height**2 + radius**2)),
)
curve_2 = SympyCurve(
functions={
"x": center[0] + sqrt(r) * radius * cos(theta),
"y": center[1] + sqrt(r) * radius * sin(theta),
"z": center[2],
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=pi * radius**2,
)
curves = [curve_1, curve_2]
# calculate SDF
outside_distance = 1 * sqrt(Max(0, dist) ** 2 + Max(0, center[2] - z) ** 2)
inside_distance = -1 * Min(Abs(Min(0, dist)), Abs(Min(0, center[2] - z)))
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - radius, center[0] + radius),
Parameter("y"): (center[1] - radius, center[1] + radius),
Parameter("z"): (center[2], center[2] + height),
},
parameterization=parameterization,
)
# initialize Cone
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class TriangularPrism(Geometry):
"""
3D Uniform Triangular Prism
Axis parallel to z-axis
Parameters
----------
center : tuple with 3 ints or floats
center of prism
side : int or float
side of equilateral base
height : int or float
height of prism
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, side, height, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
s_1, s_2, s_3 = (
Symbol(csg_curve_naming(0)),
Symbol(csg_curve_naming(1)),
Symbol(csg_curve_naming(2)),
)
N = CoordSys3D("N")
P = x * N.i + y * N.j + z * N.k
O = center[0] * N.i + center[1] * N.j + center[2] * N.k
OP = P - O
OP_xy = OP - OP.dot(1 * N.k)
normal_1 = -1 * N.j
normal_2 = -sqrt(3) / 2 * N.i + 1 / 2 * N.j
normal_3 = sqrt(3) / 2 * N.i + 1 / 2 * N.j
r_ins = side / 2 / sqrt(3)
distance_side = Min(
Abs(r_ins - OP_xy.dot(normal_1)),
Abs(r_ins - OP_xy.dot(normal_2)),
Abs(r_ins - OP_xy.dot(normal_3)),
)
distance_top = Abs(z - center[2]) - 0.5 * height
v1 = O + (
-0.5 * side * N.i - 0.5 * sqrt(1 / 3) * side * N.j - height / 2 * side * N.k
)
v2 = O + (
0.5 * side * N.i - 0.5 * sqrt(1 / 3) * side * N.j - height / 2 * side * N.k
)
v3 = O + (1 * sqrt(1 / 3) * side * N.j - height / 2 * side * N.k)
# surface of the prism
curve_parameterization = Parameterization({s_1: (0, 1), s_2: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": v1.dot(1 * N.i) + (v2 - v1).dot(1 * N.i) * s_1,
"y": v1.dot(1 * N.j) + (v2 - v1).dot(1 * N.j) * s_1,
"z": v1.dot(1 * N.k) + height * s_2,
"normal_x": 0,
"normal_y": -1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side * height,
)
curve_2 = SympyCurve(
functions={
"x": v1.dot(1 * N.i) + (v3 - v1).dot(1 * N.i) * s_1,
"y": v1.dot(1 * N.j) + (v3 - v1).dot(1 * N.j) * s_1,
"z": v1.dot(1 * N.k) + height * s_2,
"normal_x": -sqrt(3) / 2,
"normal_y": 1 / 2,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side * height,
)
curve_3 = SympyCurve(
functions={
"x": v2.dot(1 * N.i) + (v3 - v2).dot(1 * N.i) * s_1,
"y": v2.dot(1 * N.j) + (v3 - v2).dot(1 * N.j) * s_1,
"z": v2.dot(1 * N.k) + height * s_2,
"normal_x": sqrt(3) / 2,
"normal_y": 1 / 2,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=side * height,
)
curve_4 = SympyCurve(
functions={
"x": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.i)
),
"y": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.j)
),
"z": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.k)
),
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
curve_5 = SympyCurve(
functions={
"x": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.i)
),
"y": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.j)
),
"z": (
(
(1 - sqrt(s_1)) * v1
+ (sqrt(s_1) * (1 - s_2)) * v2
+ s_2 * sqrt(s_1) * v3
).dot(1 * N.k)
+ height
),
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
curves = [curve_1, curve_2, curve_3, curve_4, curve_5]
# calculate SDF
inside_distance = Max(
Min(
Max(OP_xy.dot(normal_1), OP_xy.dot(normal_2), OP_xy.dot(normal_3))
- r_ins,
0,
),
Min(Abs(z - center[2]) - 0.5 * height, 0),
)
outside_distance = sqrt(
Min(
r_ins
- Max(OP_xy.dot(normal_1), OP_xy.dot(normal_2), OP_xy.dot(normal_3)),
0,
)
** 2
+ Min(0.5 * height - Abs(z - center[2]), 0) ** 2
)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - side / 2, center[0] + side / 2),
Parameter("y"): (center[1] - side / 2, center[1] + side / 2),
Parameter("z"): (center[2], center[2] + height),
},
parameterization=parameterization,
)
# initialize TriangularPrism
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class Tetrahedron(Geometry):
"""
3D Tetrahedron
    The 4 vertices are placed symmetrically on a sphere of the given radius.
    The centroid of the tetrahedron is at `center` and the lower face is
    parallel to the x-y plane.
Reference: https://en.wikipedia.org/wiki/Tetrahedron
Parameters
----------
center : tuple with 3 ints or floats
centroid of tetrahedron
radius : int or float
radius of circumscribed sphere
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, radius, parameterization=Parameterization()):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
r_1, r_2 = Symbol(csg_curve_naming(0)), Symbol(csg_curve_naming(1))
N = CoordSys3D("N")
P = x * N.i + y * N.j + z * N.k
O = center[0] * N.i + center[1] * N.j + center[2] * N.k
side = sqrt(8 / 3) * radius
# vertices of the tetrahedron
v1 = (
center[0] + radius * sqrt(8 / 9),
center[1] + radius * 0,
center[2] + radius * (-1 / 3),
)
v2 = (
center[0] - radius * sqrt(2 / 9),
center[1] + radius * sqrt(2 / 3),
center[2] + radius * (-1 / 3),
)
v3 = (
center[0] - radius * sqrt(2 / 9),
center[1] - radius * sqrt(2 / 3),
center[2] + radius * (-1 / 3),
)
v4 = (
center[0] + radius * 0,
center[1] + radius * 0,
center[2] + radius * 1,
) # apex vector
vv1 = v1[0] * N.i + v1[1] * N.j + v1[2] * N.k
vv2 = v2[0] * N.i + v2[1] * N.j + v2[2] * N.k
        vv3 = v3[0] * N.i + v3[1] * N.j + v3[2] * N.k
vv4 = v4[0] * N.i + v4[1] * N.j + v4[2] * N.k
v4P = P - vv4
# surface of the tetrahedron
        curve_parameterization = Parameterization({r_1: (0, 1), r_2: (0, 1)})
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
# face between v1, v2, v3
normal_1 = ((vv3 - vv1).cross(vv2 - vv1)).normalize()
curve_1 = SympyCurve(
functions={
"x": (
center[0]
+ (1 - sqrt(r_1)) * v1[0]
+ (sqrt(r_1) * (1 - r_2)) * v2[0]
+ r_2 * sqrt(r_1) * v3[0]
),
"y": (
center[1]
+ (1 - sqrt(r_1)) * v1[1]
+ (sqrt(r_1) * (1 - r_2)) * v2[1]
+ r_2 * sqrt(r_1) * v3[1]
),
"z": (
center[2]
+ (1 - sqrt(r_1)) * v1[2]
+ (sqrt(r_1) * (1 - r_2)) * v2[2]
+ r_2 * sqrt(r_1) * v3[2]
),
"normal_x": normal_1.to_matrix(N)[0],
"normal_y": normal_1.to_matrix(N)[1],
"normal_z": normal_1.to_matrix(N)[2],
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
# face between v1, v2, v4
normal_2 = ((vv2 - vv1).cross(vv4 - vv1)).normalize()
curve_2 = SympyCurve(
functions={
"x": (
center[0]
+ (1 - sqrt(r_1)) * v1[0]
+ (sqrt(r_1) * (1 - r_2)) * v2[0]
+ r_2 * sqrt(r_1) * v4[0]
),
"y": (
center[1]
+ (1 - sqrt(r_1)) * v1[1]
+ (sqrt(r_1) * (1 - r_2)) * v2[1]
+ r_2 * sqrt(r_1) * v4[1]
),
"z": (
center[2]
+ (1 - sqrt(r_1)) * v1[2]
+ (sqrt(r_1) * (1 - r_2)) * v2[2]
+ r_2 * sqrt(r_1) * v4[2]
),
"normal_x": normal_2.to_matrix(N)[0],
"normal_y": normal_2.to_matrix(N)[1],
"normal_z": normal_2.to_matrix(N)[2],
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
# face between v1, v4, v3
normal_3 = ((vv4 - vv1).cross(vv3 - vv1)).normalize()
curve_3 = SympyCurve(
functions={
"x": (
center[0]
+ (1 - sqrt(r_1)) * v1[0]
+ (sqrt(r_1) * (1 - r_2)) * v4[0]
+ r_2 * sqrt(r_1) * v3[0]
),
"y": (
center[1]
+ (1 - sqrt(r_1)) * v1[1]
+ (sqrt(r_1) * (1 - r_2)) * v4[1]
+ r_2 * sqrt(r_1) * v3[1]
),
"z": (
center[2]
+ (1 - sqrt(r_1)) * v1[2]
+ (sqrt(r_1) * (1 - r_2)) * v4[2]
+ r_2 * sqrt(r_1) * v3[2]
),
"normal_x": normal_3.to_matrix(N)[0],
"normal_y": normal_3.to_matrix(N)[1],
"normal_z": normal_3.to_matrix(N)[2],
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
# face between v4, v2, v3
normal_4 = ((vv2 - vv4).cross(vv3 - vv4)).normalize()
curve_4 = SympyCurve(
functions={
"x": (
center[0]
+ (1 - sqrt(r_1)) * v4[0]
+ (sqrt(r_1) * (1 - r_2)) * v2[0]
+ r_2 * sqrt(r_1) * v3[0]
),
"y": (
center[1]
+ (1 - sqrt(r_1)) * v4[1]
+ (sqrt(r_1) * (1 - r_2)) * v2[1]
+ r_2 * sqrt(r_1) * v3[1]
),
"z": (
center[2]
+ (1 - sqrt(r_1)) * v4[2]
+ (sqrt(r_1) * (1 - r_2)) * v2[2]
+ r_2 * sqrt(r_1) * v3[2]
),
"normal_x": normal_4.to_matrix(N)[0],
"normal_y": normal_4.to_matrix(N)[1],
"normal_z": normal_4.to_matrix(N)[2],
},
parameterization=curve_parameterization,
area=sqrt(3) * side * side / 4,
)
curves = [curve_1, curve_2, curve_3, curve_4]
dist = Max(
v4P.dot(normal_2) / normal_2.magnitude(),
v4P.dot(normal_3) / normal_3.magnitude(),
v4P.dot(normal_4) / normal_4.magnitude(),
)
# calculate SDF
outside_distance = -1 * sqrt(Max(0, dist) ** 2 + Max(0, v1[2] - z) ** 2)
inside_distance = Min(Abs(Min(0, dist)), Abs(Min(0, v1[2] - z)))
sdf = outside_distance + inside_distance
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - radius, center[0] + radius),
Parameter("y"): (center[1] - radius, center[1] + radius),
Parameter("z"): (center[2] - radius, center[2] + radius),
},
parameterization=parameterization,
)
# initialize Tetrahedron
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class IsoTriangularPrism(Geometry):
"""
    3D Isosceles Triangular Prism
Symmetrical axis parallel to y-axis
Parameters
----------
center : tuple with 3 ints or floats
center of base of triangle
base : int or float
base of triangle
height : int or float
height of triangle
height_prism : int or float
height of triangular prism
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(
self, center, base, height, height_prism, parameterization=Parameterization()
):
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
t, h, hz = (
Symbol(csg_curve_naming(0)),
Symbol(csg_curve_naming(1)),
Symbol(csg_curve_naming(2)),
)
N = CoordSys3D("N")
P = (x) * N.i + y * N.j + center[2] * N.k
Q = x * N.i + y * N.j + center[2] * N.k
O = center[0] * N.i + center[1] * N.j + center[2] * N.k
H = center[0] * N.i + (center[1] + height) * N.j + center[2] * N.k
B = (center[0] + base / 2) * N.i + center[1] * N.j + center[2] * N.k
B_p = (center[0] - base / 2) * N.i + center[1] * N.j + center[2] * N.k
OP = P - O
OH = H - O
PH = OH - OP
OQ = Q - O
QH = OH - OQ
HP = OP - OH
HB = B - H
HB_p = B_p - H
norm = ((HB_p).cross(HB)).normalize()
norm_HB = (norm.cross(HB)).normalize()
hypo = sqrt(height**2 + (base / 2) ** 2)
angle = acos(PH.dot(OH) / sqrt(PH.dot(PH)) / sqrt(OH.dot(OH)))
apex_angle = asin(base / 2 / hypo)
hypo_sin = sqrt(height**2 + (base / 2) ** 2) * sin(apex_angle)
hypo_cos = sqrt(height**2 + (base / 2) ** 2) * cos(apex_angle)
dist = sqrt(PH.dot(PH)) * sin(Min(angle - apex_angle, pi / 2))
a = (center[0] - base / 2) * N.i + center[1] * N.j + center[2] * N.k
b = (center[0] + base / 2) * N.i + center[1] * N.j + center[2] * N.k
c = center[0] * N.i + (center[1] + height) * N.j + center[2] * N.k
s_1, s_2 = Symbol(csg_curve_naming(3)), Symbol(csg_curve_naming(4))
# curve for each side
ranges = {t: (-1, 1), h: (0, 1), hz: (-1, 1), s_1: (0, 1), s_2: (0, 1)}
curve_parameterization = Parameterization(ranges)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + t * base / 2,
"y": center[1] + t * 0,
"z": center[2] + 0.5 * hz * height_prism,
"normal_x": 0,
"normal_y": -1,
"normal_z": 0,
},
parameterization=curve_parameterization,
area=base * height_prism,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + h * hypo_sin,
"y": center[1] + height - h * hypo_cos,
"z": center[2] + 0.5 * hz * height_prism,
"normal_x": 1 * cos(apex_angle),
"normal_y": 1 * sin(apex_angle),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=sqrt(height**2 + (base / 2) ** 2) * height_prism,
)
curve_3 = SympyCurve(
functions={
"x": center[0] - h * hypo_sin,
"y": center[1] + height - h * hypo_cos,
"z": center[2] + 0.5 * hz * height_prism,
"normal_x": -1 * cos(apex_angle),
"normal_y": 1 * sin(apex_angle),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=sqrt(height**2 + (base / 2) ** 2) * height_prism,
)
curve_4 = SympyCurve(
functions={
"x": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.i)
),
"y": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.j)
),
"z": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.k)
)
- height_prism / 2,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=0.5 * base * height,
)
curve_5 = SympyCurve(
functions={
"x": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.i)
),
"y": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.j)
),
"z": (
(
(1 - sqrt(s_1)) * a
+ (sqrt(s_1) * (1 - s_2)) * b
+ s_2 * sqrt(s_1) * c
).dot(1 * N.k)
+ height_prism / 2
),
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=0.5 * base * height,
)
curves = [curve_1, curve_2, curve_3, curve_4, curve_5]
# calculate SDF
z_dist = Abs(z - center[2])
outside_distance = 1 * sqrt(
sqrt(Max(0, dist) ** 2 + Max(0, center[1] - y) ** 2) ** 2
+ Min(0.5 * height_prism - Abs(z - center[2]), 0) ** 2
)
inside_distance = -1 * Min(
Abs(Min(0, dist)),
Abs(Min(0, center[1] - y)),
Abs(Min(Abs(z - center[2]) - 0.5 * height_prism, 0)),
)
sdf = -(outside_distance + inside_distance)
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - base / 2, center[0] + base / 2),
Parameter("y"): (center[1], center[1] + height),
Parameter("z"): (center[2], center[2] + height_prism),
},
parameterization=parameterization,
)
# initialize IsoTriangularPrism
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
class ElliCylinder(Geometry):
"""
3D Elliptical Cylinder
Axis parallel to z-axis
Approximation based on 4-arc ellipse construction
https://www.researchgate.net/publication/241719740_Approximating_an_ellipse_with_four_circular_arcs
Please manually ensure a>b
Parameters
----------
center : tuple with 3 ints or floats
center of base of ellipse
a : int or float
semi-major axis of ellipse
b : int or float
semi-minor axis of ellipse
height : int or float
height of elliptical cylinder
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, center, a, b, height, parameterization=Parameterization()):
# TODO Assertion creates issues while parameterization
# assert a > b, "a must be greater than b. To have a ellipse with larger b create a ellipse with flipped a and b and then rotate by pi/2"
# make sympy symbols to use
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
h = Symbol(csg_curve_naming(0))
r_1, r_2 = Symbol(csg_curve_naming(1)), Symbol(csg_curve_naming(2))
angle = Symbol(csg_curve_naming(3))
phi = asin(b / sqrt(a**2 + b**2))
# phi = atan2(b, a)
theta = pi / 2 - phi
r1 = (a * sin(theta) + b * cos(theta) - a) / (sin(theta) + cos(theta) - 1)
r2 = (a * sin(theta) + b * cos(theta) - b) / (sin(theta) + cos(theta) - 1)
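        # Four-arc approximation: r1 is the radius of the two small arcs capping
        # the ends of the major axis (arc centers at x = center[0] +/- (a - r1)),
        # while r2 is the radius of the two large arcs spanning the top and bottom
        # (arc centers at y = center[1] -/+ (r2 - b)); neighboring arcs meet
        # tangentially so the compound boundary approximates the ellipse smoothly.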
# surface of the cylinder
ranges = {h: (-1, 1), r_1: (0, 1), r_2: (0, 1), angle: (-1, 1)}
curve_parameterization = Parameterization(ranges)
curve_parameterization = Parameterization.combine(
curve_parameterization, parameterization
)
curve_1 = SympyCurve(
functions={
"x": center[0] + a - r1 + r1 * cos(angle * theta),
"y": center[1] + r1 * sin(angle * theta),
"z": center[2] + 0.5 * h * height,
"normal_x": 1 * cos(angle * theta),
"normal_y": 1 * sin(angle * theta),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=height * 2 * theta * r1,
)
curve_2 = SympyCurve(
functions={
"x": center[0] + r2 * cos(pi / 2 + angle * phi),
"y": center[1] - r2 + b + r2 * sin(pi / 2 + angle * phi),
"z": center[2] + 0.5 * h * height,
"normal_x": 1 * cos(pi / 2 + angle * phi),
"normal_y": 1 * sin(pi / 2 + angle * phi),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=height * 2 * phi * r2,
)
curve_3 = SympyCurve(
functions={
"x": center[0] - a + r1 + r1 * cos(pi + angle * theta),
"y": center[1] + r1 * sin(pi + angle * theta),
"z": center[2] + 0.5 * h * height,
"normal_x": 1 * cos(pi + angle * theta),
"normal_y": 1 * sin(pi + angle * theta),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=height * 2 * theta * r1,
)
curve_4 = SympyCurve(
functions={
"x": center[0] + r2 * cos(3 * pi / 2 + angle * phi),
"y": center[1] + r2 - b + r2 * sin(3 * pi / 2 + angle * phi),
"z": center[2] + 0.5 * h * height,
"normal_x": 1 * cos(3 * pi / 2 + angle * phi),
"normal_y": 1 * sin(3 * pi / 2 + angle * phi),
"normal_z": 0,
},
parameterization=curve_parameterization,
area=height * 2 * phi * r2,
)
# Flat surfaces top
curve_5 = SympyCurve(
functions={
"x": center[0] + a - r1 + sqrt(r_1) * r1 * cos(angle * theta),
"y": center[1] + sqrt(r_1) * r1 * sin(angle * theta),
"z": center[2] + 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=theta * r1**2,
)
curve_6 = SympyCurve(
functions={
"x": center[0] + sqrt(r_2) * r2 * cos(pi / 2 + angle * phi),
"y": center[1] - r2 + b + sqrt(r_2) * r2 * sin(pi / 2 + angle * phi),
"z": center[2] + 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=phi * r2**2 - 0.5 * (r2 - b) * (a - r1) * 2,
criteria=center[1] - r2 + b + sqrt(r_2) * r2 * sin(pi / 2 + angle * phi)
> center[1],
)
# criteria=(((x-(center[0]+r2-b))**2+y**2)<r2**2))
curve_7 = SympyCurve(
functions={
"x": center[0] - a + r1 + sqrt(r_1) * r1 * cos(pi + angle * theta),
"y": center[1] + sqrt(r_1) * r1 * sin(pi + angle * theta),
"z": center[2] + 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=theta * r1**2,
)
curve_8 = SympyCurve(
functions={
"x": center[0] + sqrt(r_2) * r2 * cos(3 * pi / 2 + angle * phi),
"y": center[1]
+ r2
- b
+ sqrt(r_2) * r2 * sin(3 * pi / 2 + angle * phi),
"z": center[2] + 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": 1,
},
parameterization=curve_parameterization,
area=phi * r2**2 - 0.5 * (r2 - b) * (a - r1) * 2,
criteria=center[1] + r2 - b + sqrt(r_2) * r2 * sin(3 * pi / 2 + angle * phi)
< center[1],
)
# criteria=(((x-(center[0]-r2+b))**2+y**2)<r2**2))
# Flat surfaces bottom
curve_9 = SympyCurve(
functions={
"x": center[0] + a - r1 + sqrt(r_1) * r1 * cos(angle * theta),
"y": center[1] + sqrt(r_1) * r1 * sin(angle * theta),
"z": center[2] - 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=theta * r1**2,
)
curve_10 = SympyCurve(
functions={
"x": center[0] + sqrt(r_2) * r2 * cos(pi / 2 + angle * phi),
"y": center[1] - r2 + b + sqrt(r_2) * r2 * sin(pi / 2 + angle * phi),
"z": center[2] - 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=phi * r2**2 - 0.5 * (r2 - b) * (a - r1) * 2,
criteria=center[1] - r2 + b + sqrt(r_2) * r2 * sin(pi / 2 + angle * phi)
> center[1],
)
# criteria=(((x-(center[0]+r2-b))**2+y**2)<r2**2))
curve_11 = SympyCurve(
functions={
"x": center[0] - a + r1 + sqrt(r_1) * r1 * cos(pi + angle * theta),
"y": center[1] + sqrt(r_1) * r1 * sin(pi + angle * theta),
"z": center[2] - 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=theta * r1**2,
)
curve_12 = SympyCurve(
functions={
"x": center[0] + sqrt(r_2) * r2 * cos(3 * pi / 2 + angle * phi),
"y": center[1]
+ r2
- b
+ sqrt(r_2) * r2 * sin(3 * pi / 2 + angle * phi),
"z": center[2] - 0.5 * height,
"normal_x": 0,
"normal_y": 0,
"normal_z": -1,
},
parameterization=curve_parameterization,
area=phi * r2**2 - 0.5 * (r2 - b) * (a - r1) * 2,
criteria=center[1] + r2 - b + sqrt(r_2) * r2 * sin(3 * pi / 2 + angle * phi)
< center[1],
)
# criteria=(((x-(center[0]-r2+b))**2+y**2)<r2**2))
curves = [
curve_1,
curve_2,
curve_3,
curve_4,
curve_5,
curve_6,
curve_7,
curve_8,
curve_9,
curve_10,
curve_11,
curve_12,
]
# calculate SDF
c1 = (center[0] + (a - r1), center[1], center[2])
c2 = (center[0], center[1] - (r2 - b), center[2])
c3 = (center[0] - (a - r1), center[1], center[2])
c4 = (center[0], center[1] + (r2 - b), center[2])
l1_m = (c1[1] - c2[1]) / (c1[0] - c2[0])
l1_c = c1[1] - l1_m * c1[0]
l2_m = (c1[1] - c4[1]) / (c1[0] - c4[0])
l2_c = c1[1] - l2_m * c1[0]
l3_m = (c3[1] - c4[1]) / (c3[0] - c4[0])
l3_c = c3[1] - l3_m * c3[0]
l4_m = (c3[1] - c2[1]) / (c3[0] - c2[0])
l4_c = c3[1] - l4_m * c3[0]
# (sign((x-min)*(max-x))+1)/2 # gives 0 if outside range, 0.5 if on min/max, 1 if inside range
# if negative is desired (1-sign(x))/2
# if positive is desired (sign(x)+1)/2
outside_distance_1 = (
Max((sqrt(((x) - c1[0]) ** 2 + ((y) - c1[1]) ** 2) - r1), 0)
* ((1 - sign((y) - l1_m * (x) - l1_c)) / 2)
* ((sign((y) - l2_m * (x) - l2_c) + 1) / 2)
)
outside_distance_2 = (
Max((sqrt(((x) - c2[0]) ** 2 + ((y) - c2[1]) ** 2) - r2), 0)
* ((sign((y) - l1_m * (x) - l1_c) + 1) / 2)
* ((sign((y) - l4_m * (x) - l4_c) + 1) / 2)
)
outside_distance_3 = (
Max((sqrt(((x) - c3[0]) ** 2 + ((y) - c3[1]) ** 2) - r1), 0)
* ((sign((y) - l3_m * (x) - l3_c) + 1) / 2)
* ((1 - sign((y) - l4_m * (x) - l4_c)) / 2)
)
outside_distance_4 = (
Max((sqrt(((x) - c4[0]) ** 2 + ((y) - c4[1]) ** 2) - r2), 0)
* ((1 - sign((y) - l2_m * (x) - l2_c)) / 2)
* ((1 - sign((y) - l3_m * (x) - l3_c)) / 2)
)
curved_outside_distance = (
outside_distance_1
+ outside_distance_2
+ outside_distance_3
+ outside_distance_4
)
flat_outside_distance = Max(Abs(z - center[2]) - 0.5 * height, 0)
outside_distance = sqrt(
curved_outside_distance**2 + flat_outside_distance**2
)
# (sign((x-min)*(max-x))+1)/2 # gives 0 if outside range, 0.5 if on min/max, 1 if inside range
inside_distance_1 = (
Max((r1 - sqrt(((x) - c1[0]) ** 2 + ((y) - c1[1]) ** 2)), 0)
* ((1 - sign((y) - l1_m * (x) - l1_c)) / 2)
* ((sign((y) - l2_m * (x) - l2_c) + 1) / 2)
)
inside_distance_2 = (
Max((r2 - sqrt(((x) - c2[0]) ** 2 + ((y) - c2[1]) ** 2)), 0)
* ((sign((y) - l1_m * (x) - l1_c) + 1) / 2)
* ((sign((y) - l4_m * (x) - l4_c) + 1) / 2)
* ((sign(y - center[1]) + 1) / 2)
)
inside_distance_3 = (
Max((r1 - sqrt(((x) - c3[0]) ** 2 + ((y) - c3[1]) ** 2)), 0)
* ((sign((y) - l3_m * (x) - l3_c) + 1) / 2)
* ((1 - sign((y) - l4_m * (x) - l4_c)) / 2)
)
inside_distance_4 = (
Max((r2 - sqrt(((x) - c4[0]) ** 2 + ((y) - c4[1]) ** 2)), 0)
* ((1 - sign((y) - l2_m * (x) - l2_c)) / 2)
* ((1 - sign((y) - l3_m * (x) - l3_c)) / 2)
* ((sign(center[1] - y) + 1) / 2)
)
curved_inside_distance = (
inside_distance_1
+ inside_distance_2
+ inside_distance_3
+ inside_distance_4
)
flat_inside_distance = Max(0.5 * height - Abs(z - center[2]), 0)
inside_distance = Min(curved_inside_distance, flat_inside_distance)
sdf = -outside_distance + inside_distance
# calculate bounds
bounds = Bounds(
{
Parameter("x"): (center[0] - a, center[0] + a),
Parameter("y"): (center[0] - b, center[0] + b),
Parameter("y"): (center[0] - height / 2, center[0] + height / 2),
},
parameterization=parameterization,
)
# initialize Cylinder
super().__init__(
curves,
_sympy_sdf_to_sdf(sdf),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
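# Hedged usage sketch (not part of the original module): it only exercises the
# ElliCylinder primitive defined above through the generic `sample_boundary` and
# `sample_interior` methods of the `Geometry` base class, and uses the sampled
# 'area' field for Monte Carlo estimates of surface area and volume.
if __name__ == "__main__":
    import numpy as np
    ecyl = ElliCylinder(center=(0.0, 0.0, 0.0), a=2.0, b=1.0, height=1.0)
    boundary = ecyl.sample_boundary(nr_points=2000)
    interior = ecyl.sample_interior(nr_points=2000)
    print("approx. surface area:", float(np.sum(boundary["area"])))
    print("approx. volume:", float(np.sum(interior["area"])))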
| modulus-sym-main | modulus/sym/geometry/primitives_3d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines base class for all mesh type geometries
"""
import numpy as np
import csv
from stl import mesh as np_mesh
from sympy import Symbol
try:
import pysdf.sdf as pysdf
except:
print(
"Error importing pysdf. Make sure 'libsdf.so' is in LD_LIBRARY_PATH and pysdf is installed"
)
raise
from .geometry import Geometry
from .parameterization import Parameterization, Bounds, Parameter
from .curve import Curve
from modulus.sym.constants import diff_str
class Tessellation(Geometry):
"""
Constructive Tessellation Module that allows sampling on surface and interior
of a tessellated geometry.
Parameters
----------
mesh : Mesh (numpy-stl)
A mesh that defines the surface of the geometry.
airtight : bool
If the geometry is airtight or not. If false sample everywhere for interior.
parameterization : Parameterization
Parameterization of geometry.
"""
def __init__(self, mesh, airtight=True, parameterization=Parameterization()):
# make curves
def _sample(mesh):
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
# compute required points on per triangle
triangle_areas = _area_of_triangles(mesh.v0, mesh.v1, mesh.v2)
triangle_probabilities = triangle_areas / np.linalg.norm(
triangle_areas, ord=1
)
triangle_index = np.arange(triangle_probabilities.shape[0])
points_per_triangle = np.random.choice(
triangle_index, nr_points, p=triangle_probabilities
)
points_per_triangle, _ = np.histogram(
points_per_triangle,
np.arange(triangle_probabilities.shape[0] + 1) - 0.5,
)
# go through every triangle and sample it
invar = {
"x": [],
"y": [],
"z": [],
"normal_x": [],
"normal_y": [],
"normal_z": [],
"area": [],
}
for index, nr_p in enumerate(
points_per_triangle
                ):  # TODO can be more efficient
x, y, z = _sample_triangle(
mesh.v0[index], mesh.v1[index], mesh.v2[index], nr_p
)
invar["x"].append(x)
invar["y"].append(y)
invar["z"].append(z)
normal_scale = np.linalg.norm(mesh.normals[index])
invar["normal_x"].append(
np.full(x.shape, mesh.normals[index, 0]) / normal_scale
)
invar["normal_y"].append(
np.full(x.shape, mesh.normals[index, 1]) / normal_scale
)
invar["normal_z"].append(
np.full(x.shape, mesh.normals[index, 2]) / normal_scale
)
invar["area"].append(
np.full(x.shape, triangle_areas[index] / x.shape[0])
)
invar["x"] = np.concatenate(invar["x"], axis=0)
invar["y"] = np.concatenate(invar["y"], axis=0)
invar["z"] = np.concatenate(invar["z"], axis=0)
invar["normal_x"] = np.concatenate(invar["normal_x"], axis=0)
invar["normal_y"] = np.concatenate(invar["normal_y"], axis=0)
invar["normal_z"] = np.concatenate(invar["normal_z"], axis=0)
invar["area"] = np.concatenate(invar["area"], axis=0)
# sample from the param ranges
params = parameterization.sample(nr_points, quasirandom=quasirandom)
return invar, params
return sample
curves = [Curve(_sample(mesh), dims=3, parameterization=parameterization)]
# make sdf function
def _sdf(triangles, airtight):
def sdf(invar, params, compute_sdf_derivatives=False):
# gather points
points = np.stack([invar["x"], invar["y"], invar["z"]], axis=1)
# normalize triangles and points
minx, maxx, miny, maxy, minz, maxz = _find_mins_maxs(points)
max_dis = max(max((maxx - minx), (maxy - miny)), (maxz - minz))
store_triangles = np.array(triangles, dtype=np.float64)
store_triangles[:, :, 0] -= minx
store_triangles[:, :, 1] -= miny
store_triangles[:, :, 2] -= minz
store_triangles *= 1 / max_dis
store_triangles = store_triangles.flatten()
points[:, 0] -= minx
points[:, 1] -= miny
points[:, 2] -= minz
points *= 1 / max_dis
points = points.astype(np.float64).flatten()
# compute sdf values
outputs = {}
if airtight:
sdf_field, sdf_derivative = pysdf.signed_distance_field(
store_triangles, points, include_hit_points=True
)
sdf_field = -np.expand_dims(max_dis * sdf_field, axis=1)
else:
sdf_field = np.zeros_like(invar["x"])
outputs["sdf"] = sdf_field
# get sdf derivatives
if compute_sdf_derivatives:
sdf_derivative = -(sdf_derivative - points)
sdf_derivative = np.reshape(
sdf_derivative, (sdf_derivative.shape[0] // 3, 3)
)
sdf_derivative = sdf_derivative / np.linalg.norm(
sdf_derivative, axis=1, keepdims=True
)
outputs["sdf" + diff_str + "x"] = sdf_derivative[:, 0:1]
outputs["sdf" + diff_str + "y"] = sdf_derivative[:, 1:2]
outputs["sdf" + diff_str + "z"] = sdf_derivative[:, 2:3]
return outputs
return sdf
# compute bounds
bounds = Bounds(
{
Parameter("x"): (
float(np.min(mesh.vectors[:, :, 0])),
float(np.max(mesh.vectors[:, :, 0])),
),
Parameter("y"): (
float(np.min(mesh.vectors[:, :, 1])),
float(np.max(mesh.vectors[:, :, 1])),
),
Parameter("z"): (
float(np.min(mesh.vectors[:, :, 2])),
float(np.max(mesh.vectors[:, :, 2])),
),
},
parameterization=parameterization,
)
# initialize geometry
super(Tessellation, self).__init__(
curves,
_sdf(mesh.vectors, airtight),
dims=3,
bounds=bounds,
parameterization=parameterization,
)
@classmethod
def from_stl(
cls,
filename,
airtight=True,
parameterization=Parameterization(),
):
"""
makes mesh from STL file
Parameters
----------
filename : str
filename of mesh.
airtight : bool
If the geometry is airtight or not. If false sample everywhere for interior.
parameterization : Parameterization
Parameterization of geometry.
"""
# read in mesh
mesh = np_mesh.Mesh.from_file(filename)
return cls(mesh, airtight, parameterization)
# helper for sampling triangle
def _sample_triangle(
v0, v1, v2, nr_points
): # ref https://math.stackexchange.com/questions/18686/uniform-random-point-in-triangle
r1 = np.random.uniform(0, 1, size=(nr_points, 1))
r2 = np.random.uniform(0, 1, size=(nr_points, 1))
s1 = np.sqrt(r1)
x = v0[0] * (1.0 - s1) + v1[0] * (1.0 - r2) * s1 + v2[0] * r2 * s1
y = v0[1] * (1.0 - s1) + v1[1] * (1.0 - r2) * s1 + v2[1] * r2 * s1
z = v0[2] * (1.0 - s1) + v1[2] * (1.0 - r2) * s1 + v2[2] * r2 * s1
return x, y, z
# area of array of triangles
def _area_of_triangles(
v0, v1, v2
): # ref https://math.stackexchange.com/questions/128991/how-to-calculate-the-area-of-a-3d-triangle
a = np.sqrt(
(v0[:, 0] - v1[:, 0]) ** 2
+ (v0[:, 1] - v1[:, 1]) ** 2
+ (v0[:, 2] - v1[:, 2]) ** 2
+ 1e-10
)
b = np.sqrt(
(v1[:, 0] - v2[:, 0]) ** 2
+ (v1[:, 1] - v2[:, 1]) ** 2
+ (v1[:, 2] - v2[:, 2]) ** 2
+ 1e-10
)
c = np.sqrt(
(v0[:, 0] - v2[:, 0]) ** 2
+ (v0[:, 1] - v2[:, 1]) ** 2
+ (v0[:, 2] - v2[:, 2]) ** 2
+ 1e-10
)
s = (a + b + c) / 2
area = np.sqrt(s * (s - a) * (s - b) * (s - c) + 1e-10)
return area
# helper for min max
def _find_mins_maxs(points):
minx = float(np.min(points[:, 0]))
miny = float(np.min(points[:, 1]))
minz = float(np.min(points[:, 2]))
maxx = float(np.max(points[:, 0]))
maxy = float(np.max(points[:, 1]))
maxz = float(np.max(points[:, 2]))
return minx, maxx, miny, maxy, minz, maxz
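# Hedged usage sketch (not part of the original module): the STL filename below
# is a placeholder, and interior sampling requires the optional pysdf dependency
# imported at the top of this file.
if __name__ == "__main__":
    geo = Tessellation.from_stl("./example_mesh.stl", airtight=True)
    boundary = geo.sample_boundary(nr_points=1000)
    interior = geo.sample_interior(nr_points=1000)
    print("approx. surface area:", float(np.sum(boundary["area"])))
    print("approx. volume:", float(np.sum(interior["area"])))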
| modulus-sym-main | modulus/sym/geometry/tessellation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines different Curve objects
"""
import types
import numpy as np
import sympy
import symengine
from chaospy.distributions.sampler.sequences.primes import create_primes
from chaospy.distributions.sampler.sequences.van_der_corput import (
create_van_der_corput_samples as create_samples,
)
from modulus.sym.utils.sympy import np_lambdify
from .parameterization import Parameterization, Parameter
from .helper import _sympy_func_to_func
class Curve:
"""A Curve object that keeps track of the surface/perimeter of a geometry.
The curve object also contains normals and area/length of curve.
"""
def __init__(self, sample, dims, parameterization=Parameterization()):
# store attributes
self._sample = sample
self._dims = dims
self.parameterization = parameterization
def sample(
self, nr_points, criteria=None, parameterization=None, quasirandom=False
):
# use internal parameterization if not given
if parameterization is None:
parameterization = self.parameterization
# continually sample points throwing out points that don't satisfy criteria
invar = {
key: np.empty((0, 1))
for key in self.dims + ["normal_" + x for x in self.dims] + ["area"]
}
params = {key: np.empty((0, 1)) for key in parameterization.parameters}
total_sampled = 0
total_tried = 0
nr_try = 0
while True:
# sample curve
local_invar, local_params = self._sample(
nr_points, parameterization, quasirandom
)
# compute given criteria and remove points
if criteria is not None:
computed_criteria = criteria(local_invar, local_params)
local_invar = {
key: value[computed_criteria[:, 0], :]
for key, value in local_invar.items()
}
local_params = {
key: value[computed_criteria[:, 0], :]
for key, value in local_params.items()
}
# store invar
for key in local_invar.keys():
invar[key] = np.concatenate([invar[key], local_invar[key]], axis=0)
# store params
for key in local_params.keys():
params[key] = np.concatenate([params[key], local_params[key]], axis=0)
# keep track of sampling
total_sampled = next(iter(invar.values())).shape[0]
total_tried += nr_points
nr_try += 1
# break when finished sampling
if total_sampled >= nr_points:
for key, value in invar.items():
invar[key] = value[:nr_points]
for key, value in params.items():
params[key] = value[:nr_points]
break
# check if couldn't sample
if nr_try > 1000 and total_sampled < 1:
raise Exception("Unable to sample curve")
return invar, params
@property
def dims(self):
"""
Returns
-------
dims : list of strings
output can be ['x'], ['x','y'], or ['x','y','z']
"""
return ["x", "y", "z"][: self._dims]
def approx_area(
self,
parameterization=Parameterization(),
criteria=None,
approx_nr=10000,
quasirandom=False,
):
"""
Parameters
----------
        parameterization : dict of Parameters and their ranges
If the curve is parameterized then you can provide ranges
for the parameters with this.
criteria : None, SymPy boolean exprs
Calculate area discarding regions that don't satisfy
this criteria.
approx_nr : int
Area might be difficult to compute if parameterized. In
this case the area is approximated by sampling `area`,
`approx_nr` number of times. This amounts to monte carlo
integration.
Returns
-------
area : float
area of curve
"""
s, p = self._sample(
nr_points=approx_nr,
parameterization=parameterization,
quasirandom=quasirandom,
)
        if criteria is None:
            return np.sum(s["area"])
        computed_criteria = criteria(s, p)
        total_area = np.sum(s["area"][computed_criteria[:, 0], :])
        return total_area
def scale(self, x, parameterization=Parameterization()):
"""
scale curve
Parameters
----------
x : float, SymPy Symbol/Exprs
scale factor.
"""
def _sample(internal_sample, dims, x):
if isinstance(x, (float, int)):
pass
elif isinstance(x, sympy.Basic):
x = _sympy_func_to_func(x)
else:
raise TypeError("Scaling by type " + str(type(x)) + "is not supported")
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
# sample points
invar, params = internal_sample(
nr_points, parameterization, quasirandom
)
# compute scale if needed
if isinstance(x, (float, int)):
computed_scale = x
else:
                    computed_scale = x(params)
                # scale invar
                for d in dims:
                    invar[d] *= computed_scale
                invar["area"] *= computed_scale ** (len(dims) - 1)
return invar, params
return sample
return Curve(
_sample(self._sample, self.dims, x),
len(self.dims),
self.parameterization.union(parameterization),
)
def translate(self, xyz, parameterization=Parameterization()):
"""
translate curve
Parameters
----------
xyz : tuple of floats, ints, SymPy Symbol/Exprs
translate curve by these values.
"""
def _sample(internal_sample, dims, xyz):
compiled_xyz = []
for i, x in enumerate(xyz):
if isinstance(x, (float, int)):
compiled_xyz.append(x)
elif isinstance(x, sympy.Basic):
compiled_xyz.append(_sympy_func_to_func(x))
else:
raise TypeError(
"Translate by type " + str(type(x)) + "is not supported"
)
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
# sample points
invar, params = internal_sample(
nr_points, parameterization, quasirandom
)
# compute translation if needed
computed_translation = []
for x in compiled_xyz:
if isinstance(x, (float, int)):
computed_translation.append(x)
else:
computed_translation.append(x(params))
# translate invar
for d, x in zip(dims, computed_translation):
invar[d] += x
return invar, params
return sample
return Curve(
_sample(self._sample, self.dims, xyz),
len(self.dims),
self.parameterization.union(parameterization),
)
def rotate(self, angle, axis, parameterization=Parameterization()):
"""
rotate curve
Parameters
----------
        angle : float, SymPy Symbol/Exprs
            angle of rotation in radians.
        axis : str
            axis to rotate around.
"""
def _sample(internal_sample, dims, angle, axis):
if isinstance(angle, (float, int)):
pass
elif isinstance(angle, sympy.Basic):
angle = _sympy_func_to_func(angle)
else:
raise TypeError(
"Scaling by type " + str(type(angle)) + "is not supported"
)
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
# sample points
invar, params = internal_sample(
nr_points, parameterization, quasirandom
)
# compute translation if needed
if isinstance(angle, (float, int)):
computed_angle = angle
else:
computed_angle = angle(params)
# angle invar
rotated_invar = {**invar}
rotated_dims = [key for key in self.dims if key != axis]
rotated_invar[rotated_dims[0]] = (
np.cos(computed_angle) * invar[rotated_dims[0]]
- np.sin(computed_angle) * invar[rotated_dims[1]]
)
rotated_invar["normal_" + rotated_dims[0]] = (
np.cos(computed_angle) * invar["normal_" + rotated_dims[0]]
- np.sin(computed_angle) * invar["normal_" + rotated_dims[1]]
)
rotated_invar[rotated_dims[1]] = (
np.sin(computed_angle) * invar[rotated_dims[0]]
+ np.cos(computed_angle) * invar[rotated_dims[1]]
)
rotated_invar["normal_" + rotated_dims[1]] = (
np.sin(computed_angle) * invar["normal_" + rotated_dims[0]]
+ np.cos(computed_angle) * invar["normal_" + rotated_dims[1]]
)
return rotated_invar, params
return sample
return Curve(
_sample(self._sample, self.dims, angle, axis),
len(self.dims),
self.parameterization.union(parameterization),
)
def invert_normal(self):
def _sample(internal_sample, dims):
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
s, p = internal_sample(nr_points, parameterization, quasirandom)
for d in dims:
s["normal_" + d] = -s["normal_" + d]
return s, p
return sample
return Curve(
_sample(self._sample, self.dims), len(self.dims), self.parameterization
)
class SympyCurve(Curve):
"""Curve defined by sympy functions
Parameters
----------
functions : dictionary of SymPy Exprs
Parameterized curve in 1, 2 or 3 dimensions. For example, a
circle might have::
functions = {'x': cos(theta),
\t'y': sin(theta),
\t'normal_x': cos(theta),
\t'normal_y': sin(theta)}
TODO: refactor to remove normals.
    parameterization : Parameterization
        This gives the ranges for the parameters in the parameterized
        curve. For example, a circle might have
        `Parameterization({theta: (0, 2*pi)})`.
    area : float, int, SymPy Exprs
        The surface area/perimeter of the curve.
    criteria : SymPy Boolean Function
        If this boolean expression is false then we do not
        sample there on the curve. This can be used to enforce
        uniform sample probability.
"""
def __init__(self, functions, parameterization, area, criteria=None):
# lambdify functions
lambdify_functions = {}
for key, func in functions.items():
try:
func = float(func)
except:
pass
if isinstance(func, float):
lambdify_functions[key] = float(func)
elif isinstance(func, (sympy.Basic, symengine.Basic, Parameter)):
lambdify_functions[key] = _sympy_func_to_func(func)
else:
raise TypeError("function type not supported: " + str(type(func)))
# lambdify area function
try:
area = float(area)
except:
pass
if isinstance(area, float):
area_fn = float(area)
elif isinstance(area, (sympy.Basic, symengine.Basic, Parameter)):
area_fn = _sympy_func_to_func(area)
else:
raise TypeError("area type not supported: " + str(type(area)))
lambdify_functions["area"] = area_fn
# lambdify criteria function
if criteria is not None:
criteria = _sympy_func_to_func(criteria)
# create closure for sample function
def _sample(lambdify_functions, criteria, internal_parameterization):
def sample(
nr_points, parameterization=Parameterization(), quasirandom=False
):
# use internal parameterization if not given
i_parameterization = internal_parameterization.copy()
for key, value in parameterization.param_ranges.items():
i_parameterization.param_ranges[key] = value
# continually sample points throwing out points that don't satisfy criteria
invar = {
str(key): np.empty((0, 1)) for key in lambdify_functions.keys()
}
params = {
str(key): np.empty((0, 1))
for key in parameterization.param_ranges.keys()
}
total_sampled = 0
total_tried = 0
nr_try = 0
while True:
# sample parameter ranges
local_params = i_parameterization.sample(nr_points, quasirandom)
# compute curve points from functions
local_invar = {}
for key, func in lambdify_functions.items():
if isinstance(func, (float, int)):
local_invar[key] = np.full_like(
next(iter(local_params.values())), func
)
else:
local_invar[key] = func(local_params)
local_invar["area"] /= next(iter(local_params.values())).shape[0]
# remove points that don't satisfy curve criteria if needed
if criteria is not None:
# compute curve criteria
computed_criteria = criteria(local_params).astype(bool)
# remove elements points based on curve criteria
local_invar = {
key: value[computed_criteria[:, 0], :]
for key, value in local_invar.items()
}
local_params = {
key: value[computed_criteria[:, 0], :]
for key, value in local_params.items()
}
# only store external parameters
for key in list(local_params.keys()):
if key not in parameterization.parameters:
local_params.pop(key)
# store invar
for key in local_invar.keys():
invar[key] = np.concatenate(
[invar[key], local_invar[key]], axis=0
)
# store params
for key in local_params.keys():
params[key] = np.concatenate(
[params[key], local_params[key]], axis=0
)
# keep track of sampling
total_sampled = next(iter(invar.values())).shape[0]
total_tried += next(iter(local_invar.values())).shape[0]
nr_try += 1
# break when finished sampling
if total_sampled >= nr_points:
for key, value in invar.items():
invar[key] = value[:nr_points]
for key, value in params.items():
params[key] = value[:nr_points]
break
# check if couldn't sample
if nr_try > 10000 and total_sampled < 1:
raise Exception("Unable to sample curve")
return invar, params
return sample
# initialize curve
Curve.__init__(
self,
_sample(lambdify_functions, criteria, parameterization),
len(functions) // 2,
parameterization=parameterization,
)
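# Hedged usage sketch (not part of the original module): builds a unit circle as
# a SympyCurve and estimates its perimeter by summing the sampled 'area' values.
# It assumes Parameterization accepts a dict mapping a Parameter to its range,
# mirroring how the geometry primitives in this package construct their curves.
if __name__ == "__main__":
    theta = Parameter("theta")
    circle = SympyCurve(
        functions={
            "x": sympy.cos(theta),
            "y": sympy.sin(theta),
            "normal_x": sympy.cos(theta),
            "normal_y": sympy.sin(theta),
        },
        parameterization=Parameterization({theta: (0, 2 * np.pi)}),
        area=2 * np.pi,
    )
    pts, _ = circle.sample(1000)
    print("approx. perimeter:", float(np.sum(pts["area"])))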
| modulus-sym-main | modulus/sym/geometry/curve.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines base class for all geometries
"""
import copy
import numpy as np
import itertools
import sympy
from typing import Callable, Union, List
from modulus.sym.utils.sympy import np_lambdify
from modulus.sym.constants import diff_str
from .parameterization import Parameterization, Bounds
from .helper import (
_concat_numpy_dict_list,
_sympy_sdf_to_sdf,
_sympy_criteria_to_criteria,
_sympy_func_to_func,
)
def csg_curve_naming(index):
return "PRIMITIVE_PARAM_" + str(index).zfill(5)
class Geometry:
"""
Base class for all geometries
"""
def __init__(
self,
curves,
sdf,
dims,
bounds,
parameterization=Parameterization(),
interior_epsilon=1e-6,
):
# store attributes
self.curves = curves
self.sdf = sdf
self._dims = dims
self.bounds = bounds
self.parameterization = parameterization
self.interior_epsilon = interior_epsilon # to check if in domain or outside
@property
def dims(self):
"""
Returns
-------
        dims : List[str]
output can be ['x'], ['x','y'], or ['x','y','z']
"""
return ["x", "y", "z"][: self._dims]
def scale(
self,
x: Union[float, sympy.Basic],
parameterization: Parameterization = Parameterization(),
):
"""
Scales geometry.
Parameters
----------
x : Union[float, sympy.Basic]
Scale factor. Can be a sympy expression if parameterizing.
parameterization : Parameterization
Parameterization if scale factor is parameterized.
"""
# create scaled sdf function
def _scale_sdf(sdf, dims, x):
if isinstance(x, (float, int)):
pass
elif isinstance(x, sympy.Basic):
x = _sympy_func_to_func(x)
else:
raise TypeError("Scaling by type " + str(type(x)) + "is not supported")
def scale_sdf(invar, params, compute_sdf_derivatives=False):
# compute scale if needed
if isinstance(x, (float, int)):
computed_scale = x
else:
computed_scale = x(params)
# scale input to sdf function
scaled_invar = {**invar}
for key in dims:
scaled_invar[key] = scaled_invar[key] / computed_scale
# compute sdf
computed_sdf = sdf(scaled_invar, params, compute_sdf_derivatives)
# scale output sdf values
if isinstance(x, (float, int)):
computed_sdf["sdf"] *= x
else:
computed_sdf["sdf"] *= x(params)
return computed_sdf
return scale_sdf
new_sdf = _scale_sdf(self.sdf, self.dims, x)
# add parameterization
new_parameterization = self.parameterization.union(parameterization)
# scale bounds
new_bounds = self.bounds.scale(x, parameterization)
# scale curves
new_curves = [c.scale(x, parameterization) for c in self.curves]
# return scaled geometry
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def translate(
self,
xyz: List[Union[float, sympy.Basic]],
parameterization: Parameterization = Parameterization(),
):
"""
Translates geometry.
Parameters
----------
xyz : List[Union[float, sympy.Basic]]
Translation. Can be a sympy expression if parameterizing.
parameterization : Parameterization
Parameterization if translation is parameterized.
"""
# create translated sdf function
        def _translate_sdf(sdf, dims, xyz):
compiled_xyz = []
for i, x in enumerate(xyz):
if isinstance(x, (float, int)):
compiled_xyz.append(x)
elif isinstance(x, sympy.Basic):
compiled_xyz.append(_sympy_func_to_func(x))
else:
raise TypeError(
"Translate by type " + str(type(x)) + "is not supported"
)
def translate_sdf(invar, params, compute_sdf_derivatives=False):
# compute translation if needed
computed_translation = []
for x in compiled_xyz:
if isinstance(x, (float, int)):
computed_translation.append(x)
else:
computed_translation.append(x(params))
# translate input to sdf function
translated_invar = {**invar}
for i, key in enumerate(dims):
translated_invar[key] = (
translated_invar[key] - computed_translation[i]
)
# compute sdf
computed_sdf = sdf(translated_invar, params, compute_sdf_derivatives)
return computed_sdf
return translate_sdf
new_sdf = _translate_sdf(self.sdf, self.dims, xyz)
# add parameterization
new_parameterization = self.parameterization.union(parameterization)
# translate bounds
new_bounds = self.bounds.translate(xyz, parameterization)
# translate curves
new_curves = [c.translate(xyz, parameterization) for c in self.curves]
# return translated geometry
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def rotate(
self,
angle: Union[float, sympy.Basic],
axis: str = "z",
center: Union[None, List[float]] = None,
parameterization=Parameterization(),
):
"""
Rotates geometry.
Parameters
----------
angle : Union[float, sympy.Basic]
            Angle of rotation in radians. Can be a sympy expression if parameterizing.
axis : str
Axis of rotation. Default is `"z"`.
center : Union[None, List[Union[float, sympy.Basic]]] = None
If given then center the rotation around this point.
parameterization : Parameterization
            Parameterization if rotation is parameterized.
"""
# create rotated sdf function
def _rotate_sdf(sdf, dims, angle, axis, center):
if isinstance(angle, (float, int)):
pass
elif isinstance(angle, sympy.Basic):
angle = _sympy_func_to_func(angle)
else:
raise TypeError(
"Scaling by type " + str(type(angle)) + "is not supported"
)
def rotate_sdf(invar, params, compute_sdf_derivatives=False):
# compute translation if needed
if isinstance(angle, (float, int)):
computed_angle = angle
else:
computed_angle = angle(params)
# rotate input to sdf function
rotated_invar = {**invar}
if center is not None:
for i, key in enumerate(dims):
rotated_invar[key] = rotated_invar[key] - center[i]
_rotated_invar = {**rotated_invar}
rotated_dims = [key for key in dims if key != axis]
_rotated_invar[rotated_dims[0]] = (
np.cos(computed_angle) * rotated_invar[rotated_dims[0]]
+ np.sin(computed_angle) * rotated_invar[rotated_dims[1]]
)
_rotated_invar[rotated_dims[1]] = (
-np.sin(computed_angle) * rotated_invar[rotated_dims[0]]
+ np.cos(computed_angle) * rotated_invar[rotated_dims[1]]
)
if center is not None:
for i, key in enumerate(dims):
_rotated_invar[key] = _rotated_invar[key] + center[i]
# compute sdf
computed_sdf = sdf(_rotated_invar, params, compute_sdf_derivatives)
return computed_sdf
return rotate_sdf
new_sdf = _rotate_sdf(self.sdf, self.dims, angle, axis, center)
# add parameterization
new_parameterization = self.parameterization.union(parameterization)
# rotate bounds
if center is not None:
new_bounds = self.bounds.translate([-x for x in center])
new_bounds = new_bounds.rotate(angle, axis, parameterization)
new_bounds = new_bounds.translate(center)
else:
new_bounds = self.bounds.rotate(angle, axis, parameterization)
# rotate curves
new_curves = []
for c in self.curves:
if center is not None:
new_c = c.translate([-x for x in center])
new_c = new_c.rotate(angle, axis, parameterization)
new_c = new_c.translate(center)
else:
new_c = c.rotate(angle, axis, parameterization)
new_curves.append(new_c)
# return rotated geometry
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def repeat(
self,
spacing: float,
repeat_lower: List[int],
repeat_higher: List[int],
center: Union[None, List[float]] = None,
):
"""
Finite Repetition of geometry.
Parameters
----------
spacing : float
Spacing between each repetition.
repeat_lower : List[int]
How many repetitions going in negative direction.
        repeat_higher : List[int]
            How many repetitions going in positive direction.
        center : Union[None, List[float]] = None
            If given then center the repetition around this point.
"""
# create repeated sdf function
def _repeat_sdf(
sdf, dims, spacing, repeat_lower, repeat_higher, center
): # TODO make spacing, repeat_lower, and repeat_higher parameterizable
def repeat_sdf(invar, params, compute_sdf_derivatives=False):
# clamp position values
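                # Subtracting spacing * clamp(round(coord / spacing), rl, rh)
                # maps every query point back into the base cell, so evaluating
                # the original SDF there yields a finite lattice of copies.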
clamped_invar = {**invar}
if center is not None:
for i, key in enumerate(dims):
clamped_invar[key] = clamped_invar[key] - center[i]
for d, rl, rh in zip(dims, repeat_lower, repeat_higher):
clamped_invar[d] = clamped_invar[d] - spacing * np.minimum(
np.maximum(np.around(clamped_invar[d] / spacing), rl), rh
)
if center is not None:
for i, key in enumerate(dims):
clamped_invar[key] = clamped_invar[key] + center[i]
# compute sdf
computed_sdf = sdf(clamped_invar, params, compute_sdf_derivatives)
return computed_sdf
return repeat_sdf
new_sdf = _repeat_sdf(
self.sdf, self.dims, spacing, repeat_lower, repeat_higher, center
)
# repeat bounds and curves
new_bounds = self.bounds.copy()
new_curves = []
for t in itertools.product(
*[list(range(rl, rh + 1)) for rl, rh in zip(repeat_lower, repeat_higher)]
):
new_bounds = new_bounds.union(
self.bounds.translate([spacing * a for a in t])
)
new_curves += [c.translate([spacing * a for a in t]) for c in self.curves]
# return repeated geometry
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
self.parameterization.copy(),
interior_epsilon=self.interior_epsilon,
)
def copy(self):
return copy.deepcopy(self)
def boundary_criteria(self, invar, criteria=None, params={}):
# check if moving in or out of normal direction changes SDF
invar_normal_plus = {**invar}
invar_normal_minus = {**invar}
for key in self.dims:
invar_normal_plus[key] = (
invar_normal_plus[key]
+ self.interior_epsilon * invar_normal_plus["normal_" + key]
)
invar_normal_minus[key] = (
invar_normal_minus[key]
- self.interior_epsilon * invar_normal_minus["normal_" + key]
)
sdf_normal_plus = self.sdf(
invar_normal_plus, params, compute_sdf_derivatives=False
)["sdf"]
sdf_normal_minus = self.sdf(
invar_normal_minus, params, compute_sdf_derivatives=False
)["sdf"]
on_boundary = np.greater_equal(0, sdf_normal_plus * sdf_normal_minus)
# check if points satisfy the criteria function
if criteria is not None:
# convert sympy criteria if needed
satify_criteria = criteria(invar, params)
# update on_boundary
on_boundary = np.logical_and(on_boundary, satify_criteria)
return on_boundary
def sample_boundary(
self,
nr_points: int,
criteria: Union[sympy.Basic, None] = None,
parameterization: Union[Parameterization, None] = None,
quasirandom: bool = False,
):
"""
Samples the surface or perimeter of the geometry.
Parameters
----------
nr_points : int
number of points to sample on boundary.
criteria : Union[sympy.Basic, None]
Only sample points that satisfy this criteria.
parameterization : Union[Parameterization, None], optional
If the geometry is parameterized then you can provide ranges
for the parameters with this. By default the sampling will be
done with the internal parameterization.
quasirandom : bool
If true then sample the points using the Halton sequences.
Default is False.
Returns
-------
points : Dict[str, np.ndarray]
            Dictionary containing a point cloud sampled uniformly.
For example in 2D it would be
```
points = {'x': np.ndarray (N, 1),
'y': np.ndarray (N, 1),
'normal_x': np.ndarray (N, 1),
'normal_y': np.ndarray (N, 1),
'area': np.ndarray (N, 1)}
```
The `area` value can be used for Monte Carlo integration
like the following,
`total_area = np.sum(points['area'])`
"""
# compile criteria from sympy if needed
if criteria is not None:
if isinstance(criteria, sympy.Basic):
criteria = _sympy_criteria_to_criteria(criteria)
elif isinstance(criteria, Callable):
pass
else:
raise TypeError(
"criteria type is not supported: " + str(type(criteria))
)
# use internal parameterization if not given
if parameterization is None:
parameterization = self.parameterization
elif isinstance(parameterization, dict):
parameterization = Parameterization(parameterization)
# create boundary criteria closure
def _boundary_criteria(criteria):
def boundary_criteria(invar, params):
return self.boundary_criteria(invar, criteria=criteria, params=params)
return boundary_criteria
closed_boundary_criteria = _boundary_criteria(criteria)
# compute required points on each curve
curve_areas = np.array(
[
curve.approx_area(parameterization, criteria=closed_boundary_criteria)
for curve in self.curves
]
)
assert np.sum(curve_areas) > 0, "Geometry has no surface"
curve_probabilities = curve_areas / np.linalg.norm(curve_areas, ord=1)
curve_index = np.arange(len(self.curves))
points_per_curve = np.random.choice(
curve_index, nr_points, p=curve_probabilities
)
points_per_curve, _ = np.histogram(
points_per_curve, np.arange(len(self.curves) + 1) - 0.5
)
# continually sample each curve until reached desired number of points
list_invar = []
list_params = []
for n, a, curve in zip(points_per_curve, curve_areas, self.curves):
if n > 0:
i, p = curve.sample(
n,
criteria=closed_boundary_criteria,
parameterization=parameterization,
)
i["area"] = np.full_like(i["area"], a / n)
list_invar.append(i)
list_params.append(p)
invar = _concat_numpy_dict_list(list_invar)
params = _concat_numpy_dict_list(list_params)
invar.update(params)
return invar
def sample_interior(
self,
nr_points: int,
bounds: Union[Bounds, None] = None,
criteria: Union[sympy.Basic, None] = None,
parameterization: Union[Parameterization, None] = None,
compute_sdf_derivatives: bool = False,
quasirandom: bool = False,
):
"""
Samples the interior of the geometry.
Parameters
----------
nr_points : int
number of points to sample.
bounds : Union[Bounds, None]
Bounds to sample points from. For example,
`bounds = Bounds({Parameter('x'): (0, 1), Parameter('y'): (0, 1)})`.
By default the internal bounds will be used.
criteria : Union[sympy.Basic, None]
Only sample points that satisfy this criteria.
parameterization: Union[Parameterization, None]
If the geometry is parameterized then you can provide ranges
for the parameters with this.
compute_sdf_derivatives : bool
Compute sdf derivatives if true.
quasirandom : bool
If true then sample the points using the Halton sequences.
Default is False.
Returns
-------
points : Dict[str, np.ndarray]
            Dictionary containing a point cloud sampled uniformly.
For example in 2D it would be
```
points = {'x': np.ndarray (N, 1),
'y': np.ndarray (N, 1),
'sdf': np.ndarray (N, 1),
'area': np.ndarray (N, 1)}
```
The `area` value can be used for Monte Carlo integration
like the following,
`total_area = np.sum(points['area'])`
"""
# compile criteria from sympy if needed
if criteria is not None:
if isinstance(criteria, sympy.Basic):
criteria = _sympy_criteria_to_criteria(criteria)
elif isinstance(criteria, Callable):
pass
else:
raise TypeError(
"criteria type is not supported: " + str(type(criteria))
)
# use internal bounds if not given
if bounds is None:
bounds = self.bounds
elif isinstance(bounds, dict):
bounds = Bounds(bounds)
# use internal parameterization if not given
if parameterization is None:
parameterization = self.parameterization
elif isinstance(parameterization, dict):
parameterization = Parameterization(parameterization)
# continually sample until reached desired number of points
invar = {}
params = {}
total_tried = 0
nr_try = 0
while True:
# sample invar and params
local_invar = bounds.sample(nr_points, parameterization, quasirandom)
local_params = parameterization.sample(nr_points, quasirandom)
# evaluate SDF function on points
local_invar.update(
self.sdf(
local_invar,
local_params,
compute_sdf_derivatives=compute_sdf_derivatives,
)
)
# remove points outside of domain
criteria_index = np.greater(local_invar["sdf"], 0)
if criteria is not None:
criteria_index = np.logical_and(
criteria_index, criteria(local_invar, local_params)
)
for key in local_invar.keys():
local_invar[key] = local_invar[key][criteria_index[:, 0], :]
for key in local_params.keys():
local_params[key] = local_params[key][criteria_index[:, 0], :]
# add sampled points to list
for key in local_invar.keys():
if key not in invar.keys(): # TODO this can be condensed
invar[key] = local_invar[key]
else:
invar[key] = np.concatenate([invar[key], local_invar[key]], axis=0)
for key in local_params.keys():
if key not in params.keys(): # TODO this can be condensed
params[key] = local_params[key]
else:
params[key] = np.concatenate(
[params[key], local_params[key]], axis=0
)
# check if finished
total_sampled = next(iter(invar.values())).shape[0]
total_tried += nr_points
nr_try += 1
if total_sampled >= nr_points:
for key, value in invar.items():
invar[key] = value[:nr_points]
for key, value in params.items():
params[key] = value[:nr_points]
break
# report error if could not sample
if nr_try > 100 and total_sampled < 1:
raise RuntimeError(
"Could not sample interior of geometry. Check to make sure non-zero volume"
)
# compute area value for monte carlo integration
volume = (total_sampled / total_tried) * bounds.volume(parameterization)
invar["area"] = np.full_like(next(iter(invar.values())), volume / nr_points)
# add params to invar
invar.update(params)
return invar
@staticmethod
def _convert_criteria(criteria):
return criteria
def __add__(self, other):
def _add_sdf(sdf_1, sdf_2, dims):
def add_sdf(invar, params, compute_sdf_derivatives=False):
computed_sdf_1 = sdf_1(invar, params, compute_sdf_derivatives)
computed_sdf_2 = sdf_2(invar, params, compute_sdf_derivatives)
computed_sdf = {}
computed_sdf["sdf"] = np.maximum(
computed_sdf_1["sdf"], computed_sdf_2["sdf"]
)
if compute_sdf_derivatives:
for d in dims:
computed_sdf["sdf" + diff_str + d] = np.where(
computed_sdf_1["sdf"] > computed_sdf_2["sdf"],
computed_sdf_1["sdf" + diff_str + d],
computed_sdf_2["sdf" + diff_str + d],
)
return computed_sdf
return add_sdf
new_sdf = _add_sdf(self.sdf, other.sdf, self.dims)
new_parameterization = self.parameterization.union(other.parameterization)
new_bounds = self.bounds.union(other.bounds)
return Geometry(
self.curves + other.curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def __sub__(self, other):
def _sub_sdf(sdf_1, sdf_2, dims):
def sub_sdf(invar, params, compute_sdf_derivatives=False):
computed_sdf_1 = sdf_1(invar, params, compute_sdf_derivatives)
computed_sdf_2 = sdf_2(invar, params, compute_sdf_derivatives)
computed_sdf = {}
computed_sdf["sdf"] = np.minimum(
computed_sdf_1["sdf"], -computed_sdf_2["sdf"]
)
if compute_sdf_derivatives:
for d in dims:
computed_sdf["sdf" + diff_str + d] = np.where(
computed_sdf_1["sdf"] < -computed_sdf_2["sdf"],
computed_sdf_1["sdf" + diff_str + d],
-computed_sdf_2["sdf" + diff_str + d],
)
return computed_sdf
return sub_sdf
new_sdf = _sub_sdf(self.sdf, other.sdf, self.dims)
new_parameterization = self.parameterization.union(other.parameterization)
new_bounds = self.bounds.union(other.bounds)
new_curves = self.curves + [c.invert_normal() for c in other.curves]
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def __invert__(self):
def _invert_sdf(sdf, dims):
def invert_sdf(invar, params, compute_sdf_derivatives=False):
computed_sdf = sdf(invar, params, compute_sdf_derivatives)
computed_sdf["sdf"] = -computed_sdf["sdf"]
if compute_sdf_derivatives:
for d in dims:
computed_sdf["sdf" + diff_str + d] = -computed_sdf[
"sdf" + diff_str + d
]
return computed_sdf
return invert_sdf
new_sdf = _invert_sdf(self.sdf, self.dims)
new_parameterization = self.parameterization.copy()
new_bounds = self.bounds.copy()
new_curves = [c.invert_normal() for c in self.curves]
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
def __and__(self, other):
def _and_sdf(sdf_1, sdf_2, dims):
def and_sdf(invar, params, compute_sdf_derivatives=False):
computed_sdf_1 = sdf_1(invar, params, compute_sdf_derivatives)
computed_sdf_2 = sdf_2(invar, params, compute_sdf_derivatives)
computed_sdf = {}
computed_sdf["sdf"] = np.minimum(
computed_sdf_1["sdf"], computed_sdf_2["sdf"]
)
if compute_sdf_derivatives:
for d in dims:
computed_sdf["sdf" + diff_str + d] = np.where(
computed_sdf_1["sdf"] < computed_sdf_2["sdf"],
computed_sdf_1["sdf" + diff_str + d],
computed_sdf_2["sdf" + diff_str + d],
)
return computed_sdf
return and_sdf
new_sdf = _and_sdf(self.sdf, other.sdf, self.dims)
new_parameterization = self.parameterization.union(other.parameterization)
new_bounds = self.bounds.union(other.bounds)
new_curves = self.curves + other.curves
return Geometry(
new_curves,
new_sdf,
len(self.dims),
new_bounds,
new_parameterization,
interior_epsilon=self.interior_epsilon,
)
| modulus-sym-main | modulus/sym/geometry/geometry.py |
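# --- Usage sketch (editorial example, not part of the repository file above) ---
# The operators defined on Geometry (__add__, __sub__, __and__, __invert__) compose
# signed distance functions, and sample_interior returns an "area" key that acts as a
# Monte Carlo quadrature weight. The Rectangle/Circle primitives below are assumed to
# be available from modulus.sym.geometry.primitives_2d; adjust imports if they differ.
from modulus.sym.geometry.primitives_2d import Rectangle, Circle

rec = Rectangle((0.0, 0.0), (1.0, 1.0))   # unit square
hole = Circle((0.5, 0.5), 0.25)           # circular cut-out
plate = rec - hole                        # CSG difference via Geometry.__sub__

# Sample interior points; summing "area" approximates the true area 1 - pi * 0.25**2.
pts = plate.sample_interior(10000)
approx_area = pts["area"].sum()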
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import sympy
import itertools
from modulus.sym.utils.sympy import np_lambdify
from modulus.sym.constants import diff_str
def _concat_numpy_dict_list(numpy_dict_list):
concat_variable = {}
for key in numpy_dict_list[0].keys():
concat_variable[key] = np.concatenate([x[key] for x in numpy_dict_list], axis=0)
return concat_variable
def _sympy_sdf_to_sdf(sdf, dx=0.0001):
sdf_inputs = list(set([str(x) for x in sdf.free_symbols]))
fn_sdf = np_lambdify(sdf, sdf_inputs)
def _sdf(fn_sdf, sdf_inputs, dx):
def sdf(invar, params, compute_sdf_derivatives=False):
# get inputs to sdf sympy expression
inputs = {}
for key, value in itertools.chain(invar.items(), params.items()):
if key in sdf_inputs:
inputs[key] = value
# compute sdf
computed_sdf = fn_sdf(**inputs)
outputs = {"sdf": computed_sdf}
# compute sdf derivatives if needed
if compute_sdf_derivatives:
for d in [x for x in invar.keys() if x in ["x", "y", "z"]]:
                    # If primitive is a function of this direction
if d in sdf_inputs:
# compute sdf plus dx/2
inputs_plus = {**inputs}
inputs_plus[d] = inputs_plus[d] + (dx / 2)
computed_sdf_plus = fn_sdf(**inputs_plus)
# compute sdf minus dx/2
inputs_minus = {**inputs}
inputs_minus[d] = inputs_minus[d] - (dx / 2)
computed_sdf_minus = fn_sdf(**inputs_minus)
# store sdf derivative
outputs["sdf" + diff_str + d] = (
computed_sdf_plus - computed_sdf_minus
) / dx
else:
# Fill deriv with zeros for compatibility
outputs["sdf" + diff_str + d] = np.zeros_like(computed_sdf)
return outputs
return sdf
return _sdf(fn_sdf, sdf_inputs, dx)
def _sympy_criteria_to_criteria(criteria):
criteria_inputs = list(set([str(x) for x in criteria.free_symbols]))
fn_criteria = np_lambdify(criteria, criteria_inputs)
def _criteria(fn_criteria, criteria_inputs):
def criteria(invar, params):
# get inputs to criteria sympy expression
inputs = {}
for key, value in itertools.chain(invar.items(), params.items()):
if key in criteria_inputs:
inputs[key] = value
# compute criteria
return fn_criteria(**inputs)
return criteria
return _criteria(fn_criteria, criteria_inputs)
def _sympy_func_to_func(func):
func_inputs = list(
set([str(x) for x in func.free_symbols])
) # TODO set conversion is hacky fix
fn_func = np_lambdify(func, func_inputs)
def _func(fn_func, func_inputs):
def func(params):
# get inputs to sympy expression
inputs = {}
for key, value in params.items():
if key in func_inputs:
inputs[key] = value
# compute func
return fn_func(**inputs)
return func
return _func(fn_func, func_inputs)
| modulus-sym-main | modulus/sym/geometry/helper.py |
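# --- Usage sketch (editorial example, not part of the repository file above) ---
# _sympy_sdf_to_sdf (defined above) wraps a sympy expression into the
# sdf(invar, params, compute_sdf_derivatives) callable used by Geometry; derivatives
# are approximated with central differences of step dx. A minimal sketch:
import numpy as np
import sympy

x, y = sympy.symbols("x y")
circle = 1.0 - sympy.sqrt(x**2 + y**2)      # positive inside the unit circle
sdf_fn = _sympy_sdf_to_sdf(circle, dx=1e-4)

invar = {"x": np.random.uniform(-1, 1, (8, 1)), "y": np.random.uniform(-1, 1, (8, 1))}
out = sdf_fn(invar, params={}, compute_sdf_derivatives=True)
# out holds "sdf" plus derivative keys built as "sdf" + diff_str + "x"/"y"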
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from typing import Dict, List, Union, Tuple, Callable, Optional
import sympy
from typing import Callable
from chaospy.distributions.sampler.sequences.primes import create_primes
from chaospy.distributions.sampler.sequences.van_der_corput import (
create_van_der_corput_samples as create_samples,
)
from modulus.sym.utils.sympy import np_lambdify
class Parameter(sympy.Symbol):
"""A Symbolic object used to parameterize geometries.
    Currently this only overloads the Sympy Symbol class; however,
capabilities may be expanded in the future.
Parameters
----------
name : str
Name given to parameter.
"""
def __new__(cls, name: str):
obj = sympy.Symbol.__new__(cls, name)
return obj
class Parameterization:
"""A object used to store parameterization information
about geometries.
Parameters
----------
    param_ranges : Dict[Parameter, Union[float, Tuple[float, float], np.ndarray (N, 1)]]
Dictionary of Parameters and their ranges. The ranges can be one of the following
types,
:obj: Float will sample the parameter equal to this value.
:obj: Tuple of two float as the bounding range to sample parameter from.
:obj: `np.ndarray` as a discrete list of possible values for the parameter.
"""
def __init__(
self,
param_ranges: Dict[
Parameter, Union[float, Tuple[float, float], np.ndarray]
] = {},
):
# store param ranges
self.param_ranges = param_ranges
@property
def parameters(self):
return [str(x) for x in self.param_ranges.keys()]
def sample(self, nr_points: int, quasirandom: bool = False):
"""Sample parameterization values.
Parameters
----------
nr_points : int
Number of points sampled from parameterization.
quasirandom : bool
If true then sample the points using Halton sequences.
Default is False.
"""
return {
str(key): value
for key, value in _sample_ranges(
nr_points, self.param_ranges, quasirandom
).items()
}
def union(self, other):
new_param_ranges = self.param_ranges.copy()
for key, value in other.param_ranges.items():
new_param_ranges[key] = value
return Parameterization(new_param_ranges)
@classmethod
def combine(cls, p1, p2):
assert len(set(p1.parameters).intersection(set(p2.parameters))) == 0, (
"Combining parameterizations when they have overlapping parameters: p1 "
+ str(p1)
+ ", p2 "
+ str(p2)
)
new_param_ranges = p1.param_ranges.copy()
new_param_ranges.update(p2.param_ranges.copy())
return cls(new_param_ranges)
def copy(self):
return Parameterization(self.param_ranges.copy())
def __str__(self):
return str(self.param_ranges)
class OrderedParameterization(Parameterization):
"""A object used to store ordered parameterization information
about user-specified keys.
Parameters
----------
    param_ranges : Dict[Parameter, Union[float, Tuple[float, float], np.ndarray (N, 1)]]
Dictionary of Parameters and their ranges. The ranges can be one of the following
types,
:obj: Float will sample the parameter equal to this value.
:obj: Tuple of two float as the bounding range to sample parameter from.
:obj: `np.ndarray` as a discrete list of possible values for the parameter.
"""
def __init__(self, param_ranges, key):
super().__init__(param_ranges)
self.key = key
def sample(
        self, nr_points: int, quasirandom: bool = False, sort: Optional[str] = "ascending"
):
"""Sample ordered parameterization values.
Parameters
----------
nr_points : int
Number of points sampled from parameterization.
quasirandom : bool
If true then sample the points using Halton sequences.
Default is False.
sort : None or {'ascending','descending'}
If 'ascending' then sample the sorted points in ascending order.
If 'descending' then sample the sorted points in descending order.
Default is 'ascending'.
"""
sample_dict = {}
for key, value in _sample_ranges(
nr_points, self.param_ranges, quasirandom
).items():
# sort the samples for the given key
if key == self.key:
if sort == "ascending":
value = np.sort(value, axis=0)
elif sort == "descending":
                    value = np.sort(value, axis=0)[::-1]
                elif sort is None:
                    pass
else:
raise ValueError(
"Sort must be one of None, 'ascending', or 'descending' (got {})".format(
str(sort)
)
)
sample_dict[str(key)] = value
return sample_dict
class Bounds:
"""A object used to store bounds for geometries.
Parameters
----------
    bound_ranges : Dict[Parameter, Tuple[Union[float, sympy.Basic], Union[float, sympy.Basic]]]
Dictionary of Parameters with names `"x"`, `"y"`, or `"z"`. The value given for each of these is
a tuple of the lower and upper bound. Sympy expressions can be used to define these upper and lower
bounds.
parameterization : Parameterization
A Parameterization object used when the upper and lower bounds are parameterized.
"""
def __init__(
self,
bound_ranges: Dict[
Parameter, Tuple[Union[float, sympy.Basic], Union[float, sympy.Basic]]
],
parameterization: Parameterization = Parameterization(),
):
# store internal parameterization
self.parameterization = parameterization
# store bounds
self.bound_ranges = bound_ranges
@property
def dims(self):
"""
Returns
-------
dims : list of strings
output can be ['x'], ['x','y'], or ['x','y','z']
"""
return [str(x) for x in self.bound_ranges.keys()]
def sample(
self,
nr_points: int,
parameterization: Union[None, Parameterization] = None,
quasirandom: bool = False,
):
"""Sample points in Bounds.
Parameters
----------
nr_points : int
Number of points sampled from parameterization.
parameterization : Parameterization
            Given if sampling bounds with a different parameterization than the internal one stored in Bounds. Default is to not use this.
quasirandom : bool
If true then sample the points using Halton sequences.
Default is False.
"""
        if parameterization is None:
parameterization = self.parameterization
computed_bound_ranges = self._compute_bounds(parameterization)
return {
str(key): value
for key, value in _sample_ranges(
nr_points, computed_bound_ranges, quasirandom
).items()
}
def volume(self, parameterization: Union[None, Parameterization] = None):
"""Compute volume of bounds.
Parameters
----------
parameterization : Parameterization
            Given if sampling bounds with a different parameterization than the internal one stored in Bounds. Default is to not use this.
"""
# compute bounds from parameterization
computed_bound_ranges = self._compute_bounds(parameterization)
return np.prod(
[value[1] - value[0] for value in computed_bound_ranges.values()]
)
def union(self, other):
new_parameterization = self.parameterization.union(other.parameterization)
new_bound_ranges = {}
for (key, (lower_1, upper_1)), (lower_2, upper_2) in zip(
self.bound_ranges.items(), other.bound_ranges.values()
):
# compute new lower bound
if isinstance(lower_1, sympy.Basic) or isinstance(lower_2, sympy.Basic):
new_lower = sympy.Min(lower_1, lower_2)
elif isinstance(lower_1, (float, int)):
new_lower = min(lower_1, lower_2)
# compute new upper bound
if isinstance(upper_1, sympy.Basic) or isinstance(upper_2, sympy.Basic):
new_upper = sympy.Max(upper_1, upper_2)
elif isinstance(upper_1, (float, int)):
new_upper = max(upper_1, upper_2)
# add to list of bound ranges
new_bound_ranges[key] = (new_lower, new_upper)
return Bounds(new_bound_ranges, new_parameterization)
def intersection(self, other):
new_parameterization = self.parameterization.union(other.parameterization)
new_bound_ranges = {}
for (key, (lower_1, upper_1)), (lower_2, upper_2) in zip(
self.bound_ranges.items(), other.bound_ranges.values()
):
# compute new lower bound
if isinstance(lower_1, sympy.Basic) or isinstance(lower_2, sympy.Basic):
new_lower = sympy.Max(lower_1, lower_2)
elif isinstance(lower_1, (float, int)):
new_lower = max(lower_1, lower_2)
# compute new upper bound
if isinstance(upper_1, sympy.Basic) or isinstance(upper_2, sympy.Basic):
new_upper = sympy.Min(upper_1, upper_2)
elif isinstance(upper_1, (float, int)):
new_upper = min(upper_1, upper_2)
# add to list of bound ranges
new_bound_ranges[key] = (new_lower, new_upper)
return Bounds(new_bound_ranges, new_parameterization)
def scale(self, x, parameterization=Parameterization()):
scaled_bound_ranges = {
key: (lower * x, upper * x)
for key, (lower, upper) in self.bound_ranges.items()
}
return Bounds(
scaled_bound_ranges, self.parameterization.union(parameterization)
)
def translate(self, xyz, parameterization=Parameterization()):
translated_bound_ranges = {
key: (lower + x, upper + x)
for (key, (lower, upper)), x in zip(self.bound_ranges.items(), xyz)
}
return Bounds(
translated_bound_ranges, self.parameterization.union(parameterization)
)
def rotate(self, angle, axis, parameterization=Parameterization()):
# rotate bounding box
rotated_dims = [Parameter(key) for key in self.dims if key != axis]
bounding_points = itertools.product(
*[value for value in self.bound_ranges.values()]
)
rotated_bounding_points = []
for p in bounding_points:
p = {Parameter(key): value for key, value in zip(self.dims, p)}
rotated_p = {**p}
rotated_p[rotated_dims[0]] = (
sympy.cos(angle) * p[rotated_dims[0]]
- sympy.sin(angle) * p[rotated_dims[1]]
)
rotated_p[rotated_dims[1]] = (
sympy.sin(angle) * p[rotated_dims[0]]
+ sympy.cos(angle) * p[rotated_dims[1]]
)
rotated_bounding_points.append(rotated_p)
# find new bounds from rotated bounds
rotated_bound_ranges = {**self.bound_ranges}
for d in self.dims:
# find upper and lower bound
a = [p[Parameter(d)] for p in rotated_bounding_points]
lower = sympy.Min(*a)
upper = sympy.Max(*a)
if lower.is_number:
lower = float(lower)
if upper.is_number:
upper = float(upper)
rotated_bound_ranges[Parameter(d)] = (lower, upper)
return Bounds(
rotated_bound_ranges, self.parameterization.union(parameterization)
)
def copy(self):
return Bounds(self.bound_ranges.copy(), self.parameterization.copy())
def _compute_bounds(self, parameterization=None, nr_sample=10000):
# TODO this currently guesses the bounds by randomly sampling parameterization. This can be improved in the future.
# get new parameterization if provided
        if parameterization is None:
parameterization = self.parameterization
# set bound ranges
computed_bound_ranges = {}
for key, (lower, upper) in self.bound_ranges.items():
# compute lower
if isinstance(lower, (float, int)):
computed_lower = lower
elif isinstance(lower, sympy.Basic):
fn_lower = np_lambdify(lower, parameterization.parameters)
computed_lower = np.min(fn_lower(**parameterization.sample(nr_sample)))
else:
raise ValueError(
"Bound has non numeric or sympy values: " + str(self.bound_ranges)
)
# compute upper
if isinstance(upper, (float, int)):
computed_upper = upper
elif isinstance(upper, sympy.Basic):
fn_upper = np_lambdify(upper, parameterization.parameters)
computed_upper = np.max(fn_upper(**parameterization.sample(nr_sample)))
else:
raise ValueError(
"Bound has non numeric or sympy values: " + str(self.bound_ranges)
)
# store new range
computed_bound_ranges[key] = (computed_lower, computed_upper)
return computed_bound_ranges
def __str__(self):
return (
"bound_ranges: "
+ str(self.bound_ranges)
+ " param_ranges: "
+ str(self.parameterization)
)
def _sample_ranges(batch_size, ranges, quasirandom=False):
parameterization = {}
if quasirandom:
prime_index = 0
primes = create_primes(1000)
for key, value in ranges.items():
# sample parameter
if isinstance(value, tuple):
if quasirandom:
indices = [idx for idx in range(batch_size)]
rand_param = (
value[0]
+ (value[1] - value[0])
* create_samples(indices, number_base=primes[prime_index]).reshape(
-1, 1
)
).astype(float)
prime_index += 1
else:
rand_param = np.random.uniform(value[0], value[1], size=(batch_size, 1))
elif isinstance(value, (float, int)):
rand_param = np.zeros((batch_size, 1)) + value
elif isinstance(value, np.ndarray):
np_index = np.random.choice(value.shape[0], batch_size)
rand_param = value[np_index, :]
elif isinstance(value, Callable):
rand_param = value(batch_size)
else:
raise ValueError(
"range type: "
+ str(type(value))
+ " not supported, try (tuple, or np.ndarray)"
)
# if dependent sample break up parameter
if isinstance(key, tuple):
for i, k in enumerate(key):
parameterization[k] = rand_param[:, i : i + 1]
else:
parameterization[key] = rand_param
return parameterization
| modulus-sym-main | modulus/sym/geometry/parameterization.py |
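# --- Usage sketch (editorial example, not part of the repository file above) ---
# Parameterization.sample draws a value per Parameter according to its range type
# (fixed float, (low, high) tuple, or a discrete np.ndarray of shape (N, 1)), while
# Bounds.sample draws spatial points and Bounds.volume gives the box volume used for
# Monte Carlo weights.
import numpy as np
from modulus.sym.geometry.parameterization import Parameter, Parameterization, Bounds

t, radius = Parameter("t"), Parameter("radius")
param = Parameterization({t: (0.0, 1.0), radius: np.array([[0.5], [0.75], [1.0]])})
samples = param.sample(5)            # {"t": (5, 1) array, "radius": (5, 1) array}

x, y = Parameter("x"), Parameter("y")
bounds = Bounds({x: (-1.0, 1.0), y: (0.0, 2.0)})
box_volume = bounds.volume()         # 2.0 * 2.0 = 4.0
points = bounds.sample(100)          # {"x": (100, 1), "y": (100, 1)}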
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines a Discrete geometry
"""
import numpy as np
import csv
from stl import mesh as np_mesh
from sympy import Symbol
from .geometry import Geometry
from .parameterization import Parameterization, Bounds, Parameter
from modulus.sym.constants import diff_str
class DiscreteGeometry(Geometry):
"""
Constructs a geometry for a discrete list of geometries
"""
def __init__(
self, geometries, parameterization=Parameterization(), interior_epsilon=1e-6
):
# make sdf function
def _sdf(list_sdf, discrete_parameterization, dims):
def sdf(invar, params, compute_sdf_derivatives=False):
# make output array to gather sdf values
outputs = {"sdf": np.full_like(next(iter(invar.values())), np.nan)}
if compute_sdf_derivatives:
for d in dims:
outputs["sdf" + diff_str + d] = np.full_like(
next(iter(invar.values())), -1000
)
# compute sdf values for given parameterizations
for i, f in enumerate(list_sdf):
# get sdf index for each point evaluating on
sdf_index = np.full_like(
next(iter(invar.values())), True
) # TODO this could be simplified
for key in discrete_parameterization.parameters:
expanded_d = np.tile(
discrete_parameterization.param_ranges[Parameter(key)][
i : i + 1
],
(params[key].shape[0], 1),
)
sdf_index = np.logical_and(
sdf_index, (params[key] == expanded_d)
)
# compute sdf values on indexed sdf function
sdf_indexed_invar = {
key: value[sdf_index[:, 0], :] for key, value in invar.items()
}
sdf_indexed_params = {
key: value[sdf_index[:, 0], :] for key, value in params.items()
}
computed_sdf = f(
sdf_indexed_invar, sdf_indexed_params, compute_sdf_derivatives
)
# update output values
for key, value in computed_sdf.items():
outputs[key][sdf_index[:, 0], :] = value
return outputs
return sdf
new_sdf = _sdf(
[g.sdf for g in geometries], parameterization, geometries[0].dims
)
# compute bounds
bounds = geometries[0].bounds
for g in geometries[1:]:
bounds = bounds.union(g.bounds)
# make curves
new_curves = []
for g in geometries:
new_curves += g.curves
# initialize geometry
super().__init__(
new_curves,
new_sdf,
dims=len(geometries[0].dims),
bounds=bounds,
parameterization=parameterization,
)
class DiscreteCurve:
def __init__(self, curves, discrete_parameterization=Parameterization()):
# store attributes
self.curves = curves
self._dims = len(curves[0].dims)
self.discrete_parameterization = discrete_parameterization
def sample(
self, nr_points, criteria=None, parameterization=None, quasirandom=False
):
# use internal parameterization if not given
if parameterization is None:
parameterization = self.parameterization
# continually sample points throwing out points that don't satisfy criteria
invar = {
key: np.empty((0, 1))
for key in self.dims + ["normal_" + x for x in self.dims] + ["area"]
}
params = {key: np.empty((0, 1)) for key in parameterization.parameters}
total_sampled = 0
total_tried = 0
nr_try = 0
while True:
# sample curve
local_invar, local_params = self._sample(
nr_points, parameterization, quasirandom
)
# compute given criteria and remove points
if criteria is not None:
computed_criteria = criteria(local_invar, local_params)
local_invar = {
key: value[computed_criteria[:, 0], :]
for key, value in local_invar.items()
}
local_params = {
key: value[computed_criteria[:, 0], :]
for key, value in local_params.items()
}
# store invar
for key in local_invar.keys():
invar[key] = np.concatenate([invar[key], local_invar[key]], axis=0)
# store params
for key in local_params.keys():
params[key] = np.concatenate([params[key], local_params[key]], axis=0)
# keep track of sampling
total_sampled = next(iter(invar.values())).shape[0]
total_tried += nr_points
nr_try += 1
# break when finished sampling
if total_sampled >= nr_points:
for key, value in invar.items():
invar[key] = value[:nr_points]
for key, value in params.items():
params[key] = value[:nr_points]
break
# check if couldn't sample
if nr_try > 1000 and total_sampled < 1:
raise Exception("Unable to sample curve")
return invar, params
| modulus-sym-main | modulus/sym/geometry/discrete_geometry.py |
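# --- Usage sketch (editorial example, not part of the repository file above) ---
# DiscreteGeometry dispatches SDF evaluation over a list of geometries by matching a
# sampled discrete parameter value against the i-th row of the parameter range array.
# geo_a and geo_b below are hypothetical pre-built 2D geometries (e.g. two Rectangles).
import numpy as np
from modulus.sym.geometry.discrete_geometry import DiscreteGeometry
from modulus.sym.geometry.parameterization import Parameter, Parameterization

geo_id = Parameter("geo_id")
discrete_param = Parameterization({geo_id: np.array([[0.0], [1.0]])})
switched = DiscreteGeometry([geo_a, geo_b], parameterization=discrete_param)
# points sampled with geo_id == 0.0 use geo_a's SDF, geo_id == 1.0 use geo_b's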
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from typing import List, Tuple, Dict
class ADF(torch.nn.Module):
"""
Used for hard imposition of boundary conditions.
Currently supports 2d geometries and Dirichlet boundary conditions.
Contributors: M. A. Nabian, R. Gladstone, H. Meidani, N. Sukumar, A. Srivastava
Reference: "Sukumar, N. and Srivastava, A., 2021.
Exact imposition of boundary conditions with distance functions in physics-informed deep neural networks.
Computer Methods in Applied Mechanics and Engineering, p.114333."
"""
def __init__(self):
super().__init__()
self.mu: float = 2.0
self.m: float = 2.0
self.eps: float = 1e-8
def forward(self, invar):
raise RuntimeError("No forward method was defined for ADF or its child class")
@staticmethod
def r_equivalence(omegas: List[torch.Tensor], m: float = 2.0) -> torch.Tensor:
"""
Computes the R-equivalence of a collection of approximate distance functions
Parameters
----------
omegas : List[torch.Tensor]
List of ADFs used to compute the R-equivalence.
m: float
Normalization order
Returns
-------
omega_E : torch.Tensor
R-equivalence distance
"""
omega_E = torch.zeros_like(omegas[0])
for omega in omegas:
omega_E += 1.0 / omega**m
omega_E = 1.0 / omega_E ** (1.0 / m)
return omega_E
@staticmethod
def transfinite_interpolation(
bases: List[torch.Tensor], indx: int, eps: float = 1e-8
) -> torch.Tensor:
"""
Performs transfinite interpolation of the boundary conditions
Parameters
----------
bases: List[torch.Tensor]
List of ADFs used for the transfinite interpolation.
indx: int
index of the interpolation basis
eps: float
Small value to avoid division by zero
Returns
-------
w : torch.Tensor
Interpolation basis corresponding to the input index
"""
bases_reduced = [bases[i] for i in range(len(bases)) if i != indx]
numerator = torch.prod(torch.stack(bases_reduced), dim=0)
denominator = 0.0
for j in range(len(bases)):
denom_term = [bases[i] for i in range(len(bases)) if i != j]
denominator += torch.prod(torch.stack(denom_term), dim=0)
w = torch.div(numerator, denominator + eps)
return w
@staticmethod
def infinite_line_adf(
points: Tuple[torch.Tensor], point_1: Tuple[float], point_2: Tuple[float]
) -> torch.Tensor:
"""
Computes the pointwise approximate distance for an infinite line
Parameters
----------
points: Tuple[torch.Tensor]
ADF will be computed on these points
point_1: Tuple[float]
One of the two points that form the infinite line
point_2: Tuple[float]
One of the two points that form the infinite line
Returns
-------
omega : torch.Tensor
pointwise approximate distance
"""
L = ADF._distance(point_1, point_2)
omega = (
(points[0] - point_1[0]) * (point_2[1] - point_1[1])
- (points[1] - point_1[1]) * (point_2[0] - point_1[0])
) / L
return omega
@staticmethod
def line_segment_adf(
points: Tuple[torch.Tensor], point_1: Tuple[float], point_2: Tuple[float]
) -> torch.Tensor:
"""
Computes the pointwise approximate distance for a line segment
Parameters
----------
points: Tuple[torch.Tensor]
ADF will be computed on these points
point_1: Tuple[float]
Point on one end of the line segment
point_2: Tuple[float]
            Point on the other end of the line segment
Returns
-------
omega : torch.Tensor
pointwise approximate distance
"""
L = ADF._distance(point_1, point_2)
center = ADF._center(point_1, point_2)
f = ADF.infinite_line_adf(points, point_1, point_2)
t = ADF.circle_adf(points, L / 2, center)
phi = torch.sqrt(t**2 + f**4)
omega = torch.sqrt(f**2 + ((phi - t) / 2) ** 2)
return omega
@staticmethod
def circle_adf(
points: Tuple[torch.Tensor], radius: float, center: Tuple[float]
) -> torch.Tensor:
"""
Computes the pointwise approximate distance for a circle
Parameters
----------
points: Tuple[torch.Tensor]
ADF will be computed on these points
radius: float
Radius of the circle
center: Tuple[float]
Center of the circle
Returns
-------
omega : torch.Tensor
pointwise approximate distance
"""
omega = (
radius**2 - ((points[0] - center[0]) ** 2 + (points[1] - center[1]) ** 2)
) / (2 * radius)
return omega
@staticmethod
def trimmed_circle_adf(
points: Tuple[torch.Tensor],
point_1: Tuple[float],
point_2: Tuple[float],
sign: int,
radius: float,
center: float,
) -> torch.Tensor:
"""
Computes the pointwise approximate distance of a trimmed circle
Parameters
----------
points: Tuple[torch.Tensor]
ADF will be computed on these points
point_1: Tuple[float]
One of the two points that form the trimming infinite line
point_2: Tuple[float]
One of the two points that form the trimming infinite line
sign: int
Specifies the trimming side
radius: float
Radius of the circle
center: Tuple[float]
Center of the circle
Returns
-------
omega : torch.Tensor
pointwise approximate distance
"""
        assert sign != 0, "sign should be non-zero"
f = ADF.circle_adf(points, radius, center)
t = np.sign(sign) * ADF.infinite_line_adf(points, point_1, point_2)
phi = torch.sqrt(t**2 + f**4)
omega = torch.sqrt(f**2 + ((phi - t) / 2) ** 2)
return omega
@staticmethod
def _distance(point_1: Tuple[float], point_2: Tuple[float]) -> torch.Tensor:
"""
Computes the distance between two points
point_1: Tuple[float]
The first point
point_2: Tuple[float]
The second point
Returns
-------
distance : torch.Tensor
distance between the two points
"""
distance = np.sqrt(
(point_2[0] - point_1[0]) ** 2 + (point_2[1] - point_1[1]) ** 2
)
return distance
@staticmethod
def _center(point_1: Tuple[float], point_2: Tuple[float]) -> Tuple[float]:
"""
Computes the center of the two points
point_1: Tuple[float]
The first point
point_2: Tuple[float]
The second point
Returns
-------
        center : Tuple[float]
            Center of the two points
"""
center = ((point_1[0] + point_2[0]) / 2, (point_1[1] + point_2[1]) / 2)
return center
| modulus-sym-main | modulus/sym/geometry/adf.py |
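# --- Usage sketch (editorial example, not part of the repository file above) ---
# The static helpers on ADF can be composed into an approximate distance function for
# a polygonal boundary: each edge contributes a line_segment_adf and r_equivalence
# blends them into one field that vanishes on the whole boundary (used for hard
# Dirichlet boundary conditions).
import torch
from modulus.sym.geometry.adf import ADF

grid = torch.linspace(0.05, 0.95, 16)
xx, yy = torch.meshgrid(grid, grid, indexing="ij")
points = (xx.reshape(-1, 1), yy.reshape(-1, 1))

# unit-square boundary as four line segments
corners = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
edges = [(corners[i], corners[(i + 1) % 4]) for i in range(4)]
omegas = [ADF.line_segment_adf(points, p1, p2) for p1, p2 in edges]
omega = ADF.r_equivalence(omegas, m=2.0)   # ~0 on the boundary, positive inside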
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Domain
"""
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
import itertools
import os
from modulus.sym.domain.validator import Validator
from modulus.sym.domain.inferencer import Inferencer
from modulus.sym.domain.monitor import Monitor
from modulus.sym.loss.aggregator import NTK
from modulus.sym.models.arch import FuncArch
class Domain:
"""
Domain object that contains all needed information about
constraints, validators, inferencers, and monitors.
Parameters
----------
name : str
Unique name for domain.
encoding : Union[np.ndarray, None]
Possible encoding vector for domain. Currently not in use.
"""
def __init__(self, name: str = "domain", encoding=None):
super().__init__()
self.name = name
self.encoding = encoding
self.constraints = {}
self.validators = {}
self.inferencers = {}
self.monitors = {}
self.ntk = None
def rec_constraints(self, base_dir: str):
constraint_data_dir = base_dir + "/constraints/"
# exist_ok=True to handle race conditions
os.makedirs(constraint_data_dir, exist_ok=True)
for key, constraint in self.constraints.items():
constraint.save_batch(constraint_data_dir + key)
def rec_validators(
self, base_dir: str, writer: SummaryWriter, save_filetypes: str, step: int
):
"""Run and save results of validator nodes"""
validator_data_dir = base_dir + "/validators/"
# exist_ok=True to handle race conditions
os.makedirs(validator_data_dir, exist_ok=True)
metrics = {}
for key, validator in self.validators.items():
valid_losses = validator.save_results(
key, validator_data_dir, writer, save_filetypes, step
)
# If validator returned add to metrics
if isinstance(valid_losses, dict):
metrics.update(valid_losses)
return metrics
def rec_inferencers(
self, base_dir: str, writer: SummaryWriter, save_filetypes: str, step: int
):
"""Run and save results of inferencer nodes"""
inferencer_data_dir = base_dir + "/inferencers/"
# exist_ok=True to handle race conditions
os.makedirs(inferencer_data_dir, exist_ok=True)
for key, inferencer in self.inferencers.items():
inferencer.save_results(
key, inferencer_data_dir, writer, save_filetypes, step
)
def rec_stream(
self,
inferencer,
name,
base_dir: str,
step: int,
save_results: bool,
save_filetypes: str,
to_cpu: bool,
):
"""Run and save results of stream"""
inferencer_data_dir = base_dir + "/inferencers/"
if save_results:
# exist_ok=True to handle race conditions
os.makedirs(inferencer_data_dir, exist_ok=True)
return inferencer.save_stream(
name,
inferencer_data_dir,
None,
step,
save_results,
save_filetypes,
to_cpu,
)
def rec_monitors(self, base_dir: str, writer: SummaryWriter, step: int):
"""Run and save results of monitor nodes"""
monitor_data_dir = base_dir + "/monitors/"
# exist_ok=True to handle race conditions
os.makedirs(monitor_data_dir, exist_ok=True)
metrics = {}
for key, monitor in self.monitors.items():
metrics.update(monitor.save_results(key, writer, step, monitor_data_dir))
return metrics
def get_num_losses(self):
return len(
set(itertools.chain(*[c.output_names for c in self.constraints.values()]))
)
def load_data(self, static: bool = False):
for key, constraint in self.constraints.items():
if static:
constraint.load_data_static()
else:
constraint.load_data()
def compute_losses(self, step: int):
losses = {}
if self.ntk is None:
for key, constraint in self.constraints.items():
# TODO: Test streaming here
torch.cuda.nvtx.range_push(f"Constraint Forward: {key}")
constraint.forward()
torch.cuda.nvtx.range_pop()
for key, constraint in self.constraints.items():
for loss_key, value in constraint.loss(step).items():
if loss_key not in list(losses.keys()):
losses[loss_key] = value
else:
losses[loss_key] += value
else:
losses, self.ntk_weights = self.ntk(
self.constraints, self.ntk_weights, step
)
return losses
def get_saveable_models(self):
models = []
for c in self.constraints.values():
# strip DDP specific module layer
if hasattr(c.model, "module"):
model = c.model.module
else:
model = c.model
for m in model.evaluation_order:
# For FuncArch, we only need to save the wrapped Arch model
if isinstance(m, FuncArch):
m = m.arch
if (m not in models) and m.saveable:
models.append(m)
models = sorted(models, key=lambda x: x.name)
assert len(set([m.name for m in models])) == len(
models
), "Every model in graph needs a unique name: " + str([m.name for m in models])
return models
def create_global_optimizer_model(self):
models = []
# TODO: Add aggregator parameters into module list here
for c in self.constraints.values():
# strip DDP specific module layer
if hasattr(c.model, "module"):
model = c.model.module
else:
model = c.model
for m in model.optimizer_list:
if isinstance(m, FuncArch):
m = m.arch
if m not in models:
models.append(m)
models = sorted(models, key=lambda x: x.name)
assert len(set([m.name for m in models])) == len(
models
), "Every model in graph needs a unique name: " + str([m.name for m in models])
models = nn.ModuleList(models)
return models
def add_constraint(
self,
constraint,
name: str = None,
):
"""
Method to add a constraint to domain.
Parameters
----------
constraint : Constraint
Constraint to be added to domain.
name : str
Unique name of constraint. If duplicate is
found then name is iterated to avoid duplication.
"""
# add constraint to list
name = Domain._iterate_name(name, "pointwise_bc", list(self.constraints.keys()))
self.constraints[name] = constraint
def add_validator(
self,
validator: Validator,
name: str = None,
):
"""
Method to add a validator to domain.
Parameters
----------
validator : Validator
Validator to be added to domain.
name : str
Unique name of validator. If duplicate is
found then name is iterated to avoid duplication.
"""
# add validator
name = Domain._iterate_name(name, "validator", list(self.validators.keys()))
self.validators[name] = validator
def add_inferencer(
self,
inferencer: Inferencer,
name: str = None,
):
"""
Method to add a inferencer to domain.
Parameters
----------
inferencer : Inferencer
Inferencer to be added to domain.
name : str
Unique name of inferencer. If duplicate is
found then name is iterated to avoid duplication.
"""
# add inferencer
name = Domain._iterate_name(name, "inferencer", list(self.inferencers.keys()))
self.inferencers[name] = inferencer
def add_monitor(
self,
monitor: Monitor,
name: str = None,
):
"""
Method to add a monitor to domain.
Parameters
----------
monitor : Monitor
Monitor to be added to domain.
name : str
Unique name of monitor. If duplicate is
found then name is iterated to avoid duplication.
"""
# add monitor
name = Domain._iterate_name(name, "monitor", list(self.monitors.keys()))
self.monitors[name] = monitor
def add_ntk(self, ntk: NTK):
self.ntk = ntk
self.ntk_weights = {}
@staticmethod
def _iterate_name(input_name, default_name, current_names):
if input_name is None:
name = default_name
else:
name = input_name
if name in current_names:
i = 2
while True:
if name + "_" + str(i) not in current_names:
name = name + "_" + str(i)
break
i += 1
return name
| modulus-sym-main | modulus/sym/domain/domain.py |
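# --- Usage sketch (editorial example, not part of the repository file above) ---
# A Domain collects constraints, validators, inferencers, and monitors under unique
# names (duplicates are suffixed by _iterate_name); the Solver later drives it through
# compute_losses and the rec_* methods. The constraint/validator/monitor objects below
# are hypothetical and assumed to be constructed elsewhere.
from modulus.sym.domain import Domain

domain = Domain(name="flow")
domain.add_constraint(interior_constraint, name="interior")
domain.add_constraint(boundary_constraint)        # stored under the default "pointwise_bc"
domain.add_validator(grid_validator, name="grid")
domain.add_monitor(pressure_monitor, name="pressure")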
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .domain import Domain
| modulus-sym-main | modulus/sym/domain/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Monitor:
"""
Monitor base class
"""
def save_results(self, name, writer, step, data_dir):
raise NotImplementedError("Subclass of Monitor needs to implement this")
| modulus-sym-main | modulus/sym/domain/monitor/monitor.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .monitor import Monitor
from .pointwise import PointwiseMonitor
| modulus-sym-main | modulus/sym/domain/monitor/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Monitor for Solver class
"""
import numpy as np
from modulus.sym.domain.monitor import Monitor
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.constants import TF_SUMMARY
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.io import dict_to_csv, csv_to_dict
class PointwiseMonitor(Monitor):
"""
    Pointwise monitor that computes metrics on pointwise data
Parameters
----------
invar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays as input.
output_names : List[str]
List of outputs needed for metric.
metrics : Dict[str, Callable]
        Dictionary of pytorch functions whose input is a dictionary of
        torch tensors whose keys are the `output_names`. The keys
to `metrics` will be used to label the metrics in tensorboard/csv outputs.
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
requires_grad : bool = False
If automatic differentiation is needed for computing results.
"""
def __init__(self, invar, output_names, metrics, nodes, requires_grad=False):
# construct model from nodes
self.requires_grad = requires_grad
self.model = Graph(
nodes, Key.convert_list(invar.keys()), Key.convert_list(output_names)
)
self.manager = DistributedManager()
self.device = self.manager.device
self.model.to(self.device)
# set metrics
self.metrics = metrics
self.monitor_outvar_store = {}
# set invar
self.invar = Constraint._set_device(invar, device=self.device)
def save_results(self, name, writer, step, data_dir):
# run forward inference
invar = Constraint._set_device(
self.invar, device=self.device, requires_grad=self.requires_grad
)
outvar = self.model(invar)
metrics = {key: func({**invar, **outvar}) for key, func in self.metrics.items()}
for k, m in metrics.items():
# add tensorboard scalars
if TF_SUMMARY:
writer.add_scalar("monitor/" + name + "/" + k, m, step, new_style=True)
else:
writer.add_scalar("Monitors/" + name + "/" + k, m, step, new_style=True)
# write csv files
if k not in self.monitor_outvar_store.keys():
try:
self.monitor_outvar_store[k] = csv_to_dict(data_dir + k + ".csv")
except:
self.monitor_outvar_store[k] = {
"step": np.array([[step]]),
k: m.detach().cpu().numpy().reshape(-1, 1),
}
else:
monitor_outvar = {
"step": np.array([[step]]),
k: m.detach().cpu().numpy().reshape(-1, 1),
}
self.monitor_outvar_store[k] = {
key: np.concatenate([value_1, value_2], axis=0)
for (key, value_1), (key, value_2) in zip(
self.monitor_outvar_store[k].items(), monitor_outvar.items()
)
}
dict_to_csv(self.monitor_outvar_store[k], filename=data_dir + k + ".csv")
return metrics
| modulus-sym-main | modulus/sym/domain/monitor/pointwise.py |
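# --- Usage sketch (editorial example, not part of the repository file above) ---
# PointwiseMonitor evaluates the unrolled graph on a fixed point cloud and logs each
# metric callable to TensorBoard and a CSV file. `nodes` below is a hypothetical list
# of Modulus Nodes able to produce "p" from "x" and "y".
import numpy as np
import torch
from modulus.sym.domain.monitor import PointwiseMonitor

invar = {"x": np.random.uniform(0, 1, (256, 1)), "y": np.random.uniform(0, 1, (256, 1))}
monitor = PointwiseMonitor(
    invar=invar,
    output_names=["p"],
    metrics={"mean_pressure": lambda var: torch.mean(var["p"])},
    nodes=nodes,
)
# domain.add_monitor(monitor, name="mean_pressure")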
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class Validator:
"""
Validator base class
"""
def forward_grad(self, invar):
pred_outvar = self.model(invar)
return pred_outvar
def forward_nograd(self, invar):
with torch.no_grad():
pred_outvar = self.model(invar)
return pred_outvar
def save_results(self, name, results_dir, writer, save_filetypes, step):
raise NotImplementedError("Subclass of Validator needs to implement this")
@staticmethod
def _l2_relative_error(true_var, pred_var): # TODO replace with metric classes
new_var = {}
for key in true_var.keys():
new_var["l2_relative_error_" + str(key)] = torch.sqrt(
torch.mean(torch.square(true_var[key] - pred_var[key]))
/ torch.var(true_var[key])
)
return new_var
| modulus-sym-main | modulus/sym/domain/validator/validator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import torch
import numpy as np
from modulus.sym.domain.validator import Validator
from modulus.sym.domain.constraint import Constraint
from modulus.sym.utils.io.vtk import grid_to_vtk
from modulus.sym.utils.io import GridValidatorPlotter, DeepONetValidatorPlotter
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.constants import TF_SUMMARY
from modulus.sym.distributed import DistributedManager
from modulus.sym.dataset import Dataset, DictGridDataset
class GridValidator(Validator):
"""Data-driven grid field validator
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
dataset: Dataset
dataset which contains invar and true outvar examples
batch_size : int, optional
Batch size used when running validation, by default 100
plotter : GridValidatorPlotter
Modulus plotter for showing results in tensorboard.
requires_grad : bool = False
If automatic differentiation is needed for computing results.
num_workers : int, optional
Number of dataloader workers, by default 0
"""
def __init__(
self,
nodes: List[Node],
dataset: Dataset,
batch_size: int = 100,
plotter: GridValidatorPlotter = None,
requires_grad: bool = False,
num_workers: int = 0,
):
# get dataset and dataloader
self.dataset = dataset
self.dataloader = Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers,
distributed=False,
infinite=False,
)
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(self.dataset.invar_keys),
Key.convert_list(self.dataset.outvar_keys),
)
self.manager = DistributedManager()
self.device = self.manager.device
self.model.to(self.device)
        # set forward method
self.requires_grad = requires_grad
self.forward = self.forward_grad if requires_grad else self.forward_nograd
# set plotter
self.plotter = plotter
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
losses = GridValidator._l2_relative_error(true_outvar_cpu, pred_outvar_cpu)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
# save batch to vtk file TODO clean this up after graph unroll stuff
named_true_outvar = {"true_" + k: v for k, v in true_outvar.items()}
named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
# save batch to vtk/npz file TODO clean this up after graph unroll stuff
if "np" in save_filetypes:
np.savez(
results_dir + name, {**invar, **named_true_outvar, **named_pred_outvar}
)
if "vtk" in save_filetypes:
grid_to_vtk(
{**invar, **named_true_outvar, **named_pred_outvar}, results_dir + name
)
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Validators",
name,
results_dir,
writer,
step,
invar,
true_outvar,
pred_outvar,
)
# add tensorboard scalars
for k, loss in losses.items():
if TF_SUMMARY:
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
else:
writer.add_scalar(
"Validators/" + name + "/" + k, loss, step, new_style=True
)
return losses
class _DeepONet_Validator(Validator):
def __init__(
self,
nodes: List[Node],
invar_branch: Dict[str, np.array],
invar_trunk: Dict[str, np.array],
true_outvar: Dict[str, np.array],
batch_size: int,
plotter: DeepONetValidatorPlotter,
requires_grad: bool,
):
# TODO: add support for other datasets?
# get dataset and dataloader
self.dataset = DictGridDataset(invar=invar_branch, outvar=true_outvar)
self.dataloader = Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=0,
distributed=False,
infinite=False,
)
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(invar_branch.keys())
+ Key.convert_list(invar_trunk.keys()),
Key.convert_list(true_outvar.keys()),
)
self.manager = DistributedManager()
self.device = self.manager.device
self.model.to(self.device)
        # set forward method
self.requires_grad = requires_grad
self.forward = self.forward_grad if requires_grad else self.forward_nograd
# set plotter
self.plotter = plotter
class DeepONet_Physics_Validator(_DeepONet_Validator):
"""
    DeepONet validator for physics-informed DeepONet models
"""
def __init__(
self,
nodes: List[Node],
invar_branch: Dict[str, np.array],
invar_trunk: Dict[str, np.array],
true_outvar: Dict[str, np.array],
batch_size: int = 100,
plotter: DeepONetValidatorPlotter = None,
requires_grad: bool = False,
tile_trunk_input: bool = True,
):
super().__init__(
nodes=nodes,
invar_branch=invar_branch,
invar_trunk=invar_trunk,
true_outvar=true_outvar,
batch_size=batch_size,
plotter=plotter,
requires_grad=requires_grad,
)
if tile_trunk_input:
for k, v in invar_trunk.items():
invar_trunk[k] = np.tile(v, (batch_size, 1))
self.invar_trunk = invar_trunk
self.batch_size = batch_size
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
invar_trunk_gpu = Constraint._set_device(
self.invar_trunk, device=self.device, requires_grad=self.requires_grad
)
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward({**invar, **invar_trunk_gpu})
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
losses = DeepONet_Physics_Validator._l2_relative_error(
true_outvar_cpu, pred_outvar_cpu
)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
# save batch to vtk file TODO clean this up after graph unroll stuff
named_true_outvar = {"true_" + k: v for k, v in true_outvar.items()}
named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
# save batch to vtk/npz file TODO clean this up after graph unroll stuff
if "np" in save_filetypes:
np.savez(
results_dir + name, {**invar, **named_true_outvar, **named_pred_outvar}
)
ndim = next(iter(self.invar_trunk.values())).shape[-1]
invar_plotter = dict()
true_outvar_plotter = dict()
pred_outvar_plotter = dict()
for k, v in self.invar_trunk.items():
invar_plotter[k] = self.invar_trunk[k].reshape((self.batch_size, -1, ndim))
for k, v in true_outvar.items():
true_outvar_plotter[k] = true_outvar[k].reshape((self.batch_size, -1))
for k, v in pred_outvar.items():
pred_outvar_plotter[k] = pred_outvar[k].reshape((self.batch_size, -1))
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Validators",
name,
results_dir,
writer,
step,
invar_plotter,
true_outvar_plotter,
pred_outvar_plotter,
)
# add tensorboard scalars
for k, loss in losses.items():
if TF_SUMMARY:
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
else:
writer.add_scalar(
"Validators/" + name + "/" + k, loss, step, new_style=True
)
return losses
@staticmethod
def _l2_relative_error(true_var, pred_var): # TODO replace with metric classes
new_var = {}
for key in true_var.keys():
new_var["l2_relative_error_" + str(key)] = torch.sqrt(
torch.mean(
torch.square(torch.reshape(true_var[key], (-1, 1)) - pred_var[key])
)
/ torch.var(true_var[key])
)
return new_var
class DeepONet_Data_Validator(_DeepONet_Validator):
"""
    DeepONet validator for data-driven DeepONet models
"""
def __init__(
self,
nodes: List[Node],
invar_branch: Dict[str, np.array],
invar_trunk: Dict[str, np.array],
true_outvar: Dict[str, np.array],
batch_size: int = 100,
plotter: DeepONetValidatorPlotter = None,
requires_grad: bool = False,
):
super().__init__(
nodes=nodes,
invar_branch=invar_branch,
invar_trunk=invar_trunk,
true_outvar=true_outvar,
batch_size=batch_size,
plotter=plotter,
requires_grad=requires_grad,
)
self.invar_trunk_plotter = dict()
ndim = next(iter(invar_trunk.values())).shape[-1]
for k, v in invar_trunk.items():
self.invar_trunk_plotter[k] = np.tile(v, (batch_size, 1)).reshape(
(batch_size, -1, ndim)
)
self.invar_trunk = invar_trunk
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
invar_trunk_gpu = Constraint._set_device(
self.invar_trunk, device=self.device, requires_grad=self.requires_grad
)
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward({**invar, **invar_trunk_gpu})
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
losses = DeepONet_Data_Validator._l2_relative_error(
true_outvar_cpu, pred_outvar_cpu
)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
        # prefix true/predicted output names before saving
        named_true_outvar = {"true_" + k: v for k, v in true_outvar.items()}
        named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
        # save batch to npz file TODO clean this up after graph unroll stuff
        if "np" in save_filetypes:
            np.savez(
                results_dir + name,
                **{**invar, **named_true_outvar, **named_pred_outvar},
            )
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Validators",
name,
results_dir,
writer,
step,
self.invar_trunk_plotter,
true_outvar,
pred_outvar,
)
# add tensorboard scalars
for k, loss in losses.items():
if TF_SUMMARY:
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
else:
writer.add_scalar(
"Validators/" + name + "/" + k, loss, step, new_style=True
)
return losses
| modulus-sym-main | modulus/sym/domain/validator/discrete.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .validator import Validator
from .continuous import PointwiseValidator, PointVTKValidator
from .discrete import GridValidator, DeepONet_Physics_Validator, DeepONet_Data_Validator
| modulus-sym-main | modulus/sym/domain/validator/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from typing import List, Dict, Union
from pathlib import Path
from modulus.sym.domain.validator import Validator
from modulus.sym.domain.constraint import Constraint
from modulus.sym.utils.io.vtk import var_to_polyvtk, VTKBase
from modulus.sym.utils.io import ValidatorPlotter
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.constants import TF_SUMMARY
from modulus.sym.dataset import DictPointwiseDataset
from modulus.sym.distributed import DistributedManager
class PointwiseValidator(Validator):
"""
    Pointwise Validator that allows validating on pointwise data
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
invar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays as input.
true_outvar : Dict[str, np.ndarray (N, 1)]
        Dictionary of numpy arrays used as the validation target.
batch_size : int, optional
Batch size used when running validation, by default 1024
plotter : ValidatorPlotter
Modulus plotter for showing results in tensorboard.
requires_grad : bool = False
If automatic differentiation is needed for computing results.
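
    Example
    -------
    A minimal usage sketch; assumes ``nodes``, numpy arrays ``x`` and
    ``u_true`` of shape (N, 1), and a ``Domain`` instance ``domain`` are
    defined elsewhere, and the variable names are illustrative::

        validator = PointwiseValidator(
            nodes=nodes,
            invar={"x": x},
            true_outvar={"u": u_true},
            batch_size=1024,
        )
        domain.add_validator(validator)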
"""
def __init__(
self,
nodes: List[Node],
invar: Dict[str, np.array],
true_outvar: Dict[str, np.array],
batch_size: int = 1024,
plotter: ValidatorPlotter = None,
requires_grad: bool = False,
):
# TODO: add support for other datasets?
# get dataset and dataloader
self.dataset = DictPointwiseDataset(invar=invar, outvar=true_outvar)
self.dataloader = Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=0,
distributed=False,
infinite=False,
)
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(self.dataset.invar_keys),
Key.convert_list(self.dataset.outvar_keys),
)
self.manager = DistributedManager()
self.device = self.manager.device
self.model.to(self.device)
        # set forward method
self.requires_grad = requires_grad
self.forward = self.forward_grad if requires_grad else self.forward_nograd
# set plotter
self.plotter = plotter
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
# TODO add metrics specific for validation
# TODO: add potential support for lambda_weighting
losses = PointwiseValidator._l2_relative_error(true_outvar_cpu, pred_outvar_cpu)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
        # prefix true/predicted output names before saving
        named_true_outvar = {"true_" + k: v for k, v in true_outvar.items()}
        named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
        # save batch to vtk/npz file TODO clean this up after graph unroll stuff
        if "np" in save_filetypes:
            np.savez(
                results_dir + name,
                **{**invar, **named_true_outvar, **named_pred_outvar},
            )
if "vtk" in save_filetypes:
var_to_polyvtk(
{**invar, **named_true_outvar, **named_pred_outvar}, results_dir + name
)
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Validators",
name,
results_dir,
writer,
step,
invar,
true_outvar,
pred_outvar,
)
# add tensorboard scalars
for k, loss in losses.items():
if TF_SUMMARY:
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
else:
writer.add_scalar(
"Validators/" + name + "/" + k, loss, step, new_style=True
)
return losses
class PointVTKValidator(PointwiseValidator):
"""
Pointwise validator using mesh points of VTK object
Parameters
----------
vtk_obj : VTKBase
Modulus VTK object to use point locations from
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
input_vtk_map : Dict[str, List[str]]
Dictionary mapping from Modulus input variables to VTK variable names {"modulus.sym.name": ["vtk name"]}.
        Use colons to denote components of multi-dimensional VTK arrays (e.g. "name:0" for the first component).
true_vtk_map : Dict[str, List[str]]
Dictionary mapping from Modulus target variables to VTK variable names {"modulus.sym.name": ["vtk name"]}.
invar : Dict[str, np.array], optional
Dictionary of additional numpy arrays as input, by default {}
true_outvar : Dict[str, np.array], optional
        Dictionary of additional numpy arrays used as validation targets, by default {}
batch_size : int
Batch size used when running validation.
plotter : ValidatorPlotter
Modulus plotter for showing results in tensorboard.
requires_grad : bool, optional
        If automatic differentiation is needed for computing results, by default False
log_iter : bool, optional
Save results to different file each call, by default False
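
    Example
    -------
    A minimal usage sketch; assumes ``vtk_obj`` is an existing Modulus VTK
    object, ``nodes`` is a list of Modulus nodes, and ``domain`` is a
    ``Domain`` instance; the variable mappings are illustrative::

        validator = PointVTKValidator(
            vtk_obj=vtk_obj,
            nodes=nodes,
            input_vtk_map={"x": ["x"], "y": ["y"], "z": ["z"]},
            true_vtk_map={"u": ["u"]},
            batch_size=1024,
        )
        domain.add_validator(validator)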
"""
def __init__(
self,
vtk_obj: VTKBase,
nodes: List[Node],
input_vtk_map: Dict[str, List[str]],
true_vtk_map: Dict[str, List[str]],
        invar: Union[Dict[str, np.array], None] = None,  # Additional inputs
        true_outvar: Union[Dict[str, np.array], None] = None,  # Additional targets
batch_size: int = 1024,
plotter: ValidatorPlotter = None,
requires_grad: bool = False,
log_iter: bool = False,
):
# Set VTK file save dir and file name
self.vtk_obj = vtk_obj
self.vtk_obj.file_dir = "./validators"
self.vtk_obj.file_name = "validator"
        # Set up input/output names (copy to avoid mutating caller/default dicts)
        invar = dict(invar) if invar is not None else {}
        true_outvar = dict(true_outvar) if true_outvar is not None else {}
        invar_vtk = self.vtk_obj.get_data_from_map(input_vtk_map)
        invar.update(invar_vtk)
        # Extract true vars from VTK
        true_vtk = self.vtk_obj.get_data_from_map(true_vtk_map)
        true_outvar.update(true_vtk)
# set plotter
self.plotter = plotter
self.log_iter = log_iter
# initialize inferencer
super().__init__(
nodes=nodes,
invar=invar,
true_outvar=true_outvar,
batch_size=batch_size,
plotter=plotter,
requires_grad=requires_grad,
)
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
# TODO add metrics specific for validation
# TODO: add potential support for lambda_weighting
losses = PointwiseValidator._l2_relative_error(true_outvar_cpu, pred_outvar_cpu)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
        # prefix true/predicted output names before saving
        named_true_outvar = {"true_" + k: v for k, v in true_outvar.items()}
        named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
        # save batch to vtk/npz file TODO clean this up after graph unroll stuff
        self.vtk_obj.file_dir = Path(results_dir)
        self.vtk_obj.file_name = Path(name).stem
        if "np" in save_filetypes:
            np.savez(
                results_dir + name,
                **{**invar, **named_true_outvar, **named_pred_outvar},
            )
if "vtk" in save_filetypes:
if self.log_iter:
self.vtk_obj.var_to_vtk(data_vars={**pred_outvar}, step=step)
else:
self.vtk_obj.var_to_vtk(data_vars={**pred_outvar})
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Validators",
name,
results_dir,
writer,
step,
invar,
true_outvar,
pred_outvar,
)
# add tensorboard scalars
for k, loss in losses.items():
if TF_SUMMARY:
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
else:
writer.add_scalar(
"Validators/" + name + "/" + k, loss, step, new_style=True
)
return losses
| modulus-sym-main | modulus/sym/domain/validator/continuous.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import tarfile
import torch
import numpy as np
import gc
from typing import Dict, List, Union, Callable, Tuple
from pathlib import Path
from io import BytesIO
from modulus.sym.domain.inferencer import Inferencer
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.models.arch import Arch
from modulus.sym.distributed import DistributedManager
from modulus.sym.dataset import DictInferencePointwiseDataset
logger = logging.getLogger(__name__)
class OVVoxelInferencer(Inferencer):
"""Voxel inferencer for Omniverse extension.
Includes some additional utilities for OV inference control.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
input_keys : List[Key]
Input key list
output_keys : List[Key]
Output key list
mask_value : float, optional
Value to assign masked points, by default np.nan
requires_grad : bool, optional
If automatic differentiation is needed for computing results, by default False
eco : bool, optional
        Economy mode, will offload the model from the GPU after inference, by default False
progress_bar : ModulusOVProgressBar, optional
Modulus OV Extension progress bar for displaying inference progress, by default None
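
    Example
    -------
    A minimal usage sketch; assumes ``nodes`` is a list of Modulus nodes with
    inputs ``x``, ``y``, ``z`` and output ``u``; names and resolutions are
    illustrative::

        from modulus.sym.key import Key

        inferencer = OVVoxelInferencer(
            nodes=nodes,
            input_keys=[Key("x"), Key("y"), Key("z")],
            output_keys=[Key("u")],
        )
        inferencer.setup_voxel_domain(
            bounds=[[-1, 1], [-1, 1], [-1, 1]],
            npoints=[64, 64, 64],
            batch_size=1024,
        )
        invar, predvar = inferencer.query()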
"""
def __init__(
self,
nodes: List[Node],
input_keys: List[Key],
output_keys: List[Key],
mask_value: float = np.nan,
requires_grad: bool = False,
eco: bool = False,
progress_bar=None,
):
self.requires_grad = requires_grad
self._eco = eco
self.mask_value = mask_value
self.mask_index = None
self.input_names = [key.name for key in input_keys]
self.output_names = [key.name for key in output_keys]
self.progress_bar = progress_bar
# construct model from nodes
self.model = Graph(
nodes,
input_keys,
output_keys,
)
self.manager = DistributedManager()
self.device = self.manager.device
def setup_voxel_domain(
self,
bounds: List[List[int]],
npoints: List[int],
        invar: Union[Dict[str, np.array], None] = None,  # Additional inputs
batch_size: int = 1024,
mask_fn: Union[Callable, None] = None,
) -> None:
"""Set up voxel domain for inference
Parameters
----------
bounds : List[List[int]]
List of domain bounds to form uniform rectangular domain
npoints : List[int]
Resolution of voxels in each domain
invar : Dict[str, np.array], optional
Additional input features, by default {}
batch_size: int, optional
Inference batch size, by default 1024
mask_fn : Union[Callable, None], optional
Masking function to remove points from inferencing, by default None
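
        Example
        -------
        A minimal sketch of a mask function (illustrative only); it receives
        the coordinate arrays by name and returns a boolean array where
        ``True`` marks points to mask out::

            mask_fn = lambda x, y, z: x**2 + y**2 + z**2 < 0.25

            inferencer.setup_voxel_domain(
                bounds=[[-1, 1], [-1, 1], [-1, 1]],
                npoints=[64, 64, 64],
                mask_fn=mask_fn,
            )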
"""
        # Start by validating the requested voxel domain
        invar = dict(invar) if invar is not None else {}
assert len(bounds) == len(
npoints
), f"Bounds and npoints must be same length {len(bounds)}, {len(npoints)}"
assert 0 < len(bounds) < 4, "Only 1, 2, 3 grid dimensionality allowed"
# Pad for missing dimensions
self.npoints = np.array(npoints + [1, 1])[:3]
self.bounds = np.array(bounds + [[0, 0], [0, 0]])[:3]
dx = np.linspace(self.bounds[0][0], self.bounds[0][1], self.npoints[0])
dy = np.linspace(self.bounds[1][0], self.bounds[1][1], self.npoints[1])
dz = np.linspace(self.bounds[2][0], self.bounds[2][1], self.npoints[2])
# Get coordinate arrays (i,j format [x,y,z])
xx, yy, zz = np.meshgrid(dx, dy, dz, indexing="ij")
invar.update(
{
"x": np.reshape(xx, (-1, 1)),
"y": np.reshape(yy, (-1, 1)),
"z": np.reshape(zz, (-1, 1)),
}
)
# If mask set up mask indexes
if mask_fn is not None:
            args, *_ = inspect.getfullargspec(mask_fn)
            # Fallback: np_lambdify does not supply argument names
            # (ideally np_lambdify would allow input names to be queried)
if len(args) == 0:
args = list(invar.keys()) # Hope your inputs all go into the mask
mask_input = {key: invar[key] for key in args if key in invar}
            mask = np.squeeze(mask_fn(**mask_input).astype(bool))
# True points get masked while False get kept, flip for index
self.mask_index = np.logical_not(mask)
# Mask out to only masked points (only inference here)
for key, value in invar.items():
invar[key] = value[self.mask_index]
# get dataset and dataloader
self.dataset = DictInferencePointwiseDataset(
invar=invar, output_names=self.output_names
)
self.dataloader = Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=0,
distributed=False,
infinite=False,
)
    def query(
        self, memory_fraction: float = 1.0
    ) -> Tuple[Dict[str, np.array], Dict[str, np.array]]:
"""Query the inference model
Parameters
----------
memory_fraction : float, optional
Fraction of GPU memory to let PyTorch allocate, by default 1.0
        Returns
        -------
        Tuple[Dict[str, np.array], Dict[str, np.array]]
            Tuple of dictionaries holding the input arrays and the predicted output arrays
"""
torch.cuda.set_per_process_memory_fraction(memory_fraction)
invar_cpu = {key: [] for key in self.dataset.invar_keys}
predvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Eco mode on/off loads model every query
if self.eco or not next(self.model.parameters()).is_cuda:
self.model = self.model.to(self.device)
# Loop through mini-batches
for i, (invar0,) in enumerate(self.dataloader):
# Move data to device
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
if self.requires_grad:
pred_outvar = self.model.forward(invar)
else:
with torch.no_grad():
pred_outvar = self.model.forward(invar)
invar_cpu = {key: value + [invar0[key]] for key, value in invar_cpu.items()}
predvar_cpu = {
key: value + [pred_outvar[key].cpu().detach().numpy()]
for key, value in predvar_cpu.items()
}
# Update progress bar if provided
if self.progress_bar:
self.progress_bar.inference_step(float(i + 1) / len(self.dataloader))
# Eco mode on/off loads model every query
if self.eco:
logger.info("Eco inference on, moving model off GPU")
self.model.cpu()
# Concat mini-batch arrays
invar = {key: np.concatenate(value) for key, value in invar_cpu.items()}
predvar = {key: np.concatenate(value) for key, value in predvar_cpu.items()}
# Mask outputs
invar, predvar = self._mask_results(invar, predvar)
# Finally reshape back into grid
for key, value in invar.items():
shape = list(self.npoints) + [value.shape[1]]
invar[key] = np.reshape(value, (shape))
for key, value in predvar.items():
shape = list(self.npoints) + [value.shape[1]]
predvar[key] = np.reshape(value, (shape))
# Clean up
gc.collect()
torch.cuda.empty_cache()
torch.cuda.synchronize()
return invar, predvar
def _mask_results(self, invar, predvar):
# Reconstruct full array if mask was applied
for key, value in invar.items():
full_array = np.full(
(self.mask_index.shape[0], value.shape[1]),
self.mask_value,
dtype=value.dtype,
)
full_array[self.mask_index] = value
invar[key] = full_array
for key, value in predvar.items():
full_array = np.full(
(self.mask_index.shape[0], value.shape[1]),
self.mask_value,
dtype=value.dtype,
)
full_array[self.mask_index] = value
predvar[key] = full_array
return invar, predvar
def save_results(self, name, results_dir, writer, save_filetypes, step):
        logger.warning(
            "OVVoxelInferencer is not designed to be used inside of the Modulus solver"
        )
pass
def load_models(self, checkpoint_dir: str):
logging.info(f"Loading model checkpoint at {checkpoint_dir}")
for m in self.model.evaluation_order:
if m.saveable:
m.load(str(checkpoint_dir))
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, e: bool):
self._eco = e
        if not e:
self.model.to(self.device)
else:
self.model.cpu()
class OVFourCastNetInferencer(Inferencer):
"""FourCastNet inferencer for Omniverse extension.
Includes some additional utilities for OV inference control.
Parameters
----------
afno_model : Union[Arch, torch.nn.Module]
AFNO model object
n_channels : int
Number of input channels / fields
img_shape : Tuple[int, int], optional
Input field shape, by default (720, 1440)
eco : bool, optional
        Economy mode, will offload the model from the GPU after inference, by default False
progress_bar : ModulusOVProgressBar, optional
Modulus OV Extension progress bar for displaying inference progress, by default None
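
    Example
    -------
    A minimal usage sketch; assumes ``afno_model`` is a trained AFNO model and
    the channel count and file paths are illustrative::

        inferencer = OVFourCastNetInferencer(
            afno_model=afno_model,
            n_channels=20,
            img_shape=(720, 1440),
        )
        inferencer.load_initial_state_npy("init_state.npy")
        inferencer.load_stats_npz("global_means_stds.npz")
        outputs = inferencer.query(tsteps=10)  # [tsteps + 1, C, H, W]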
"""
def __init__(
self,
afno_model: Union[Arch, torch.nn.Module],
n_channels: int,
img_shape: Tuple[int, int] = (720, 1440),
eco: bool = False,
progress_bar=None,
):
self._eco = eco
self.n_channels = n_channels
self.img_shape = img_shape
self.progress_bar = progress_bar
self.mu = None
self.std = None
# Get PyTorch model out of node if a Modulus Node
if hasattr(afno_model, "_impl"):
self.model = afno_model._impl
else:
self.model = afno_model
self.manager = DistributedManager()
self.device = self.manager.device
def load_initial_state_npy(
self,
file_path: str,
tar_file_path: Union[str, None] = None,
) -> None:
"""Loads a FCN initial state into CPU memory stored in a npy file
        Dimensionality of the .npy file should be [in_channels, height, width]
Parameters
----------
file_path : str
File path to .npy file
tar_file_path : Union[str, None], optional
Optional tar ball with .npy file inside, by default None
"""
if tar_file_path is None:
file_path = Path(file_path)
assert file_path.is_file(), f"Invalid npy file path {file_path}"
init_np = np.load(file_path)
else:
init_np = self.get_array_from_tar(tar_file_path, file_path)
logger.info(f"Initial condition loaded with shape {init_np.shape}")
# Run dimension checks
assert init_np.ndim == 3, f"Initial state should have 3 dimensions"
assert (
init_np.shape[0] == self.n_channels
), f"Incorrect channel size; expected {self.n_channels}, got {init_np.shape[0]}"
assert (
init_np.shape[1] == self.img_shape[0]
and init_np.shape[2] == self.img_shape[1]
), "Incorrect field/image shape"
self.init_state = torch.Tensor(init_np).unsqueeze(0)
def load_stats_npz(
self,
file_path: str,
tar_file_path: Union[str, None] = None,
) -> None:
"""Loads mean and standard deviation normalization stats from npz file
Dimensionality of stats in .npz file should be [in_channels, 1, 1]. Npz
file should have two arrays: "mu" and "std"
Parameters
----------
file_path : str
File path to .npz file
tar_file_path : Union[str, None], optional
            Optional tar ball with .npz file inside, by default None
"""
if tar_file_path is None:
file_path = Path(file_path)
assert file_path.is_file(), f"Invalid npz file path {file_path}"
stat_npz = np.load(file_path)
else:
stat_npz = self.get_array_from_tar(tar_file_path, file_path)
mu = stat_npz["mu"]
std = stat_npz["std"]
logger.info(f"Mu array loaded with shape {mu.shape}")
logger.info(f"Std array loaded with shape {std.shape}")
# Run dimension checks
assert mu.ndim == 3 and std.ndim == 3, f"Mu and Std should have 3 dimensions"
assert (
mu.shape[0] == self.n_channels
), f"Incorrect channel size; expected {self.n_channels}, got {mu.shape[0]}"
assert (
std.shape[0] == self.n_channels
), f"Incorrect channel size; expected {self.n_channels}, got {std.shape[0]}"
self.mu = torch.Tensor(mu).unsqueeze(0)
self.std = torch.Tensor(std).unsqueeze(0)
@torch.no_grad()
def query(self, tsteps: int, memory_fraction: float = 1.0) -> np.array:
"""Query the inference model, only a batch size of 1 is supported
Parameters
----------
tsteps : int
Number of timesteps to forecast
memory_fraction : float, optional
Fraction of GPU memory to let PyTorch allocate, by default 1.0
Returns
-------
np.array
[tsteps+1, channels, height, width] output prediction fields
"""
torch.cuda.set_per_process_memory_fraction(memory_fraction)
        # Create output prediction tensor [tsteps + 1, C, H, W]
shape = self.init_state.shape
outputs = torch.zeros(shape[0] + tsteps, shape[1], shape[2], shape[3])
outputs[0] = (self.init_state - self.mu) / self.std
# Eco mode on/off loads model every query
if self.eco or not next(self.model.parameters()).is_cuda:
self.model = self.model.to(self.device)
# Loop through time-steps
for t in range(tsteps):
# Get input time-step
invar = outputs[t : t + 1].to(self.device)
# Predict
outvar = self.model.forward(invar)
# Store
outputs[t + 1] = outvar[0].detach().cpu()
# Update progress bar if present
if self.progress_bar:
self.progress_bar.inference_step(float(t + 1) / tsteps)
# Eco mode on/off loads model every query
if self.eco:
logger.info("Eco inference on, moving model off GPU")
self.model.cpu()
outputs = self.std * outputs + self.mu
outputs = outputs.numpy()
# Clean up
gc.collect()
torch.cuda.empty_cache()
torch.cuda.synchronize()
return outputs
def get_array_from_tar(self, tar_file_path: str, np_file_path: str):
"""Loads a numpy array from tar ball, will load entire numpy array into memory
Based on workaround: https://github.com/numpy/numpy/issues/7989
Parameters
----------
tar_file_path : str
File path to tar ball
np_file_path : str
Local path of numpy file inside of tar file
Returns
-------
np.array
Extracted numpy array
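
        Example
        -------
        Illustrative call (file names are placeholders)::

            init_np = inferencer.get_array_from_tar(
                "initial_conditions.tar.gz", "init_state.npy"
            )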
"""
tar_file_path = Path(tar_file_path)
assert tar_file_path.is_file(), f"Invalid tar file path {tar_file_path}"
# Open tarball
with tarfile.open(tar_file_path, "r:gz") as tar:
logging.info(f"Loaded tar.gz with files:")
tar.list()
array_file = BytesIO()
array_file.write(tar.extractfile(np_file_path).read())
array_file.seek(0)
return np.load(array_file)
def save_results(self, name, results_dir, writer, save_filetypes, step):
        logger.warning(
            "OVFourCastNetInferencer is not designed to be used inside of the Modulus solver"
        )
pass
@property
def eco(self):
return self._eco
@eco.setter
def eco(self, e: bool):
self._eco = e
        if not e:
self.model.to(self.device)
else:
self.model.cpu()
| modulus-sym-main | modulus/sym/domain/inferencer/ov.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
class Inferencer:
"""
Inferencer base class
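
    Example
    -------
    A minimal sketch of a custom subclass (illustrative; assumes ``model``
    maps an input dictionary of tensors to an output dictionary of tensors)::

        class MyInferencer(Inferencer):
            def __init__(self, model):
                self.model = model
                self.forward = self.forward_nograd

            def save_results(self, name, results_dir, writer, save_filetypes, step):
                pass  # write outputs however the application requires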
"""
def forward_grad(self, invar):
pred_outvar = self.model(invar)
return pred_outvar
def forward_nograd(self, invar):
with torch.no_grad():
pred_outvar = self.model(invar)
return pred_outvar
def save_results(self, name, results_dir, writer, save_filetypes, step):
raise NotImplementedError("Subclass of Inferencer needs to implement this")
| modulus-sym-main | modulus/sym/domain/inferencer/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .inferencer import Inferencer
from .pointwise import PointwiseInferencer
from .vtkpointwise import PointVTKInferencer
from .voxel import VoxelInferencer
from .ov import OVVoxelInferencer, OVFourCastNetInferencer
| modulus-sym-main | modulus/sym/domain/inferencer/__init__.py |