python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Union, Callable
from pathlib import Path
import inspect
import torch
import numpy as np
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.io import InferencerPlotter
from modulus.sym.utils.io.vtk import var_to_polyvtk, VTKBase, VTKUniformGrid
from modulus.sym.dataset import DictInferencePointwiseDataset
class VoxelInferencer(PointVTKInferencer):
"""
    Inferencer for creating voxel representations.
    This inferencer works by creating a uniform mesh of voxels and masking out the points selected by a callable function.
    The result is a voxel-based representation of any complex geometry at any resolution.
Parameters
----------
bounds : List[List[int]]
List of domain bounds to form uniform rectangular domain
npoints : List[int]
Resolution of voxels in each domain
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
output_names : List[str]
List of desired outputs.
export_map : Dict[str, List[str]], optional
        Export map dictionary with keys that are VTK variable names and values that are lists of output variables. Uses a 1:1 mapping if none is provided, by default None
    invar : Dict[str, np.ndarray], optional
Dictionary of additional numpy arrays as input, by default {}
mask_fn : Union[Callable, None], optional
Masking function to remove points from inferencing, by default None
mask_value : float, optional
        Value to assign masked points, by default np.nan
plotter : Plotter, optional
        Modulus `Plotter` for showing results in TensorBoard, by default None
requires_grad : bool, optional
        If automatic differentiation is needed for computing results, by default False
log_iter : bool, optional
Save results to different file each call, by default False
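    Examples
    --------
    A minimal usage sketch (illustrative; assumes a trained list of
    ``nodes`` and a Modulus ``Domain`` called ``domain``)::

        voxel_inf = VoxelInferencer(
            bounds=[[-1, 1], [-1, 1], [-1, 1]],
            npoints=[128, 128, 128],
            nodes=nodes,
            output_names=["u"],
            # hypothetical mask: points where this returns True are removed
            mask_fn=lambda x, y, z: x**2 + y**2 + z**2 > 0.5**2,
        )
        domain.add_inferencer(voxel_inf, "voxel_inf")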
"""
def __init__(
self,
bounds: List[List[int]],
npoints: List[int],
nodes: List[Node],
output_names: List[str],
export_map: Union[None, Dict[str, List[str]]] = None,
        invar: Dict[str, np.ndarray] = {},  # Additional inputs
batch_size: int = 1024,
mask_fn: Union[Callable, None] = None,
mask_value: float = np.nan,
plotter=None,
requires_grad: bool = False,
log_iter: bool = False,
model=None,
):
# No export map means one to one with outvars
self.npoints = npoints
if export_map is None:
export_map = {name: name for name in output_names}
coords = ["x", "y", "z"]
input_vtk_map = {coords[i]: coords[i] for i in range(len(bounds))}
# Create uniform grid dataset
vtk_obj = VTKUniformGrid(
bounds=bounds,
npoints=npoints,
export_map=export_map,
)
super().__init__(
vtk_obj,
nodes,
input_vtk_map=input_vtk_map,
output_names=output_names,
invar=invar, # Additional inputs
batch_size=batch_size,
mask_fn=mask_fn,
mask_value=mask_value,
plotter=plotter,
requires_grad=requires_grad,
log_iter=log_iter,
model=model,
)
def _write_results(
self, invar, predvar, name, results_dir, writer, save_filetypes, step
):
# Save batch to vtk/np files
if "np" in save_filetypes:
# Reshape into grid numpy arrays [cin, xdim, ydim, zdim]
np_vars = {}
for key, value in {**invar, **predvar}.items():
shape = self.npoints + [value.shape[1]]
np_vars[key] = np.moveaxis(np.reshape(value, (shape)), -1, 0)
            np.savez(results_dir + name, **np_vars)
if "vtk" in save_filetypes:
self.vtk_obj.file_dir = Path(results_dir)
self.vtk_obj.file_name = Path(name).stem
if self.log_iter:
self.vtk_obj.var_to_vtk(data_vars={**invar, **predvar}, step=step)
else:
self.vtk_obj.var_to_vtk(data_vars={**invar, **predvar})
# Add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Inferencers", name, results_dir, writer, step, invar, predvar
)
| modulus-sym-main | modulus/sym/domain/inferencer/voxel.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Union, Callable
from pathlib import Path
import inspect
import torch
import numpy as np
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.io import InferencerPlotter
from modulus.sym.utils.io.vtk import var_to_polyvtk, VTKBase, VTKUniformGrid
from modulus.sym.dataset import DictInferencePointwiseDataset
class PointVTKInferencer(PointwiseInferencer):
"""
Pointwise inferencer using mesh points of VTK object
Parameters
----------
vtk_obj : VTKBase
Modulus VTK object to use point locations from
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
input_vtk_map : Dict[str, List[str]]
Dictionary mapping from Modulus input variables to VTK variable names {"modulus.sym.name": ["vtk name"]}.
Use colons to denote components of multi-dimensional VTK arrays ("name":# )
output_names : List[str]
List of desired outputs.
    invar : Dict[str, np.ndarray], optional
Dictionary of additional numpy arrays as input, by default {}
    batch_size : int, optional
        Batch size used when running inference, by default 1024
mask_fn : Union[Callable, None], optional
Masking function to remove points from inferencing, by default None
mask_value : float, optional
        Value to assign masked points, by default np.nan
plotter : Plotter, optional
        Modulus `Plotter` for showing results in TensorBoard, by default None
requires_grad : bool, optional
        If automatic differentiation is needed for computing results, by default False
log_iter : bool, optional
Save results to different file each call, by default False
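    Examples
    --------
    A minimal usage sketch (illustrative; assumes a trained list of
    ``nodes`` and a Modulus ``Domain`` called ``domain``)::

        vtk_obj = VTKUniformGrid(
            bounds=[[0, 1], [0, 1]],
            npoints=[128, 128],
            export_map={"u": ["u"]},
        )
        inferencer = PointVTKInferencer(
            vtk_obj=vtk_obj,
            nodes=nodes,
            input_vtk_map={"x": "x", "y": "y"},
            output_names=["u"],
        )
        domain.add_inferencer(inferencer, "vtk_inf")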
"""
def __init__(
self,
vtk_obj: VTKBase,
nodes: List[Node],
input_vtk_map: Dict[str, List[str]],
output_names: List[str],
        invar: Dict[str, np.ndarray] = {},  # Additional inputs
batch_size: int = 1024,
mask_fn: Union[Callable, None] = None,
mask_value: float = np.nan,
plotter=None,
requires_grad: bool = False,
log_iter: bool = False,
model=None,
):
# Set VTK file save dir and file name
self.vtk_obj = vtk_obj
self.vtk_obj.file_dir = "./inferencers"
self.vtk_obj.file_name = "inferencer"
# Set up input dict
invar_vtk = self.vtk_obj.get_data_from_map(input_vtk_map)
invar.update(invar_vtk)
# If mask set up mask indexes
self.mask_value = mask_value
self.mask_index = None
if mask_fn is not None:
            args = inspect.getfullargspec(mask_fn).args
            # Fall back if np_lambdify does not supply argument names
            # Ideally np_lambdify should allow input names to be queried
if len(args) == 0:
args = list(invar.keys()) # Hope your inputs all go into the mask
mask_input = {key: invar[key] for key in args if key in invar}
mask = np.squeeze(mask_fn(**mask_input).astype(np.bool_))
# True points get masked while False get kept, flip for index
self.mask_index = np.logical_not(mask)
# Mask out to only masked points (only inference here)
for key, value in invar.items():
invar[key] = value[self.mask_index]
# set plotter
self.plotter = plotter
self.log_iter = log_iter
# initialize inferencer
super().__init__(
nodes=nodes,
invar=invar,
output_names=output_names,
batch_size=batch_size,
plotter=plotter,
requires_grad=requires_grad,
model=model,
)
def save_results(self, name, results_dir, writer, save_filetypes, step):
# Compute results
invar, predvar = self._compute_results()
# Reconstruct full array if mask was applied
if self.mask_index is not None:
invar, predvar = self._mask_results(invar, predvar)
# Write results to file
self._write_results(
invar, predvar, name, results_dir, writer, save_filetypes, step
)
def save_stream(
self, name, results_dir, writer, step, save_results, save_filetypes, to_cpu
):
if not to_cpu:
raise NotImplementedError("to_cpu=False not supported.")
# Compute results
invar, predvar = self._compute_results()
# Reconstruct full array if mask was applied
if self.mask_index is not None:
invar, predvar = self._mask_results(invar, predvar)
# Write results to file
if save_results:
self._write_results(
invar, predvar, name, results_dir, writer, save_filetypes, step
)
return {**invar, **predvar}
def _compute_results(self):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
predvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0,) in enumerate(self.dataloader):
# Move data to device
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
invar_cpu = {key: value + [invar0[key]] for key, value in invar_cpu.items()}
predvar_cpu = {
key: value + [pred_outvar[key].cpu().detach().numpy()]
for key, value in predvar_cpu.items()
}
# Concat mini-batch arrays
invar = {key: np.concatenate(value) for key, value in invar_cpu.items()}
predvar = {key: np.concatenate(value) for key, value in predvar_cpu.items()}
return invar, predvar
def _mask_results(self, invar, predvar):
# Reconstruct full array if mask was applied
for key, value in invar.items():
full_array = np.full(
(self.mask_index.shape[0], value.shape[1]),
self.mask_value,
dtype=value.dtype,
)
full_array[self.mask_index] = value
invar[key] = full_array
for key, value in predvar.items():
full_array = np.full(
(self.mask_index.shape[0], value.shape[1]),
self.mask_value,
dtype=value.dtype,
)
full_array[self.mask_index] = value
predvar[key] = full_array
return invar, predvar
def _write_results(
self, invar, predvar, name, results_dir, writer, save_filetypes, step
):
# Save batch to vtk/np files
if "np" in save_filetypes:
            np.savez(results_dir + name, **{**invar, **predvar})
if "vtk" in save_filetypes:
self.vtk_obj.file_dir = Path(results_dir)
self.vtk_obj.file_name = Path(name).stem
if self.log_iter:
self.vtk_obj.var_to_vtk(data_vars={**invar, **predvar}, step=step)
else:
self.vtk_obj.var_to_vtk(data_vars={**invar, **predvar})
# Add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Inferencers", name, results_dir, writer, step, invar, predvar
)
| modulus-sym-main | modulus/sym/domain/inferencer/vtkpointwise.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Union, Callable
from pathlib import Path
import inspect
import torch
import numpy as np
from modulus.sym.domain.inferencer import Inferencer
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.io import InferencerPlotter
from modulus.sym.utils.io.vtk import var_to_polyvtk, VTKBase, VTKUniformGrid
from modulus.sym.dataset import DictInferencePointwiseDataset
class PointwiseInferencer(Inferencer):
"""
Pointwise Inferencer that allows inferencing on pointwise data
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
invar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays as input.
output_names : List[str]
List of desired outputs.
batch_size : int, optional
        Batch size used when running inference, by default 1024
plotter : InferencerPlotter
Modulus plotter for showing results in tensorboard.
requires_grad : bool = False
If automatic differentiation is needed for computing results.
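    Examples
    --------
    A minimal usage sketch (illustrative; assumes a trained list of
    ``nodes`` and a Modulus ``Domain`` called ``domain``)::

        x = np.linspace(0, 1, 100).reshape(-1, 1)
        inferencer = PointwiseInferencer(
            nodes=nodes,
            invar={"x": x},
            output_names=["u"],
        )
        domain.add_inferencer(inferencer, "pointwise_inf")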
"""
def __init__(
self,
nodes: List[Node],
        invar: Dict[str, np.ndarray],
output_names: List[str],
batch_size: int = 1024,
plotter: InferencerPlotter = None,
requires_grad: bool = False,
model=None,
):
# get dataset and dataloader
self.dataset = DictInferencePointwiseDataset(
invar=invar, output_names=output_names
)
self.dataloader = Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=0,
distributed=False,
infinite=False,
)
# construct model from nodes
if model is None:
self.model = Graph(
nodes,
Key.convert_list(self.dataset.invar_keys),
Key.convert_list(self.dataset.outvar_keys),
)
else:
self.model = model
self.manager = DistributedManager()
self.device = self.manager.device
self.model.to(self.device)
        # set forward method
self.requires_grad = requires_grad
self.forward = self.forward_grad if requires_grad else self.forward_nograd
# set plotter
self.plotter = plotter
def eval_epoch(self):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
predvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0,) in enumerate(self.dataloader):
# Move data to device
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
invar_cpu = {key: value + [invar0[key]] for key, value in invar_cpu.items()}
predvar_cpu = {
key: value + [pred_outvar[key].cpu().detach().numpy()]
for key, value in predvar_cpu.items()
}
# Concat mini-batch arrays
invar = {key: np.concatenate(value) for key, value in invar_cpu.items()}
predvar = {key: np.concatenate(value) for key, value in predvar_cpu.items()}
return invar, predvar
def save_results(self, name, results_dir, writer, save_filetypes, step):
# evaluate on entire dataset
invar, predvar = self.eval_epoch()
# save batch to vtk/np files TODO clean this up after graph unroll stuff
if "np" in save_filetypes:
            np.savez(results_dir + name, **{**invar, **predvar})
if "vtk" in save_filetypes:
var_to_polyvtk({**invar, **predvar}, results_dir + name)
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
"Inferencers", name, results_dir, writer, step, invar, predvar
)
| modulus-sym-main | modulus/sym/domain/inferencer/pointwise.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List
import torch
import logging
from torch.utils.data import DataLoader, BatchSampler, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from modulus.sym.node import Node
from modulus.sym.constants import tf_dt
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.dataset import Dataset, IterableDataset
from modulus.sym.loss import Loss
from modulus.sym.graph import Graph
from modulus.sym.key import Key
logger = logging.getLogger(__name__)
Tensor = torch.Tensor
class Constraint:
"""Base class for constraints"""
def __init__(
self,
nodes: List[Node],
dataset: Union[Dataset, IterableDataset],
loss: Loss,
batch_size: int,
shuffle: bool,
drop_last: bool,
num_workers: int,
):
# Get DDP manager
self.manager = DistributedManager()
self.device = self.manager.device
if not drop_last and self.manager.cuda_graphs:
logger.info("drop_last must be true when using cuda graphs")
drop_last = True
# get dataset and dataloader
self.dataset = dataset
self.dataloader = iter(
Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
)
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(self.dataset.invar_keys),
Key.convert_list(self.dataset.outvar_keys),
)
self.model.to(self.device)
if self.manager.distributed:
# https://pytorch.org/docs/master/notes/cuda.html#id5
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
self.model = DistributedDataParallel(
self.model,
device_ids=[self.manager.local_rank],
output_device=self.device,
broadcast_buffers=self.manager.broadcast_buffers,
find_unused_parameters=self.manager.find_unused_parameters,
process_group=self.manager.group(
"data_parallel"
), # None by default
)
torch.cuda.current_stream().wait_stream(s)
self._input_names = Key.convert_list(dataset.invar_keys)
self._output_names = Key.convert_list(dataset.outvar_keys)
self._input_vars = None
self._target_vars = None
self._lambda_weighting = None
# put loss on device
self._loss = loss.to(self.device)
@property
def input_names(self) -> List[Key]:
return self._input_names
@property
def output_names(self) -> List[Key]:
return self._output_names
def load_data(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def load_data_static(self):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def loss(self, step: int):
raise NotImplementedError("Subclass of Constraint needs to implement this")
def save_batch(self, filename: str):
raise NotImplementedError("Subclass of Constraint needs to implement this")
@staticmethod
def _set_device(tensor_dict, device=None, requires_grad=False):
# convert np to torch if needed
tensor_dict = {
key: torch.as_tensor(value, dtype=tf_dt, device=device)
for key, value in tensor_dict.items()
}
# set requires_grad if needed
if requires_grad:
tensor_dict = {
key: value.requires_grad_(requires_grad)
for key, value in tensor_dict.items()
}
return tensor_dict
@staticmethod
def get_dataloader(
dataset: Union[Dataset, IterableDataset],
batch_size: int,
shuffle: bool,
drop_last: bool,
num_workers: int,
distributed: bool = None,
infinite: bool = True,
):
"Return an appropriate dataloader given a dataset"
assert isinstance(dataset, Dataset) or isinstance(
dataset, IterableDataset
), "error, dataset must be a subclass of Dataset or IterableDataset"
manager = DistributedManager()
# use persistent workers
# this is important for small datasets - torch would otherwise spend a lot of CPU overhead spawning workers each epoch
        persistent_workers = num_workers > 0
# map-style
if isinstance(dataset, Dataset):
assert batch_size is not None, "error, batch_size must be specified"
assert shuffle is not None, "error, shuffle must be specified"
assert drop_last is not None, "error, drop_last must be specified"
# if distributed, use distributed sampler
if distributed is not False and manager.distributed:
sampler = DistributedSampler(
dataset,
num_replicas=manager.group_size("data_parallel"),
rank=manager.group_rank("data_parallel"),
shuffle=shuffle,
drop_last=drop_last,
)
# otherwise use standard sampler
else:
if shuffle:
sampler = RandomSampler(dataset)
else:
sampler = SequentialSampler(dataset)
# get batch sampler
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
# if the dataset does auto collation, turn off automatic batching in dataloader
# this passes batched indices directly to dataset
# i.e. the dataloader yields default_convert(dataset[idx])
# see https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/fetch.py
# note: may need to use torch.set_num_threads if array indexing tensors in dataset to avoid excessive threading
if dataset.auto_collation:
dataloader = DataLoader(
dataset,
batch_size=None,
sampler=batch_sampler,
pin_memory=True,
num_workers=num_workers,
worker_init_fn=dataset.worker_init_fn,
persistent_workers=persistent_workers,
)
# otherwise turn on automatic batching in dataloader
# this passes single indices to the dataset
# i.e. the dataloader yields default_collate([dataset[i] for i in idx])
else:
dataloader = DataLoader(
dataset,
batch_sampler=batch_sampler,
pin_memory=True,
num_workers=num_workers,
worker_init_fn=dataset.worker_init_fn,
persistent_workers=persistent_workers,
)
# iterable-style
elif isinstance(dataset, IterableDataset):
# for iterable datasets, must do batching/sampling within dataset
dataloader = DataLoader(
dataset,
batch_size=None,
pin_memory=True,
num_workers=num_workers,
worker_init_fn=dataset.worker_init_fn,
persistent_workers=persistent_workers,
)
# make dataloader infinite
if infinite:
dataloader = InfiniteDataLoader(dataloader)
# initialise dataset if on main thread
if num_workers == 0:
dataset.worker_init_fn(0)
return dataloader
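# Example (sketch): a finite, non-distributed loader for evaluation,
# mirroring the call made in PointwiseInferencer; `dataset` is assumed
# to be a map-style modulus Dataset:
#
#     dataloader = Constraint.get_dataloader(
#         dataset=dataset, batch_size=1024, shuffle=False, drop_last=False,
#         num_workers=0, distributed=False, infinite=False,
#     )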
class InfiniteDataLoader:
"An infinite dataloader, for use with map-style datasets to avoid StopIteration after each epoch"
def __init__(self, dataloader):
self.dataloader = dataloader
self.epoch = 0
def __iter__(self):
while True:
dataloader = iter(self.dataloader)
for batch in dataloader:
yield batch
self.epoch += 1
| modulus-sym-main | modulus/sym/domain/constraint/constraint.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Continuous type constraints
"""
import logging
from typing import Dict, List, Union
import torch
from torch.nn.parallel import DistributedDataParallel
import numpy as np
from modulus.sym.domain.constraint import Constraint
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.loss import Loss, PointwiseLossNorm
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.io.vtk import grid_to_vtk
from modulus.sym.dataset import Dataset, IterableDataset, DictGridDataset
logger = logging.getLogger(__name__)
class SupervisedGridConstraint(Constraint):
"""Data-driven grid field constraint
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
dataset: Union[Dataset, IterableDataset]
dataset which supplies invar and outvar examples
Must be a subclass of Dataset or IterableDataset
loss : Loss, optional
Modulus `Loss` function, by default PointwiseLossNorm()
batch_size : int, optional
Batch size used when running constraint, must be specified if Dataset used
Not used if IterableDataset used
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
Not used if IterableDataset used
drop_last : bool, optional
        Drop last mini-batch if dataset is not fully divisible by batch_size, by default True
Not used if IterableDataset used
num_workers : int, optional
Number of dataloader workers, by default 0
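    Examples
    --------
    A minimal usage sketch (illustrative; assumes ``invar``/``outvar``
    dictionaries of gridded numpy arrays, a trained list of ``nodes`` and
    a Modulus ``Domain`` called ``domain``)::

        dataset = DictGridDataset(invar=invar, outvar=outvar)
        constraint = SupervisedGridConstraint(
            nodes=nodes,
            dataset=dataset,
            batch_size=32,
        )
        domain.add_constraint(constraint, "supervised")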
"""
def __init__(
self,
nodes: List[Node],
dataset: Union[Dataset, IterableDataset],
loss: Loss = PointwiseLossNorm(),
batch_size: int = None,
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
):
super().__init__(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar0 = {key: value for key, value in invar.items()}
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
true_outvar = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(lambda_weighting, device=self.device)
# If using DDP, strip out collective stuff to prevent deadlocks
# This only works either when one process alone calls in to save_batch
# or when multiple processes independently save data
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
# compute pred outvar
pred_outvar = modl(invar)
# rename values and save batch to vtk file TODO clean this up after graph unroll stuff
named_true_outvar = {"true_" + key: value for key, value in true_outvar.items()}
named_pred_outvar = {"pred_" + key: value for key, value in pred_outvar.items()}
save_var = {
**{key: value for key, value in invar0.items()},
**named_true_outvar,
**named_pred_outvar,
}
save_var = {
key: value.cpu().detach().numpy() for key, value in save_var.items()
}
model_parallel_rank = (
self.manager.group_rank("model_parallel") if self.manager.distributed else 0
)
# Using - as delimiter here since vtk ignores anything after .
grid_to_vtk(save_var, filename + f"-{model_parallel_rank}")
def load_data(self):
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
self._input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
self._target_vars = Constraint._set_device(true_outvar, device=self.device)
self._lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
def load_data_static(self):
if self._input_vars is None:
# Default loading if vars not allocated
self.load_data()
else:
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
# Set grads to false here for inputs, static var has allocation already
input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
target_vars = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
for key in input_vars.keys():
self._input_vars[key].data.copy_(input_vars[key])
for key in target_vars.keys():
self._target_vars[key].copy_(target_vars[key])
for key in lambda_weighting.keys():
self._lambda_weighting[key].copy_(lambda_weighting[key])
def forward(self):
# compute pred outvar
self._output_vars = self.model(self._input_vars)
def loss(self, step: int) -> Dict[str, torch.Tensor]:
if self._output_vars is None:
            logger.warning("Calling loss without forward call")
return {}
losses = self._loss(
self._input_vars,
self._output_vars,
self._target_vars,
self._lambda_weighting,
step,
)
return losses
class _DeepONetConstraint(Constraint):
def __init__(
self,
nodes: List[Node],
        invar_branch: Dict[str, np.ndarray],
        invar_trunk: Dict[str, np.ndarray],
        outvar: Dict[str, np.ndarray],
        batch_size: int,
        lambda_weighting: Dict[str, np.ndarray],
loss: Loss,
shuffle: bool,
drop_last: bool,
num_workers: int,
):
# TODO: add support for other datasets (like SupervisedGridConstraint)
# get dataset and dataloader
self.dataset = DictGridDataset(
invar=invar_branch, outvar=outvar, lambda_weighting=lambda_weighting
)
# Get DDP manager
self.manager = DistributedManager()
self.device = self.manager.device
if not drop_last and self.manager.cuda_graphs:
logger.info("drop_last must be true when using cuda graphs")
drop_last = True
self.dataloader = iter(
Constraint.get_dataloader(
dataset=self.dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
)
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(invar_branch.keys())
+ Key.convert_list(invar_trunk.keys()),
Key.convert_list(outvar.keys()),
)
self.model.to(self.device)
if self.manager.distributed:
# https://pytorch.org/docs/master/notes/cuda.html#id5
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
self.model = DistributedDataParallel(
self.model,
device_ids=[self.manager.local_rank],
output_device=self.device,
broadcast_buffers=self.manager.broadcast_buffers,
find_unused_parameters=self.manager.find_unused_parameters,
process_group=self.manager.group(
"data_parallel"
), # None by default
)
torch.cuda.current_stream().wait_stream(s)
self._input_names = Key.convert_list(self.dataset.invar_keys)
self._output_names = Key.convert_list(self.dataset.outvar_keys)
self._input_vars_branch = None
self._target_vars = None
self._lambda_weighting = None
# put loss on device
self._loss = loss.to(self.device)
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar0 = {key: value for key, value in invar.items()}
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
true_outvar = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(lambda_weighting, device=self.device)
# If using DDP, strip out collective stuff to prevent deadlocks
# This only works either when one process alone calls in to save_batch
# or when multiple processes independently save data
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
# compute pred outvar
pred_outvar = modl({**invar, **self._input_vars_trunk})
# rename values and save batch to vtk file TODO clean this up after graph unroll stuff
named_lambda_weighting = {
"lambda_" + key: value for key, value in lambda_weighting.items()
}
named_true_outvar = {"true_" + key: value for key, value in true_outvar.items()}
named_pred_outvar = {"pred_" + key: value for key, value in pred_outvar.items()}
save_var = {
**{key: value for key, value in invar0.items()},
**named_true_outvar,
**named_pred_outvar,
**named_lambda_weighting,
}
save_var = {
key: value.cpu().detach().numpy() for key, value in save_var.items()
}
model_parallel_rank = (
self.manager.group_rank("model_parallel") if self.manager.distributed else 0
)
np.savez_compressed(filename + f".{model_parallel_rank}.npz", **save_var)
def load_data(self):
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
self._input_vars_branch = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
self._target_vars = Constraint._set_device(true_outvar, device=self.device)
self._lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
def load_data_static(self):
if self._input_vars_branch is None:
# Default loading if vars not allocated
self.load_data()
else:
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
# Set grads to false here for inputs, static var has allocation already
input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
target_vars = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
for key in input_vars.keys():
self._input_vars_branch[key].data.copy_(input_vars[key])
for key in target_vars.keys():
self._target_vars[key].copy_(target_vars[key])
for key in lambda_weighting.keys():
self._lambda_weighting[key].copy_(lambda_weighting[key])
def forward(self):
# compute pred outvar
self._output_vars = self.model(
{**self._input_vars_branch, **self._input_vars_trunk}
)
class DeepONetConstraint_Data(_DeepONetConstraint):
def __init__(
self,
nodes: List[Node],
        invar_branch: Dict[str, np.ndarray],
        invar_trunk: Dict[str, np.ndarray],
        outvar: Dict[str, np.ndarray],
        batch_size: int,
        lambda_weighting: Dict[str, np.ndarray] = None,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
):
super().__init__(
nodes=nodes,
invar_branch=invar_branch,
invar_trunk=invar_trunk,
outvar=outvar,
batch_size=batch_size,
lambda_weighting=lambda_weighting,
loss=loss,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
self._input_vars_trunk = Constraint._set_device(
invar_trunk, device=self.device, requires_grad=True
)
def loss(self, step: int):
# compute loss
losses = self._loss(
self._input_vars_trunk,
self._output_vars,
self._target_vars,
self._lambda_weighting,
step,
)
return losses
class DeepONetConstraint_Physics(_DeepONetConstraint):
def __init__(
self,
nodes: List[Node],
        invar_branch: Dict[str, np.ndarray],
        invar_trunk: Dict[str, np.ndarray],
        outvar: Dict[str, np.ndarray],
        batch_size: int,
        lambda_weighting: Dict[str, np.ndarray] = None,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
tile_trunk_input: bool = True,
):
super().__init__(
nodes=nodes,
invar_branch=invar_branch,
invar_trunk=invar_trunk,
outvar=outvar,
batch_size=batch_size,
lambda_weighting=lambda_weighting,
loss=loss,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
if tile_trunk_input:
for k, v in invar_trunk.items():
invar_trunk[k] = np.tile(v, (batch_size, 1))
self._input_vars_trunk = Constraint._set_device(
invar_trunk, device=self.device, requires_grad=True
)
def loss(self, step: int):
target_vars = {
k: torch.reshape(v, (-1, 1)) for k, v in self._target_vars.items()
}
lambda_weighting = {
k: torch.reshape(v, (-1, 1)) for k, v in self._lambda_weighting.items()
}
# compute loss
losses = self._loss(
self._input_vars_trunk,
self._output_vars,
target_vars,
lambda_weighting,
step,
)
return losses
| modulus-sym-main | modulus/sym/domain/constraint/discrete.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .constraint import Constraint
from .continuous import (
PointwiseConstraint,
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
VariationalConstraint,
VariationalDomainConstraint,
)
from .discrete import (
SupervisedGridConstraint,
DeepONetConstraint_Data,
DeepONetConstraint_Physics,
)
| modulus-sym-main | modulus/sym/domain/constraint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from modulus.sym.utils.sympy import np_lambdify
def _compute_outvar(invar, outvar_sympy):
outvar = {}
for key in outvar_sympy.keys():
outvar[key] = np_lambdify(outvar_sympy[key], {**invar})(**invar)
return outvar
def _compute_lambda_weighting(invar, outvar, lambda_weighting_sympy):
lambda_weighting = {}
if lambda_weighting_sympy is None:
for key in outvar.keys():
lambda_weighting[key] = np.ones_like(next(iter(invar.values())))
else:
for key in outvar.keys():
lambda_weighting[key] = np_lambdify(
lambda_weighting_sympy[key], {**invar, **outvar}
)(**invar, **outvar)
return lambda_weighting
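# Example (sketch): with invar = {"x": np.random.rand(100, 1)} and
# outvar_sympy = {"u": sympy.sin(sympy.Symbol("x"))}, _compute_outvar
# evaluates the expression pointwise and returns {"u": np.sin(invar["x"])}.
# With lambda_weighting_sympy=None, _compute_lambda_weighting returns a
# weight array of ones per output key, shaped like the sampled points.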
| modulus-sym-main | modulus/sym/domain/constraint/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Continuous type constraints
"""
import torch
from torch.nn.parallel import DistributedDataParallel
import numpy as np
from typing import Dict, List, Union, Tuple, Callable
import sympy as sp
import logging
from .constraint import Constraint
from .utils import _compute_outvar, _compute_lambda_weighting
from modulus.sym.utils.io.vtk import var_to_polyvtk
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.loss import Loss, PointwiseLossNorm, IntegralLossNorm
from modulus.sym.distributed import DistributedManager
from modulus.sym.utils.sympy import np_lambdify
from modulus.sym.geometry import Geometry
from modulus.sym.geometry.helper import _sympy_criteria_to_criteria
from modulus.sym.geometry.parameterization import Parameterization, Bounds
from modulus.sym.dataset import (
DictPointwiseDataset,
ListIntegralDataset,
ContinuousPointwiseIterableDataset,
ContinuousIntegralIterableDataset,
DictImportanceSampledPointwiseIterableDataset,
DictVariationalDataset,
)
Tensor = torch.Tensor
logger = logging.getLogger(__name__)
class PointwiseConstraint(Constraint):
"""
Base class for all Pointwise Constraints
"""
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
true_outvar = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(lambda_weighting, device=self.device)
# If using DDP, strip out collective stuff to prevent deadlocks
# This only works either when one process alone calls in to save_batch
# or when multiple processes independently save data
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
# compute pred outvar
pred_outvar = modl(invar)
# rename values and save batch to vtk file TODO clean this up after graph unroll stuff
named_lambda_weighting = {
"lambda_" + key: value for key, value in lambda_weighting.items()
}
named_true_outvar = {"true_" + key: value for key, value in true_outvar.items()}
named_pred_outvar = {"pred_" + key: value for key, value in pred_outvar.items()}
save_var = {
**{key: value for key, value in invar.items()},
**named_true_outvar,
**named_pred_outvar,
**named_lambda_weighting,
}
save_var = {
key: value.cpu().detach().numpy() for key, value in save_var.items()
}
var_to_polyvtk(save_var, filename)
def load_data(self):
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
self._input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
self._target_vars = Constraint._set_device(true_outvar, device=self.device)
self._lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
def load_data_static(self):
if self._input_vars is None:
# Default loading if vars not allocated
self.load_data()
else:
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
# Set grads to false here for inputs, static var has allocation already
input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
target_vars = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
for key in input_vars.keys():
self._input_vars[key].data.copy_(input_vars[key])
for key in target_vars.keys():
self._target_vars[key].copy_(target_vars[key])
for key in lambda_weighting.keys():
self._lambda_weighting[key].copy_(lambda_weighting[key])
def forward(self):
# compute pred outvar
self._output_vars = self.model(self._input_vars)
def loss(self, step: int) -> Dict[str, torch.Tensor]:
if self._output_vars is None:
            logger.warning("Calling loss without forward call")
return {}
losses = self._loss(
self._input_vars,
self._output_vars,
self._target_vars,
self._lambda_weighting,
step,
)
return losses
@classmethod
def from_numpy(
cls,
nodes: List[Node],
invar: Dict[str, np.ndarray],
outvar: Dict[str, np.ndarray],
batch_size: int,
lambda_weighting: Dict[str, np.ndarray] = None,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
):
"""
Create custom pointwise constraint from numpy arrays.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
invar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays as input.
outvar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays to enforce constraint on.
batch_size : int
Batch size used in training.
lambda_weighting : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays to pointwise weight losses.
Default is ones.
loss : Loss
Modulus `Loss` module that defines the loss type, (e.g. L2, L1, ...).
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
drop_last : bool, optional
            Drop last mini-batch if dataset is not fully divisible by batch_size, by default True
num_workers : int
            Number of workers used in fetching data.
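        Examples
        --------
        A minimal usage sketch (illustrative arrays; assumes a trained
        list of ``nodes`` and a Modulus ``Domain`` called ``domain``)::

            x = np.random.rand(1000, 1)
            u = np.sin(2 * np.pi * x)
            constraint = PointwiseConstraint.from_numpy(
                nodes=nodes,
                invar={"x": x},
                outvar={"u": u},
                batch_size=128,
            )
            domain.add_constraint(constraint, "data")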
"""
if "area" not in invar:
invar["area"] = np.ones_like(next(iter(invar.values())))
# TODO: better area definition?
# no need to lambdify: outvar / lambda_weighting already contain np arrays
# make point dataset
dataset = DictPointwiseDataset(
invar=invar,
outvar=outvar,
lambda_weighting=lambda_weighting,
)
return cls(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
class PointwiseBoundaryConstraint(PointwiseConstraint):
"""
Pointwise Constraint applied to boundary/perimeter/surface of geometry.
For example, in 3D this will create a constraint on the surface of the
given geometry.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
geometry : Geometry
Modulus `Geometry` to apply the constraint with.
outvar : Dict[str, Union[int, float, sp.Basic]]
A dictionary of SymPy Symbols/Expr, floats or int.
This is used to describe the constraint. For example,
`outvar={'u': 0}` would specify `'u'` to be zero everywhere
on the constraint.
batch_size : int
Batch size used in training.
    criteria : Union[sp.Basic, Callable, None] = None
SymPy criteria function specifies to only apply constraint to areas
that satisfy this criteria. For example, if
`criteria=sympy.Symbol('x')>0` then only areas that have positive
`'x'` values will have the constraint applied to them.
lambda_weighting : Dict[str, Union[int, float, sp.Basic]] = None
The spatial pointwise weighting of the constraint. For example,
        `lambda_weighting={'u': 2.0*sympy.Symbol('x')}` would
apply a pointwise weighting to the loss of `2.0 * x`.
parameterization : Union[Parameterization, None], optional
This allows adding parameterization or additional inputs.
fixed_dataset : bool = True
        If True then the points for this constraint are sampled once at
        initialization and kept fixed. If False then the points are
        continually resampled.
compute_sdf_derivatives: bool, optional
        Compute SDF derivatives when sampling geometry
importance_measure : Union[Callable, None] = None
A callable function that computes a scalar importance measure. This
importance measure is then used in the constraint when sampling
points. Areas with higher importance are sampled more frequently
according to Monte Carlo importance sampling,
https://en.wikipedia.org/wiki/Monte_Carlo_integration.
batch_per_epoch : int = 1000
If `fixed_dataset=True` then the total number of points generated
to apply constraint on is `total_nr_points=batch_per_epoch*batch_size`.
quasirandom : bool = False
If true then sample the points using the Halton sequence.
num_workers : int
        Number of workers used in fetching data.
loss : Loss
Modulus `Loss` module that defines the loss type, (e.g. L2, L1, ...).
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
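    Examples
    --------
    A minimal usage sketch (illustrative; assumes a 2D Modulus geometry
    ``geo``, a trained list of ``nodes`` and a ``Domain`` called
    ``domain``)::

        bc = PointwiseBoundaryConstraint(
            nodes=nodes,
            geometry=geo,
            outvar={"u": 0},
            batch_size=1000,
            criteria=sympy.Symbol("x") > 0,
        )
        domain.add_constraint(bc, "dirichlet_bc")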
"""
def __init__(
self,
nodes: List[Node],
geometry: Geometry,
outvar: Dict[str, Union[int, float, sp.Basic]],
batch_size: int,
criteria: Union[sp.Basic, Callable, None] = None,
lambda_weighting: Dict[str, Union[int, float, sp.Basic]] = None,
parameterization: Union[Parameterization, None] = None,
fixed_dataset: bool = True,
importance_measure: Union[Callable, None] = None,
batch_per_epoch: int = 1000,
quasirandom: bool = False,
num_workers: int = 0,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
):
# assert that not using importance measure with continuous dataset
assert not (
(not fixed_dataset) and (importance_measure is not None)
), "Using Importance measure with continuous dataset is not supported"
# if fixed dataset then sample points and fix for all of training
if fixed_dataset:
# sample boundary
invar = geometry.sample_boundary(
batch_size * batch_per_epoch,
criteria=criteria,
parameterization=parameterization,
quasirandom=quasirandom,
)
# compute outvar
outvar = _compute_outvar(invar, outvar)
# set lambda weighting
lambda_weighting = _compute_lambda_weighting(
invar, outvar, lambda_weighting
)
# make point dataset
if importance_measure is None:
invar["area"] *= batch_per_epoch # TODO find better way to do this
dataset = DictPointwiseDataset(
invar=invar,
outvar=outvar,
lambda_weighting=lambda_weighting,
)
else:
dataset = DictImportanceSampledPointwiseIterableDataset(
invar=invar,
outvar=outvar,
batch_size=batch_size,
importance_measure=importance_measure,
lambda_weighting=lambda_weighting,
shuffle=shuffle,
)
# else sample points every batch
else:
# invar function
invar_fn = lambda: geometry.sample_boundary(
batch_size,
criteria=criteria,
parameterization=parameterization,
quasirandom=quasirandom,
)
# outvar function
outvar_fn = lambda invar: _compute_outvar(invar, outvar)
# lambda weighting function
lambda_weighting_fn = lambda invar, outvar: _compute_lambda_weighting(
invar, outvar, lambda_weighting
)
# make point dataloader
dataset = ContinuousPointwiseIterableDataset(
invar_fn=invar_fn,
outvar_fn=outvar_fn,
lambda_weighting_fn=lambda_weighting_fn,
)
# initialize constraint
super().__init__(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=True,
num_workers=num_workers,
)
class PointwiseInteriorConstraint(PointwiseConstraint):
"""
Pointwise Constraint applied to interior of geometry.
For example, in 3D this will create a constraint on the interior
volume of the given geometry.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
geometry : Geometry
Modulus `Geometry` to apply the constraint with.
outvar : Dict[str, Union[int, float, sp.Basic]]
A dictionary of SymPy Symbols/Expr, floats or int.
This is used to describe the constraint. For example,
`outvar={'u': 0}` would specify `'u'` to be zero everywhere
in the constraint.
batch_size : int
Batch size used in training.
bounds : Dict[sp.Basic, Tuple[float, float]] = None
Bounds of the given geometry,
        (e.g. `bounds={sympy.Symbol('x'): (0, 1), sympy.Symbol('y'): (0, 1)}`).
    criteria : Union[sp.Basic, Callable, None] = None
SymPy criteria function specifies to only apply constraint to areas
that satisfy this criteria. For example, if
`criteria=sympy.Symbol('x')>0` then only areas that have positive
`'x'` values will have the constraint applied to them.
lambda_weighting : Dict[str, Union[int, float, sp.Basic]] = None
The spatial pointwise weighting of the constraint. For example,
        `lambda_weighting={'u': 2.0*sympy.Symbol('x')}` would
apply a pointwise weighting to the loss of `2.0 * x`.
    parameterization : Union[Parameterization, None] = None
This allows adding parameterization or additional inputs.
fixed_dataset : bool = True
        If True then the points for this constraint are sampled once at
        initialization and kept fixed. If False then the points are
        continually resampled.
importance_measure : Union[Callable, None] = None
A callable function that computes a scalar importance measure. This
importance measure is then used in the constraint when sampling
points. Areas with higher importance are sampled more frequently
according to Monte Carlo importance sampling,
https://en.wikipedia.org/wiki/Monte_Carlo_integration.
batch_per_epoch : int = 1000
If `fixed_dataset=True` then the total number of points generated
to apply constraint on is `total_nr_points=batch_per_epoch*batch_size`.
quasirandom : bool = False
If true then sample the points using the Halton sequence.
num_workers : int
        Number of workers used in fetching data.
loss : Loss
Modulus `Loss` module that defines the loss type, (e.g. L2, L1, ...).
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
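    Examples
    --------
    A minimal usage sketch (illustrative; assumes a 2D geometry ``geo``,
    a PDE node producing the residual ``poisson_u``, and a ``Domain``
    called ``domain``)::

        interior = PointwiseInteriorConstraint(
            nodes=nodes,
            geometry=geo,
            outvar={"poisson_u": 0},
            batch_size=4000,
            bounds={sympy.Symbol("x"): (0, 1), sympy.Symbol("y"): (0, 1)},
        )
        domain.add_constraint(interior, "interior")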
"""
def __init__(
self,
nodes: List[Node],
geometry: Geometry,
outvar: Dict[str, Union[int, float, sp.Basic]],
batch_size: int,
bounds: Dict[sp.Basic, Tuple[float, float]] = None,
criteria: Union[sp.Basic, Callable, None] = None,
lambda_weighting: Dict[str, Union[int, float, sp.Basic]] = None,
parameterization: Union[Parameterization, None] = None,
fixed_dataset: bool = True,
compute_sdf_derivatives: bool = False,
importance_measure: Union[Callable, None] = None,
batch_per_epoch: int = 1000,
quasirandom: bool = False,
num_workers: int = 0,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
):
# assert that not using importance measure with continuous dataset
assert not (
(not fixed_dataset) and (importance_measure is not None)
), "Using Importance measure with continuous dataset is not supported"
# if fixed dataset then sample points and fix for all of training
if fixed_dataset:
# sample interior
invar = geometry.sample_interior(
batch_size * batch_per_epoch,
bounds=bounds,
criteria=criteria,
parameterization=parameterization,
quasirandom=quasirandom,
compute_sdf_derivatives=compute_sdf_derivatives,
)
# compute outvar
outvar = _compute_outvar(invar, outvar)
# set lambda weighting
lambda_weighting = _compute_lambda_weighting(
invar, outvar, lambda_weighting
)
# make point dataset
if importance_measure is None:
invar["area"] *= batch_per_epoch # TODO find better way to do this
dataset = DictPointwiseDataset(
invar=invar,
outvar=outvar,
lambda_weighting=lambda_weighting,
)
else:
dataset = DictImportanceSampledPointwiseIterableDataset(
invar=invar,
outvar=outvar,
batch_size=batch_size,
importance_measure=importance_measure,
lambda_weighting=lambda_weighting,
shuffle=shuffle,
)
# else sample points every batch
else:
# invar function
invar_fn = lambda: geometry.sample_interior(
batch_size,
bounds=bounds,
criteria=criteria,
parameterization=parameterization,
quasirandom=quasirandom,
compute_sdf_derivatives=compute_sdf_derivatives,
)
# outvar function
outvar_fn = lambda invar: _compute_outvar(invar, outvar)
# lambda weighting function
lambda_weighting_fn = lambda invar, outvar: _compute_lambda_weighting(
invar, outvar, lambda_weighting
)
# make point dataloader
dataset = ContinuousPointwiseIterableDataset(
invar_fn=invar_fn,
outvar_fn=outvar_fn,
lambda_weighting_fn=lambda_weighting_fn,
)
# initialize constraint
super().__init__(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=True,
num_workers=num_workers,
)
class IntegralConstraint(Constraint):
"""
Base class for all Integral Constraints
"""
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
# rename values and save batch to vtk file TODO clean this up after graph unroll stuff
for i in range(self.batch_size):
save_var = {
key: value[i].cpu().detach().numpy() for key, value in invar.items()
}
var_to_polyvtk(save_var, filename + "_batch_" + str(i))
def load_data(self):
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
self._input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
self._target_vars = Constraint._set_device(true_outvar, device=self.device)
self._lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
def load_data_static(self):
if self._input_vars is None:
# Default loading if vars not allocated
self.load_data()
else:
# get train points from dataloader
invar, true_outvar, lambda_weighting = next(self.dataloader)
# Set grads to false here for inputs, static var has allocation already
input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
target_vars = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(
lambda_weighting, device=self.device
)
for key in input_vars.keys():
self._input_vars[key].data.copy_(input_vars[key])
for key in target_vars.keys():
self._target_vars[key].copy_(target_vars[key])
for key in lambda_weighting.keys():
self._lambda_weighting[key].copy_(lambda_weighting[key])
@property
def output_vars(self) -> Dict[str, Tensor]:
return self._output_vars
@output_vars.setter
def output_vars(self, data: Dict[str, Tensor]):
self._output_vars = {}
for output in self.output_names:
self._output_vars[str(output)] = data[str(output)]
def forward(self):
# compute pred outvar
self._output_vars = self.model(self._input_vars)
def loss(self, step: int) -> Dict[str, torch.Tensor]:
if self._output_vars is None:
            logger.warning("Calling loss without forward call")
return {}
# split for individual integration
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting = (
[],
[],
[],
[],
)
for i in range(self.batch_size):
list_invar.append(
{key: value[i] for key, value in self._input_vars.items()}
)
list_pred_outvar.append(
{key: value[i] for key, value in self._output_vars.items()}
)
list_true_outvar.append(
{key: value[i] for key, value in self._target_vars.items()}
)
list_lambda_weighting.append(
{key: value[i] for key, value in self._lambda_weighting.items()}
)
# compute integral losses
losses = self._loss(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting, step
)
return losses
class IntegralBoundaryConstraint(IntegralConstraint):
"""
Integral Constraint applied to boundary/perimeter/surface of geometry.
For example, in 3D this will create a constraint on the surface of the
given geometry.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
geometry : Geometry
Modulus `Geometry` to apply the constraint with.
outvar : Dict[str, Union[int, float, sp.Basic]]
A dictionary of SymPy Symbols/Expr, floats or int.
This is used to describe the constraint. For example,
`outvar={'u': 0}` would specify the integral of `'u'`
to be zero.
batch_size : int
Number of integrals to apply.
integral_batch_size : int
Batch sized used in the Monte Carlo integration to compute
the integral.
    criteria : Union[sp.Basic, Callable, None] = None
SymPy criteria function specifies to only integrate areas
that satisfy this criteria. For example, if
`criteria=sympy.Symbol('x')>0` then only areas that have positive
`'x'` values will be integrated.
lambda_weighting : Dict[str, Union[int, float, sp.Basic]] = None
The weighting of the constraint. For example,
        `lambda_weighting={'u': 2.0}` would
weight the integral constraint by `2.0`.
parameterization : Union[Parameterization, None]
This allows adding parameterization or additional inputs.
fixed_dataset : bool = True
        If True then the points for this constraint are sampled once at
        initialization and kept fixed. If False then the points are
        continually resampled.
batch_per_epoch : int = 100
If `fixed_dataset=True` then the total number of integrals generated
to apply constraint on is `total_nr_integrals=batch_per_epoch*batch_size`.
quasirandom : bool = False
If true then sample the points using the Halton sequence.
num_workers : int
        Number of workers used in fetching data.
loss : Loss
Modulus `Loss` module that defines the loss type, (e.g. L2, L1, ...).
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
"""
def __init__(
self,
nodes: List[Node],
geometry: Geometry,
outvar: Dict[str, Union[int, float, sp.Basic]],
batch_size: int,
integral_batch_size: int,
criteria: Union[sp.Basic, Callable, None] = None,
lambda_weighting: Dict[str, Union[int, float, sp.Basic]] = None,
parameterization: Union[Parameterization, None] = None,
fixed_dataset: bool = True,
batch_per_epoch: int = 100,
quasirandom: bool = False,
num_workers: int = 0,
loss: Loss = IntegralLossNorm(),
shuffle: bool = True,
):
# convert dict to parameterization if needed
if parameterization is None:
parameterization = geometry.parameterization
elif isinstance(parameterization, dict):
parameterization = Parameterization(parameterization)
# Fixed number of integral examples
if fixed_dataset:
            # sample geometry to generate integral batches
list_invar = []
list_outvar = []
list_lambda_weighting = []
for i in range(batch_size * batch_per_epoch):
# sample parameter ranges
if parameterization:
specific_param_ranges = parameterization.sample(1)
else:
specific_param_ranges = {}
# sample boundary
invar = geometry.sample_boundary(
integral_batch_size,
criteria=criteria,
parameterization=Parameterization(
{
sp.Symbol(key): float(value)
for key, value in specific_param_ranges.items()
}
),
quasirandom=quasirandom,
)
# compute outvar
if (
not specific_param_ranges
): # TODO this can be removed after a np_lambdify rewrite
specific_param_ranges = {"_": next(iter(invar.values()))[0:1]}
outvar_star = _compute_outvar(specific_param_ranges, outvar)
# set lambda weighting
lambda_weighting_star = _compute_lambda_weighting(
specific_param_ranges, outvar, lambda_weighting
)
# store samples
list_invar.append(invar)
list_outvar.append(outvar_star)
list_lambda_weighting.append(lambda_weighting_star)
# make dataset of integral planes
dataset = ListIntegralDataset(
list_invar=list_invar,
list_outvar=list_outvar,
list_lambda_weighting=list_lambda_weighting,
)
# Continuous sampling
else:
# sample parameter ranges
if parameterization:
param_ranges_fn = lambda: parameterization.sample(1)
else:
param_ranges_fn = lambda: {}
# invar function
invar_fn = lambda param_range: geometry.sample_boundary(
integral_batch_size,
criteria=criteria,
parameterization=Parameterization(
{sp.Symbol(key): float(value) for key, value in param_range.items()}
),
quasirandom=quasirandom,
)
# outvar function
outvar_fn = lambda param_range: _compute_outvar(param_range, outvar)
# lambda weighting function
lambda_weighting_fn = lambda param_range, outvar: _compute_lambda_weighting(
param_range, outvar, lambda_weighting
)
# make dataset of integral planes
dataset = ContinuousIntegralIterableDataset(
invar_fn=invar_fn,
outvar_fn=outvar_fn,
batch_size=batch_size,
lambda_weighting_fn=lambda_weighting_fn,
param_ranges_fn=param_ranges_fn,
)
self.batch_size = batch_size
# initialize constraint
super().__init__(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=True,
num_workers=num_workers,
)
class VariationalConstraint(Constraint):
"""
Base class for all Variational Constraints.
B(u, v, g, dom) = \\int_{dom} (F(u, v) - g*v) dx = 0,
where F is an operator, g is a given function/data,
v is the test function.
loss of variational = B1(u1, v1, g1, dom1) + B2(u2, v2, g2, dom2) + ...
"""
def __init__(
self,
nodes: List[Node],
datasets: Dict[str, DictVariationalDataset],
batch_sizes: Dict[str, int],
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
):
# Get DDP manager
self.manager = DistributedManager()
self.device = self.manager.device
if not drop_last and self.manager.cuda_graphs:
logger.info("drop_last must be true when using cuda graphs")
drop_last = True
# make dataloader from dataset
self.data_loaders = {}
invar_keys = []
outvar_keys = []
for name in datasets:
self.data_loaders[name] = iter(
Constraint.get_dataloader(
dataset=datasets[name],
batch_size=batch_sizes[name],
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
)
invar_keys = invar_keys + datasets[name].invar_keys
outvar_keys = outvar_keys + datasets[name].outvar_keys
# construct model from nodes
self.model = Graph(
nodes,
Key.convert_list(list(set(invar_keys))),
Key.convert_list(list(set(outvar_keys))),
)
self.model.to(self.device)
if self.manager.distributed:
# https://pytorch.org/docs/master/notes/cuda.html#id5
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
self.model = DistributedDataParallel(
self.model,
device_ids=[self.manager.local_rank],
output_device=self.device,
broadcast_buffers=self.manager.broadcast_buffers,
find_unused_parameters=self.manager.find_unused_parameters,
process_group=self.manager.group(
"data_parallel"
), # None by default
)
torch.cuda.current_stream().wait_stream(s)
self._input_names = Key.convert_list(list(set(invar_keys)))
self._output_names = Key.convert_list(list(set(outvar_keys)))
self._input_vars = None
self._target_vars = None
self._lambda_weighting = None
# put loss on device
self._loss = loss.to(self.device)
def save_batch(self, filename):
# sample batch
for name, data_loader in self.data_loaders.items():
invar = Constraint._set_device(
next(data_loader), device=self.device, requires_grad=True
)
# If using DDP, strip out collective stuff to prevent deadlocks
            # This only works either when one process alone calls into save_batch
# or when multiple processes independently save data
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
# compute pred outvar
outvar = modl(invar)
named_outvar = {
"pred_" + key: value.cpu().detach().numpy()
for key, value in outvar.items()
}
save_var = {
**{key: value.cpu().detach().numpy() for key, value in invar.items()},
**named_outvar,
}
var_to_polyvtk(save_var, filename + "_" + name)
def load_data(self):
self._input_vars = {}
self._output_vars = {}
for name, data_loader in self.data_loaders.items():
# get train points from dataloader
invar = next(data_loader)
self._input_vars[name] = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
def load_data_static(self):
if self._input_vars is None:
# Default loading if vars not allocated
self.load_data()
else:
for name, data_loader in self.data_loaders.items():
# get train points from dataloader
invar = next(data_loader)
# Set grads to false here for inputs, static var has allocation already
input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
for key in input_vars.keys():
self._input_vars[name][key].data.copy_(input_vars[key])
self._input_vars[name] = Constraint._set_device(
invar, device=self.device, requires_grad=True
)
def forward(self):
# compute pred outvar
for name in self._input_vars.keys():
self._output_vars[name] = self.model(self._input_vars[name])
def loss(self, step):
# compute loss
losses = self._loss(
list(self._input_vars.values()), list(self._output_vars.values()), step
)
return losses
class VariationalDomainConstraint(VariationalConstraint):
"""
Simple Variational Domain Constraint with a single geometry
that represents the domain.
TODO add comprehensive doc string after refactor
"""
def __init__(
self,
nodes: List[Node],
geometry: Geometry,
outvar_names: List[str],
boundary_batch_size: int,
interior_batch_size: int,
interior_bounds: Dict[sp.Basic, Tuple[float, float]] = None,
boundary_criteria: Union[sp.Basic, Callable, None] = None,
interior_criteria: Union[sp.Basic, Callable, None] = None,
parameterization: Union[Parameterization, None] = None,
batch_per_epoch: int = 1000,
quasirandom: bool = False,
num_workers: int = 0,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
):
# sample boundary
invar = geometry.sample_boundary(
boundary_batch_size * batch_per_epoch,
criteria=boundary_criteria,
parameterization=parameterization,
quasirandom=quasirandom,
)
invar["area"] *= batch_per_epoch
# make variational boundary dataset
dataset_boundary = DictVariationalDataset(
invar=invar,
outvar_names=outvar_names,
)
# sample interior
invar = geometry.sample_interior(
interior_batch_size * batch_per_epoch,
bounds=interior_bounds,
criteria=interior_criteria,
parameterization=parameterization,
quasirandom=quasirandom,
)
invar["area"] *= batch_per_epoch
# make variational interior dataset
dataset_interior = DictVariationalDataset(
invar=invar,
outvar_names=outvar_names,
)
datasets = {"boundary": dataset_boundary, "interior": dataset_interior}
batch_sizes = {"boundary": boundary_batch_size, "interior": interior_batch_size}
# initialize constraint
super().__init__(
nodes=nodes,
datasets=datasets,
batch_sizes=batch_sizes,
loss=loss,
shuffle=shuffle,
drop_last=True,
num_workers=num_workers,
)
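# A minimal usage sketch (hypothetical names: assumes `nodes` and a `geo`
# Geometry, with test-function outputs `u__x`, `u__y` produced by the node
# graph):
#
#   variational = VariationalDomainConstraint(
#       nodes=nodes,
#       geometry=geo,
#       outvar_names=["u__x", "u__y"],
#       boundary_batch_size=1024,
#       interior_batch_size=4096,
#   )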
class DeepONetConstraint(PointwiseConstraint):
"""
Base DeepONet Constraint class for all DeepONets
"""
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
true_outvar = Constraint._set_device(true_outvar, device=self.device)
lambda_weighting = Constraint._set_device(lambda_weighting, device=self.device)
# If using DDP, strip out collective stuff to prevent deadlocks
        # This only works either when one process alone calls into save_batch
# or when multiple processes independently save data
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
# compute pred outvar
pred_outvar = modl(invar)
# rename values and save batch to vtk file TODO clean this up after graph unroll stuff
named_lambda_weighting = {
"lambda_" + key: value for key, value in lambda_weighting.items()
}
named_true_outvar = {"true_" + key: value for key, value in true_outvar.items()}
named_pred_outvar = {"pred_" + key: value for key, value in pred_outvar.items()}
save_var = {
**{key: value for key, value in invar.items()},
**named_true_outvar,
**named_pred_outvar,
**named_lambda_weighting,
}
save_var = {
key: value.cpu().detach().numpy() for key, value in save_var.items()
}
np.savez_compressed(filename + ".npz", **save_var)
@classmethod
def from_numpy(
cls,
nodes: List[Node],
invar: Dict[str, np.ndarray],
outvar: Dict[str, np.ndarray],
batch_size: int,
lambda_weighting: Dict[str, np.ndarray] = None,
loss: Loss = PointwiseLossNorm(),
shuffle: bool = True,
drop_last: bool = True,
num_workers: int = 0,
):
"""
Create custom DeepONet constraint from numpy arrays.
Parameters
----------
nodes : List[Node]
List of Modulus Nodes to unroll graph with.
invar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays as input.
outvar : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays to enforce constraint on.
batch_size : int
Batch size used in training.
lambda_weighting : Dict[str, np.ndarray (N, 1)]
Dictionary of numpy arrays to pointwise weight losses.
Default is ones.
loss : Loss
Modulus `Loss` module that defines the loss type, (e.g. L2, L1, ...).
shuffle : bool, optional
Randomly shuffle examples in dataset every epoch, by default True
drop_last : bool, optional
            Drop last mini-batch if dataset is not fully divisible by batch_size, by default True
num_workers : int
            Number of workers used in fetching data.
"""
# make point dataset
dataset = DictPointwiseDataset(
invar=invar,
outvar=outvar,
lambda_weighting=lambda_weighting,
)
return cls(
nodes=nodes,
dataset=dataset,
loss=loss,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
)
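# A minimal usage sketch of `from_numpy` (hypothetical names: assumes numpy
# arrays `x_branch`, `x_trunk` and `u` with matching first dimensions):
#
#   constraint = DeepONetConstraint.from_numpy(
#       nodes=nodes,
#       invar={"x_branch": x_branch, "x_trunk": x_trunk},
#       outvar={"u": u},
#       batch_size=128,
#   )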
| modulus-sym-main | modulus/sym/domain/constraint/continuous.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Modulus main config
"""
import torch
import logging
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from hydra.conf import RunDir, HydraConf
from omegaconf import MISSING, SI
from typing import List, Any
from modulus.sym.constants import JIT_PYTORCH_VERSION
from packaging import version
from .loss import LossConf
from .optimizer import OptimizerConf
from .pde import PDEConf
from .scheduler import SchedulerConf
from .training import TrainingConf, StopCriterionConf
from .profiler import ProfilerConf
from .hydra import default_hydra
logger = logging.getLogger(__name__)
@dataclass
class ModulusConfig:
# General parameters
network_dir: str = "."
initialization_network_dir: str = ""
save_filetypes: str = "vtk"
summary_histograms: bool = False
jit: bool = version.parse(torch.__version__) >= version.parse(JIT_PYTORCH_VERSION)
jit_use_nvfuser: bool = True
jit_arch_mode: str = "only_activation"
jit_autograd_nodes: bool = False
cuda_graphs: bool = True
cuda_graph_warmup: int = 20
find_unused_parameters: bool = False
broadcast_buffers: bool = False
device: str = ""
debug: bool = False
run_mode: str = "train"
arch: Any = MISSING
models: Any = MISSING # List of models
training: TrainingConf = MISSING
stop_criterion: StopCriterionConf = MISSING
loss: LossConf = MISSING
optimizer: OptimizerConf = MISSING
scheduler: SchedulerConf = MISSING
batch_size: Any = MISSING
profiler: ProfilerConf = MISSING
hydra: Any = field(default_factory=lambda: default_hydra)
# User custom parameters that are not used internally in modulus
custom: Any = MISSING
default_defaults = [
{"training": "default_training"},
{"graph": "default"},
{"stop_criterion": "default_stop_criterion"},
{"profiler": "nvtx"},
{"override hydra/job_logging": "info_logging"},
{"override hydra/launcher": "basic"},
{"override hydra/help": "modulus_help"},
{"override hydra/callbacks": "default_callback"},
]
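# A hypothetical user-config sketch building on these defaults (the group
# values, e.g. `fully_connected` or `tf_exponential_lr`, must match entries
# registered in the config store):
#
#   defaults:
#     - modulus_default
#     - arch: fully_connected
#     - optimizer: adam
#     - scheduler: tf_exponential_lr
#     - loss: sum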
@dataclass
class DefaultModulusConfig(ModulusConfig):
# Core defaults
# Can over-ride default with "override" hydra command
defaults: List[Any] = field(default_factory=lambda: default_defaults)
# Modulus config for debugging
debug_defaults = [
{"training": "default_training"},
{"graph": "default"},
{"stop_criterion": "default_stop_criterion"},
{"profiler": "nvtx"},
{"override hydra/job_logging": "debug_logging"},
{"override hydra/help": "modulus_help"},
{"override hydra/callbacks": "default_callback"},
]
@dataclass
class DebugModulusConfig(ModulusConfig):
# Core defaults
# Can over-ride default with "override" hydra command
defaults: List[Any] = field(default_factory=lambda: debug_defaults)
debug: bool = True
# Modulus config with experimental features (use caution)
experimental_defaults = [
{"training": "default_training"},
{"graph": "default"},
{"stop_criterion": "default_stop_criterion"},
{"profiler": "nvtx"},
{"override hydra/job_logging": "info_logging"},
{"override hydra/launcher": "basic"},
{"override hydra/help": "modulus_help"},
{"override hydra/callbacks": "default_callback"},
]
@dataclass
class ExperimentalModulusConfig(ModulusConfig):
# Core defaults
# Can over-ride default with "override" hydra command
defaults: List[Any] = field(default_factory=lambda: experimental_defaults)
pde: PDEConf = MISSING
def register_modulus_configs() -> None:
    if version.parse(torch.__version__) < version.parse(JIT_PYTORCH_VERSION):
        logger.warning(
            "TorchScript default is being turned off due to PyTorch version mismatch."
        )
cs = ConfigStore.instance()
cs.store(
name="modulus_default",
node=DefaultModulusConfig,
)
cs.store(
name="modulus_debug",
node=DebugModulusConfig,
)
cs.store(
name="modulus_experimental",
node=ExperimentalModulusConfig,
)
| modulus-sym-main | modulus/sym/hydra/config.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported Modulus graph configs
"""
import torch
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
@dataclass
class GraphConf:
func_arch: bool = MISSING
func_arch_allow_partial_hessian: bool = MISSING
@dataclass
class DefaultGraphConf(GraphConf):
func_arch: bool = False
func_arch_allow_partial_hessian: bool = True
def register_graph_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="graph",
name="default",
node=DefaultGraphConf,
)
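# A hypothetical command-line sketch overriding these graph options:
#   python train.py graph.func_arch=true graph.func_arch_allow_partial_hessian=false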
| modulus-sym-main | modulus/sym/hydra/graph.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Architecture/Model configs
"""
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, SI, II
from typing import Any, Union, List, Dict, Tuple
@dataclass
class ModelConf:
arch_type: str = MISSING
input_keys: Any = MISSING
output_keys: Any = MISSING
detach_keys: Any = MISSING
scaling: Any = None
@dataclass
class AFNOConf(ModelConf):
arch_type: str = "afno"
img_shape: Tuple[int] = MISSING
patch_size: int = 16
embed_dim: int = 256
depth: int = 4
num_blocks: int = 8
@dataclass
class DistributedAFNOConf(ModelConf):
arch_type: str = "distributed_afno"
img_shape: Tuple[int] = MISSING
patch_size: int = 16
embed_dim: int = 256
depth: int = 4
num_blocks: int = 8
channel_parallel_inputs: bool = False
channel_parallel_outputs: bool = False
@dataclass
class DeepOConf(ModelConf):
arch_type: str = "deeponet"
# branch_net: Union[Arch, str],
# trunk_net: Union[Arch, str],
trunk_dim: Any = None # Union[None, int]
branch_dim: Any = None # Union[None, int]
@dataclass
class FNOConf(ModelConf):
arch_type: str = "fno"
dimension: int = MISSING
# decoder_net: Arch
nr_fno_layers: int = 4
fno_modes: Any = 16 # Union[int, List[int]]
padding: int = 8
padding_type: str = "constant"
activation_fn: str = "gelu"
coord_features: bool = True
@dataclass
class FourierConf(ModelConf):
arch_type: str = "fourier"
frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
activation_fn: str = "silu"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
adaptive_activations: bool = False
@dataclass
class FullyConnectedConf(ModelConf):
arch_type: str = "fully_connected"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
activation_fn: str = "silu"
adaptive_activations: bool = False
weight_norm: bool = True
@dataclass
class ConvFullyConnectedConf(ModelConf):
arch_type: str = "conv_fully_connected"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
activation_fn: str = "silu"
adaptive_activations: bool = False
weight_norm: bool = True
@dataclass
class FusedMLPConf(ModelConf):
arch_type: str = "fused_fully_connected"
layer_size: int = 128
nr_layers: int = 6
activation_fn: str = "sigmoid"
@dataclass
class FusedFourierNetConf(ModelConf):
arch_type: str = "fused_fourier"
layer_size: int = 128
nr_layers: int = 6
activation_fn: str = "sigmoid"
n_frequencies: int = 12
@dataclass
class FusedGridEncodingNetConf(ModelConf):
arch_type: str = "fused_hash_encoding"
layer_size: int = 128
nr_layers: int = 6
activation_fn: str = "sigmoid"
indexing: str = "Hash"
n_levels: int = 16
n_features_per_level: int = 2
log2_hashmap_size: int = 19
base_resolution: int = 16
per_level_scale: float = 2.0
interpolation: str = "Smoothstep"
@dataclass
class MultiresolutionHashNetConf(ModelConf):
arch_type: str = "hash_encoding"
layer_size: int = 64
nr_layers: int = 3
skip_connections: bool = False
weight_norm: bool = True
adaptive_activations: bool = False
bounds: Any = "[(1.0, 1.0), (1.0, 1.0)]"
nr_levels: int = 16
nr_features_per_level: int = 2
log2_hashmap_size: int = 19
base_resolution: int = 2
finest_resolution: int = 32
@dataclass
class HighwayFourierConf(ModelConf):
arch_type: str = "highway_fourier"
frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
activation_fn: str = "silu"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
adaptive_activations: bool = False
transform_fourier_features: bool = True
project_fourier_features: bool = False
@dataclass
class ModifiedFourierConf(ModelConf):
arch_type: str = "modified_fourier"
frequencies: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
frequencies_params: Any = "('axis', [0, 1, 2, 3, 4, 5, 6, 7, 8, 9])"
activation_fn: str = "silu"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
adaptive_activations: bool = False
@dataclass
class MultiplicativeFilterConf(ModelConf):
arch_type: str = "multiplicative_fourier"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
activation_fn: str = "identity"
filter_type: str = "fourier"
weight_norm: bool = True
input_scale: float = 10.0
gabor_alpha: float = 6.0
gabor_beta: float = 1.0
normalization: Any = (
None # Change to Union[None, Dict[str, Tuple[float, float]]] when supported
)
@dataclass
class MultiscaleFourierConf(ModelConf):
arch_type: str = "multiscale_fourier"
frequencies: Any = field(default_factory=lambda: [32])
frequencies_params: Any = None
activation_fn: str = "silu"
layer_size: int = 512
nr_layers: int = 6
skip_connections: bool = False
weight_norm: bool = True
adaptive_activations: bool = False
@dataclass
class Pix2PixConf(ModelConf):
arch_type: str = "pix2pix"
dimension: int = MISSING
conv_layer_size: int = 64
n_downsampling: int = 3
n_blocks: int = 3
scaling_factor: int = 1
batch_norm: bool = True
padding_type: str = "reflect"
activation_fn: str = "relu"
@dataclass
class SirenConf(ModelConf):
arch_type: str = "siren"
layer_size: int = 512
nr_layers: int = 6
first_omega: float = 30.0
omega: float = 30.0
normalization: Any = (
None # Change to Union[None, Dict[str, Tuple[float, float]]] when supported
)
@dataclass
class SRResConf(ModelConf):
arch_type: str = "super_res"
large_kernel_size: int = 7
small_kernel_size: int = 3
conv_layer_size: int = 32
n_resid_blocks: int = 8
scaling_factor: int = 8
activation_fn: str = "prelu"
def register_arch_configs() -> None:
# Information regarding multiple config groups
# https://hydra.cc/docs/next/patterns/select_multiple_configs_from_config_group/
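    # A hypothetical sketch of selecting several arch groups at once in a
    # user config (per the Hydra pattern linked above):
    #   defaults:
    #     - modulus_default
    #     - arch: [fully_connected, fourier]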
cs = ConfigStore.instance()
cs.store(
group="arch",
name="fused_fully_connected",
node={"fused_fully_connected": FusedMLPConf()},
)
cs.store(
group="arch",
name="fused_fourier",
node={"fused_fourier": FusedFourierNetConf()},
)
cs.store(
group="arch",
name="fused_hash_encoding",
node={"fused_hash_encoding": FusedGridEncodingNetConf()},
)
cs.store(
group="arch",
name="fully_connected",
node={"fully_connected": FullyConnectedConf()},
)
cs.store(
group="arch",
name="conv_fully_connected",
node={"conv_fully_connected": ConvFullyConnectedConf()},
)
cs.store(
group="arch",
name="fourier",
node={"fourier": FourierConf()},
)
cs.store(
group="arch",
name="highway_fourier",
node={"highway_fourier": HighwayFourierConf()},
)
cs.store(
group="arch",
name="modified_fourier",
node={"modified_fourier": ModifiedFourierConf()},
)
cs.store(
group="arch",
name="multiplicative_fourier",
node={"multiplicative_fourier": MultiplicativeFilterConf()},
)
cs.store(
group="arch",
name="multiscale_fourier",
node={"multiscale_fourier": MultiscaleFourierConf()},
)
cs.store(
group="arch",
name="siren",
node={"siren": SirenConf()},
)
cs.store(
group="arch",
name="hash_encoding",
node={"hash_encoding": MultiresolutionHashNetConf()},
)
cs.store(
group="arch",
name="fno",
node={"fno": FNOConf()},
)
cs.store(
group="arch",
name="afno",
node={"afno": AFNOConf()},
)
cs.store(
group="arch",
name="distributed_afno",
node={"distributed_afno": DistributedAFNOConf()},
)
cs.store(
group="arch",
name="deeponet",
node={"deeponet": DeepOConf()},
)
cs.store(
group="arch",
name="super_res",
node={"super_res": SRResConf()},
)
cs.store(
group="arch",
name="pix2pix",
node={"pix2pix": Pix2PixConf()},
)
# Schemas for extending models
# Info: https://hydra.cc/docs/next/patterns/extending_configs/
cs.store(
group="arch",
name="fully_connected_cfg",
node=FullyConnectedConf,
)
cs.store(
group="arch",
name="conv_fully_connected_cfg",
node=ConvFullyConnectedConf,
)
cs.store(
group="arch",
name="fused_mlp_cfg",
node=FusedMLPConf,
)
cs.store(
group="arch",
name="fused_fourier_net_cfg",
node=FusedFourierNetConf,
)
cs.store(
group="arch",
name="fused_grid_encoding_net_cfg",
node=FusedGridEncodingNetConf,
)
cs.store(
group="arch",
name="fourier_cfg",
node=FourierConf,
)
cs.store(
group="arch",
name="highway_fourier_cfg",
node=HighwayFourierConf,
)
cs.store(
group="arch",
name="modified_fourier_cfg",
node=ModifiedFourierConf,
)
cs.store(
group="arch",
name="multiplicative_fourier_cfg",
node=MultiplicativeFilterConf,
)
cs.store(
group="arch",
name="multiscale_fourier_cfg",
node=MultiscaleFourierConf,
)
cs.store(
group="arch",
name="siren_cfg",
node=SirenConf,
)
cs.store(
group="arch",
name="hash_net_cfg",
node=MultiresolutionHashNetConf,
)
cs.store(
group="arch",
name="fno_cfg",
node=FNOConf,
)
cs.store(
group="arch",
name="afno_cfg",
node=AFNOConf,
)
cs.store(
group="arch",
name="distributed_afno_cfg",
node=DistributedAFNOConf,
)
cs.store(
group="arch",
name="deeponet_cfg",
node=DeepOConf,
)
cs.store(
group="arch",
name="super_res_cfg",
node=SRResConf,
)
cs.store(
group="arch",
name="pix2pix_cfg",
node=Pix2PixConf,
)
| modulus-sym-main | modulus/sym/hydra/arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .utils import (
instantiate_arch,
instantiate_optim,
instantiate_agg,
instantiate_sched,
to_yaml,
add_hydra_run_path,
to_absolute_path,
)
from .config import ModulusConfig
| modulus-sym-main | modulus/sym/hydra/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Hydra related configs
"""
import pathlib
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from hydra.conf import RunDir
from omegaconf import OmegaConf, MISSING, SI, II
from typing import Any, Union, List, Dict
@dataclass
class SimpleFormat:
format: str = "[%(asctime)s] - %(message)s"
datefmt: str = "%H:%M:%S"
@dataclass
class DebugFormat:
format: str = "[%(levelname)s][%(asctime)s][%(module)s] - %(message)s"
datefmt: str = "%Y-%m-%d %H:%M:%S"
info_console_handler = {
"class": "logging.StreamHandler",
"formatter": "simple",
"stream": "ext://sys.stdout",
}
@dataclass
class DefaultLogging:
version: int = 1
formatters: Any = field(default_factory=lambda: {"simple": SimpleFormat})
handlers: Any = field(default_factory=lambda: {"console": info_console_handler})
root: Any = field(default_factory=lambda: {"handlers": ["console"]})
disable_existing_loggers: bool = False
level: int = (
20 # CRITICAL: 50, ERROR: 40, WARNING: 30, INFO: 20, DEBUG: 10, NOTSET: 0
)
debug_console_handler = {
"class": "logging.StreamHandler",
"formatter": "debug",
"stream": "ext://sys.stdout",
}
@dataclass
class DebugLogging:
version: int = 1
formatters: Any = field(default_factory=lambda: {"debug": DebugFormat})
handlers: Any = field(default_factory=lambda: {"console": debug_console_handler})
root: Any = field(default_factory=lambda: {"handlers": ["console"]})
disable_existing_loggers: bool = False
level: int = (
0 # CRITICAL: 50, ERROR: 40, WARNING: 30, INFO: 20, DEBUG: 10, NOTSET: 0
)
# Hydra defaults group parameters for modulus
file_path = pathlib.Path(__file__).parent.resolve()
modulus_help = OmegaConf.load(file_path / "help.yaml")
# Standard Hydra parameters
default_hydra = {
"run": {"dir": SI("outputs/${hydra:job.override_dirname}/${hydra:job.name}")},
"sweep": {"dir": "multirun", "subdir": SI("${hydra.job.override_dirname}")},
"verbose": SI("${debug}"),
}
def register_hydra_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="hydra/job_logging",
name="info_logging",
node=DefaultLogging,
)
cs.store(
group="hydra/job_logging",
name="debug_logging",
node=DebugLogging,
)
cs.store(
group="hydra/help",
name="modulus_help",
node=modulus_help,
)
| modulus-sym-main | modulus/sym/hydra/hydra.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Placeholder for evaluation metrics
"""
def register_metric_configs() -> None:
pass
| modulus-sym-main | modulus/sym/hydra/metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PDE configs
Architecture params need to be updated to be more primitive-focused
This file is largely a placeholder right now
"""
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, SI, II
from typing import Any, Union, List, Dict
@dataclass
class PDEConf:
_target_: str = MISSING
@dataclass
class AdvectionDiffusionConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.advection_diffusion.AdvectionDiffusion"
T: str = "T"
D: str = "D"
Q: int = 0
rho: str = "rho"
dim: int = 3
time: bool = False
@dataclass
class DiffusionConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.diffusion.Diffusion"
T: str = "T"
D: str = "D"
Q: int = 0
dim: int = 3
time: bool = False
@dataclass
class MaxwellFreqRealConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.electromagnetic.MaxwellFreqReal"
ux: str = "ux"
uy: str = "uy"
uz: str = "uz"
k: float = 1.0
@dataclass
class EnergyFluidConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.energy_equation.EnergyFluid"
cp: str = "cp"
kappa: str = "kappa"
rho: str = "rho"
nu: str = "nu"
visc_heating: bool = False
dim: int = 3
time: bool = False
@dataclass
class LinearElasticityConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.linear_elasticity.LinearElasticity"
E = 10
nu = 0.3
lambda_ = None
mu = None
rho: int = 1
dim: int = 3
time: bool = False
@dataclass
class LinearElasticityPlaneConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.linear_elasticity.LinearElasticityPlaneStress"
E = 10
nu = 0.3
lambda_ = None
mu = None
rho: int = 1
time: bool = False
@dataclass
class NavierStokesConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.navier_stokes.NavierStokes"
nu = MISSING
rho: float = 1
dim: int = 3
time: bool = True
@dataclass
class ZeroEquationConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.turbulence_zero_eq.ZeroEquation"
nu = MISSING
rho: float = 1
dim: int = 3
time: bool = True
@dataclass
class WaveEquationConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.wave_equation.WaveEquation"
u = "u"
c = "c"
dim: int = 3
time: bool = True
@dataclass
class HelmholtzEquationConf(PDEConf):
_target_: str = "modulus.sym.eq.pdes.wave_equation.HelmholtzEquation"
u = MISSING
K = MISSING
dim: int = 3
def register_pde_configs() -> None:
# TODO: Allow multiple pdes via multiple config groups
# https://hydra.cc/docs/next/patterns/select_multiple_configs_from_config_group/
cs = ConfigStore.instance()
cs.store(
group="pde",
name="advection-diffusion",
node=AdvectionDiffusionConf,
)
cs.store(
group="pde",
name="diffusion",
node=DiffusionConf,
)
cs.store(
group="pde",
name="maxwell-real",
node=MaxwellFreqRealConf,
)
cs.store(
group="pde",
name="energy-fluid",
node=EnergyFluidConf,
)
cs.store(
group="pde",
name="linear-elasticity",
node=LinearElasticityConf,
)
cs.store(
group="pde",
name="linear-elasticity-plane",
node=LinearElasticityPlaneConf,
)
cs.store(
group="pde",
name="navier-stokes",
node=NavierStokesConf,
)
cs.store(
group="pde",
name="zero-eq-turbulence",
node=ZeroEquationConf,
)
cs.store(
group="pde",
name="wave",
node=WaveEquationConf,
)
cs.store(
group="pde",
name="helmholtz",
node=HelmholtzEquationConf,
)
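# A hypothetical sketch of instantiating a registered PDE config (assumes,
# e.g., "pde: navier-stokes" is selected in the experimental config defaults):
#   ns = hydra.utils.instantiate(cfg.pde)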
| modulus-sym-main | modulus/sym/hydra/pde.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported Modulus loss aggregator configs
"""
import torch
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
from typing import Any
@dataclass
class LossConf:
_target_: str = MISSING
weights: Any = None
@dataclass
class AggregatorSumConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.Sum"
@dataclass
class AggregatorGradNormConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.GradNorm"
alpha: float = 1.0
@dataclass
class AggregatorResNormConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.ResNorm"
alpha: float = 1.0
@dataclass
class AggregatorHomoscedasticConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.HomoscedasticUncertainty"
@dataclass
class AggregatorLRAnnealingConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.LRAnnealing"
update_freq: int = 1
alpha: float = 0.01
ref_key: Any = None # Change to Union[None, str] when supported by hydra
eps: float = 1e-8
@dataclass
class AggregatorSoftAdaptConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.SoftAdapt"
eps: float = 1e-8
@dataclass
class AggregatorRelobraloConf(LossConf):
_target_: str = "modulus.sym.loss.aggregator.Relobralo"
alpha: float = 0.95
beta: float = 0.99
tau: float = 1.0
eps: float = 1e-8
@dataclass
class NTKConf:
use_ntk: bool = False
save_name: Any = None # Union[str, None]
run_freq: int = 1000
def register_loss_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="loss",
name="sum",
node=AggregatorSumConf,
)
cs.store(
group="loss",
name="grad_norm",
node=AggregatorGradNormConf,
)
cs.store(
group="loss",
name="res_norm",
node=AggregatorResNormConf,
)
cs.store(
group="loss",
name="homoscedastic",
node=AggregatorHomoscedasticConf,
)
cs.store(
group="loss",
name="lr_annealing",
node=AggregatorLRAnnealingConf,
)
cs.store(
group="loss",
name="soft_adapt",
node=AggregatorSoftAdaptConf,
)
cs.store(
group="loss",
name="relobralo",
node=AggregatorRelobraloConf,
)
| modulus-sym-main | modulus/sym/hydra/loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import hydra
import os
import torch
import logging
import copy
import pprint
from termcolor import colored
from pathlib import Path
from omegaconf import DictConfig, OmegaConf, MISSING
from typing import Optional, Any, Union, List
from hydra._internal.utils import _run_hydra, get_args_parser
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd
from modulus.sym.key import Key
from modulus.sym.models.arch import Arch
from modulus.sym.distributed import DistributedManager
from modulus.sym.models.utils import ModulusModels
from modulus.sym.models.layers import Activation
from .arch import ModelConf
from .config import register_modulus_configs, ModulusConfig
from .hydra import register_hydra_configs
from .loss import register_loss_configs
from .metric import register_metric_configs
from .arch import register_arch_configs
from .optimizer import register_optimizer_configs
from .pde import register_pde_configs
from .profiler import register_profiler_configs
from .scheduler import register_scheduler_configs
from .training import register_training_configs
from .callbacks import register_callbacks_configs
from .graph import register_graph_configs
logger = logging.getLogger(__name__)
def main(config_path: str, config_name: str = "config"):
"""Modified decorator for loading hydra configs in modulus
See: https://github.com/facebookresearch/hydra/blob/main/hydra/main.py
"""
def register_decorator(func):
@functools.wraps(func)
def func_decorated(cfg_passthrough: Optional[DictConfig] = None) -> Any:
# Register all modulus groups before calling hydra main
register_hydra_configs()
register_callbacks_configs()
register_loss_configs()
register_metric_configs()
register_arch_configs()
register_optimizer_configs()
register_pde_configs()
register_profiler_configs()
register_scheduler_configs()
register_training_configs()
register_modulus_configs()
register_graph_configs()
# Set number of intraop torch CPU threads
torch.set_num_threads(1) # TODO: define this as a hydra config somehow
# Setup distributed process config
DistributedManager.initialize()
# Create model parallel process group
model_parallel_size = os.getenv(
"MODEL_PARALLEL_SIZE"
) # TODO: get this from config instead
if model_parallel_size:
# Create model parallel process group
DistributedManager.create_process_subgroup(
"model_parallel", int(model_parallel_size), verbose=True
)
# Create data parallel process group for DDP allreduce
DistributedManager.create_orthogonal_process_group(
"data_parallel", "model_parallel", verbose=True
)
# Pass through dict config
if cfg_passthrough is not None:
return func(cfg_passthrough)
else:
                args_parser = get_args_parser()
                args = args_parser.parse_args()
                # no return value from _run_hydra() as it may sometimes
                # actually run the task multiple times (--multirun)
                _run_hydra(
                    args=args,
args_parser=args_parser,
task_function=func,
config_path=config_path,
config_name=config_name,
)
return func_decorated
return register_decorator
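# A minimal usage sketch of this decorator in a training script (hypothetical
# paths: assumes a `conf/config.yaml` next to the script):
#
#   import modulus.sym
#   from modulus.sym.hydra import ModulusConfig
#
#   @modulus.sym.main(config_path="conf", config_name="config")
#   def run(cfg: ModulusConfig) -> None:
#       ...
#
#   if __name__ == "__main__":
#       run()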
def compose(
config_name: Optional[str] = None,
config_path: Optional[str] = None,
overrides: List[str] = [],
return_hydra_config: bool = False,
job_name: Optional[str] = "app",
caller_stack_depth: int = 2,
) -> DictConfig:
"""Internal Modulus config initializer and compose function.
This is an alternative for initializing a Hydra config which should be used
    as a last-ditch effort in cases where @modulus.sym.main() cannot work. For more info
see: https://hydra.cc/docs/advanced/compose_api/
Parameters
----------
config_name : str
Modulus config name
config_path : str
Path to config file relative to the caller at location caller_stack_depth
overrides : list of strings
List of overrides
return_hydra_config : bool
Return the hydra options in the dict config
job_name : string
Name of program run instance
caller_stack_depth : int
Stack depth of this function call (needed for finding config relative to python).
"""
# Clear already initialized hydra
hydra.core.global_hydra.GlobalHydra.instance().clear()
hydra.initialize(
config_path,
job_name,
caller_stack_depth,
)
register_hydra_configs()
register_callbacks_configs()
register_loss_configs()
register_metric_configs()
register_arch_configs()
register_optimizer_configs()
register_pde_configs()
register_profiler_configs()
register_scheduler_configs()
register_training_configs()
register_modulus_configs()
register_graph_configs()
cfg = hydra.compose(
config_name=config_name,
overrides=overrides,
return_hydra_config=return_hydra_config,
)
return cfg
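# A hypothetical sketch of the compose API, e.g. inside a notebook where the
# decorator above cannot be used:
#   cfg = compose(config_name="config", config_path="conf", job_name="notebook")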
def instantiate_arch(
cfg: ModelConf,
input_keys: Union[List[Key], None] = None,
output_keys: Union[List[Key], None] = None,
detach_keys: Union[List[Key], None] = None,
verbose: bool = False,
**kwargs,
) -> Arch:
# Function for instantiating a modulus architecture with hydra
assert hasattr(
cfg, "arch_type"
), "Model configs are required to have an arch_type defined. \
Improper architecture supplied, please make sure config \
provided is a single arch config NOT the full hydra config!"
try:
# Convert to python dictionary
model_cfg = OmegaConf.to_container(cfg, resolve=True)
        # Get model class based on arch type
modulus_models = ModulusModels()
model_arch = modulus_models[model_cfg["arch_type"]]
del model_cfg["arch_type"]
# Add keys if present
if not input_keys is None:
model_cfg["input_keys"] = input_keys
if not output_keys is None:
model_cfg["output_keys"] = output_keys
if not detach_keys is None:
model_cfg["detach_keys"] = detach_keys
# Add any additional kwargs
for key, value in kwargs.items():
model_cfg[key] = value
# Init model from config dictionary
model, param = model_arch.from_config(model_cfg)
# Verbose printing
if verbose:
pp = pprint.PrettyPrinter(indent=4)
logger.info(f"Initialized models with parameters: \n")
pp.pprint(param)
except Exception as e:
fail = colored(f"Failed to initialize architecture.\n {model_cfg}", "red")
raise Exception(fail) from e
return model
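# A hypothetical usage sketch (assumes a hydra cfg with a `fully_connected`
# arch group selected in its defaults):
#
#   flow_net = instantiate_arch(
#       input_keys=[Key("x"), Key("y")],
#       output_keys=[Key("u"), Key("v"), Key("p")],
#       cfg=cfg.arch.fully_connected,
#   )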
def instantiate_optim(
cfg: DictConfig, model: torch.nn.Module, verbose: bool = False
) -> torch.optim.Optimizer:
# Function for instantiating an optimizer with hydra
# Remove custom parameters used internally in modulus
optim_cfg = copy.deepcopy(cfg.optimizer)
del optim_cfg._params_
try:
optimizer = hydra.utils.instantiate(optim_cfg, params=model.parameters())
except Exception as e:
fail = colored("Failed to initialize optimizer: \n", "red")
logger.error(fail + to_yaml(optim_cfg))
raise Exception(fail) from e
if verbose:
pp = pprint.PrettyPrinter(indent=4)
logger.info(f"Initialized optimizer: \n")
pp.pprint(optimizer)
return optimizer
def instantiate_sched(
cfg: DictConfig, optimizer: torch.optim
) -> torch.optim.lr_scheduler:
# Function for instantiating a scheduler with hydra
sched_cfg = copy.deepcopy(cfg.scheduler)
# Default is no scheduler, so just make fixed LR
if sched_cfg is MISSING:
        # Use a DictConfig so the `_target_` attribute access below also works
        # for this default
        sched_cfg = DictConfig(
            {
                "_target_": "torch.optim.lr_scheduler.ConstantLR",
                "factor": 1.0,
            }
        )
# Handle custom cases
if sched_cfg._target_ == "custom":
if "tf.ExponentialLR" in sched_cfg._name_:
sched_cfg = {
"_target_": "torch.optim.lr_scheduler.ExponentialLR",
"gamma": sched_cfg.decay_rate ** (1.0 / sched_cfg.decay_steps),
}
else:
logger.warn("Detected unsupported custom scheduler", sched_cfg)
try:
scheduler = hydra.utils.instantiate(sched_cfg, optimizer=optimizer)
except Exception as e:
fail = colored("Failed to initialize scheduler: \n", "red")
logger.error(fail + to_yaml(sched_cfg))
raise Exception(fail) from e
return scheduler
def instantiate_agg(cfg: DictConfig, model: torch.nn.Module, num_losses: int = 1):
# Function for instantiating a loss aggregator with hydra
try:
aggregator = hydra.utils.instantiate(
cfg.loss,
model,
num_losses,
_convert_="all",
)
except Exception as e:
fail = colored("Failed to initialize loss aggregator: \n", "red")
logger.error(fail + to_yaml(cfg.loss))
raise Exception(fail) from e
return aggregator
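# A hypothetical sketch wiring the optimizer, scheduler and aggregator
# together for a model `flow_net`:
#   optimizer = instantiate_optim(cfg, model=flow_net)
#   scheduler = instantiate_sched(cfg, optimizer)
#   aggregator = instantiate_agg(cfg, model=flow_net, num_losses=2)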
def to_yaml(cfg: DictConfig):
"""Converges dict config into a YML string"""
return OmegaConf.to_yaml(cfg)
def add_hydra_run_path(path: Union[str, Path]) -> Path:
"""Prepends current hydra run path"""
working_dir = Path(os.getcwd())
# Working directory only present with @modulus.main()
if HydraConfig.initialized():
org_dir = Path(get_original_cwd())
hydra_dir = working_dir.relative_to(org_dir) / Path(path)
else:
hydra_dir = working_dir / Path(path)
if isinstance(path, str):
hydra_dir = str(hydra_dir)
return hydra_dir
def to_absolute_path(*args: Union[str, Path]):
"""Converts file path to absolute path based on run file location
Modified from: https://github.com/facebookresearch/hydra/blob/main/hydra/utils.py
"""
out = ()
for path in args:
p = Path(path)
if not HydraConfig.initialized():
base = Path(os.getcwd())
else:
ret = HydraConfig.get().runtime.cwd
base = Path(ret)
if p.is_absolute():
ret = p
else:
ret = base / p
if isinstance(path, str):
out = out + (str(ret),)
else:
out = out + (ret,)
if len(args) == 1:
out = out[0]
return out
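# A hypothetical sketch resolving a data file relative to the run script:
#   train_path = to_absolute_path("data/train.npz")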
| modulus-sym-main | modulus/sym/hydra/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported Modulus callback configs
"""
import torch
import logging
from dataclasses import dataclass
from omegaconf import DictConfig
from hydra.core.config_store import ConfigStore
from hydra.experimental.callback import Callback
from typing import Any
from omegaconf import DictConfig, OmegaConf
from modulus.sym.distributed import DistributedManager
from modulus.sym.manager import JitManager, JitArchMode, GraphManager
logger = logging.getLogger(__name__)
class ModulusCallback(Callback):
def on_job_start(self, config: DictConfig, **kwargs: Any) -> None:
# Update dist manager singleton with config parameters
manager = DistributedManager()
manager.broadcast_buffers = config.broadcast_buffers
manager.find_unused_parameters = config.find_unused_parameters
manager.cuda_graphs = config.cuda_graphs
# jit manager
jit_manager = JitManager()
jit_manager.init(
config.jit,
config.jit_arch_mode,
config.jit_use_nvfuser,
config.jit_autograd_nodes,
)
# graph manager
graph_manager = GraphManager()
graph_manager.init(
config.graph.func_arch,
config.graph.func_arch_allow_partial_hessian,
config.debug,
)
# The FuncArch does not work with TorchScript at all, so we raise
# a warning and disabled it.
if config.graph.func_arch and jit_manager.enabled:
jit_manager.enabled = False
logger.warning("Disabling JIT because functorch does not work with it.")
logger.info(jit_manager)
logger.info(graph_manager)
DefaultCallbackConfigs = DictConfig(
{
"modulus_callback": OmegaConf.create(
{
"_target_": "modulus.sym.hydra.callbacks.ModulusCallback",
}
)
}
)
def register_callbacks_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="hydra/callbacks",
name="default_callback",
node=DefaultCallbackConfigs,
)
| modulus-sym-main | modulus/sym/hydra/callbacks.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported optimizer configs
"""
import torch
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from typing import List, Any
from omegaconf import MISSING
@dataclass
class OptimizerConf:
_target_ = MISSING
_params_: Any = field(
default_factory=lambda: {
"compute_gradients": "adam_compute_gradients",
"apply_gradients": "adam_apply_gradients",
}
)
@dataclass
class AdamConf(OptimizerConf):
_target_: str = "torch.optim.Adam"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
amsgrad: bool = False
@dataclass
class SGDConf(OptimizerConf):
_target_: str = "torch.optim.SGD"
lr: float = 1.0e-3
momentum: float = 1.0e-2
dampening: float = 0
weight_decay: float = 0
nesterov: bool = False
@dataclass
class AdahessianConf(OptimizerConf):
_target_: str = "torch_optimizer.Adahessian"
lr: float = 1.0e-1
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-4
weight_decay: float = 0.0
hessian_power: float = 1.0
_params_: Any = field(
default_factory=lambda: {
"compute_gradients": "adahess_compute_gradients",
"apply_gradients": "adahess_apply_gradients",
}
)
@dataclass
class BFGSConf(OptimizerConf):
_target_: str = "torch.optim.LBFGS"
lr: float = 1.0
max_iter: int = 1000
max_eval: Any = None
tolerance_grad: float = 1e-7
tolerance_change: float = 1e-9
history_size: int = 100
line_search_fn: Any = None # Union[None, str]
_params_: Any = field(
default_factory=lambda: {
"compute_gradients": "bfgs_compute_gradients",
"apply_gradients": "bfgs_apply_gradients",
}
)
@dataclass
class AdadeltaConf(OptimizerConf):
_target_: str = "torch.optim.Adadelta"
lr: float = 1.0
rho: float = 0.9
eps: float = 1e-6
weight_decay: float = 0
@dataclass
class AdagradConf(OptimizerConf):
_target_: str = "torch.optim.Adagrad"
lr: float = 1.0e-2
lr_decay: float = 0
weight_decay: float = 0
initial_accumulator_value: float = 0
eps: float = 1e-10
@dataclass
class AdamWConf(OptimizerConf):
_target_: str = "torch.optim.AdamW"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0.01
amsgrad: bool = False
@dataclass
class SparseAdamConf(OptimizerConf):
_target_: str = "torch.optim.SparseAdam"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
@dataclass
class AdamaxConf(OptimizerConf):
_target_: str = "torch.optim.Adamax"
lr: float = 2.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class ASGDConf(OptimizerConf):
_target_: str = "torch.optim.ASGD"
lr: float = 1.0e-2
lambd: float = 1.0e-4
alpha: float = 0.75
t0: float = 1000000.0
weight_decay: float = 0
@dataclass
class NAdamConf(OptimizerConf):
_target_: str = "torch.optim.NAdam"
lr: float = 2.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
momentum_decay: float = 0.004
@dataclass
class RAdamConf(OptimizerConf):
_target_: str = "torch.optim.RAdam"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class RMSpropConf(OptimizerConf):
_target_: str = "torch.optim.RMSprop"
lr: float = 1.0e-2
alpha: float = 0.99
eps: float = 1e-8
weight_decay: float = 0
momentum: float = 0
centered: bool = False
@dataclass
class RpropConf(OptimizerConf):
_target_: str = "torch.optim.Rprop"
lr: float = 1.0e-2
etas: List[float] = field(default_factory=lambda: [0.5, 1.2])
step_sizes: List[float] = field(default_factory=lambda: [1.0e-6, 50])
@dataclass
class A2GradExpConf(OptimizerConf):
_target_: str = "torch_optimizer.A2GradExp"
    lr: float = 1e-2  # LR not supported for optim, but needed to not fail schedulers
beta: float = 10.0
lips: float = 10.0
@dataclass
class A2GradIncConf(OptimizerConf):
_target_: str = "torch_optimizer.A2GradInc"
    lr: float = 1e-2  # LR not supported for optim, but needed to not fail schedulers
beta: float = 10.0
lips: float = 10.0
@dataclass
class A2GradUniConf(OptimizerConf):
_target_: str = "torch_optimizer.A2GradUni"
    lr: float = 1e-2  # LR not supported for optim, but needed to not fail schedulers
beta: float = 10.0
lips: float = 10.0
@dataclass
class AccSGDConf(OptimizerConf):
_target_: str = "torch_optimizer.AccSGD"
lr: float = 1.0e-3
kappa: float = 1000.0
xi: float = 10.0
small_const: float = 0.7
weight_decay: float = 0
@dataclass
class AdaBeliefConf(OptimizerConf):
_target_: str = "torch_optimizer.AdaBelief"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1.0e-3
weight_decay: float = 0
amsgrad: bool = False
weight_decouple: bool = False
fixed_decay: bool = False
rectify: bool = False
@dataclass
class AdaBoundConf(OptimizerConf):
_target_: str = "torch_optimizer.AdaBound"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
final_lr: float = 0.1
gamma: float = 1e-3
eps: float = 1e-8
weight_decay: float = 0
amsbound: bool = False
@dataclass
class AdaModConf(OptimizerConf):
_target_: str = "torch_optimizer.AdaMod"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
beta3: float = 0.999
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class AdafactorConf(OptimizerConf):
_target_: str = "torch_optimizer.Adafactor"
lr: float = 1.0e-3
eps2: List[float] = field(default_factory=lambda: [1e-30, 1e-3])
clip_threshold: float = 1.0
decay_rate: float = -0.8
beta1: Any = None
weight_decay: float = 0
scale_parameter: bool = True
relative_step: bool = True
warmup_init: bool = False
@dataclass
class AdamPConf(OptimizerConf):
_target_: str = "torch_optimizer.AdamP"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
delta: float = 0.1
wd_ratio: float = 0.1
@dataclass
class AggMoConf(OptimizerConf):
_target_: str = "torch_optimizer.AggMo"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.0, 0.9, 0.99])
weight_decay: float = 0
@dataclass
class ApolloConf(OptimizerConf):
_target_: str = "torch_optimizer.Apollo"
lr: float = 1.0e-2
beta: float = 0.9
eps: float = 1e-4
warmup: int = 0
init_lr: float = 0.01
weight_decay: float = 0
@dataclass
class DiffGradConf(OptimizerConf):
_target_: str = "torch_optimizer.DiffGrad"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class LambConf(OptimizerConf):
_target_: str = "torch_optimizer.Lamb"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
@dataclass
class MADGRADConf(OptimizerConf):
_target_: str = "torch_optimizer.MADGRAD"
lr: float = 1.0e-2
momentum: float = 0.9
weight_decay: float = 0
eps: float = 1e-6
@dataclass
class NovoGradConf(OptimizerConf):
_target_: str = "torch_optimizer.NovoGrad"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-8
weight_decay: float = 0
grad_averaging: bool = False
amsgrad: bool = False
@dataclass
class PIDConf(OptimizerConf):
_target_: str = "torch_optimizer.PID"
lr: float = 1.0e-3
momentum: float = 0
dampening: float = 0
weight_decay: float = 1e-2
integral: float = 5.0
derivative: float = 10.0
@dataclass
class QHAdamConf(OptimizerConf):
_target_: str = "torch_optimizer.QHAdam"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
nus: List[float] = field(default_factory=lambda: [1.0, 1.0])
weight_decay: float = 0
decouple_weight_decay: bool = False
eps: float = 1e-8
@dataclass
class QHMConf(OptimizerConf):
_target_: str = "torch_optimizer.QHM"
lr: float = 1.0e-3
momentum: float = 0
nu: float = 0.7
weight_decay: float = 1e-2
weight_decay_type: str = "grad"
@dataclass
class RangerConf(OptimizerConf):
_target_: str = "torch_optimizer.Ranger"
lr: float = 1.0e-3
alpha: float = 0.5
k: int = 6
N_sma_threshhold: int = 5
betas: List[float] = field(default_factory=lambda: [0.95, 0.999])
eps: float = 1e-5
weight_decay: float = 0
@dataclass
class RangerQHConf(OptimizerConf):
_target_: str = "torch_optimizer.RangerQH"
lr: float = 1.0e-3
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
nus: List[float] = field(default_factory=lambda: [0.7, 1.0])
weight_decay: float = 0
k: int = 6
alpha: float = 0.5
decouple_weight_decay: bool = False
eps: float = 1e-8
@dataclass
class RangerVAConf(OptimizerConf):
_target_: str = "torch_optimizer.RangerVA"
lr: float = 1.0e-3
alpha: float = 0.5
k: int = 6
n_sma_threshhold: int = 5
betas: List[float] = field(default_factory=lambda: [0.95, 0.999])
eps: float = 1e-5
weight_decay: float = 0
amsgrad: bool = True
transformer: str = "softplus"
smooth: int = 50
grad_transformer: str = "square"
@dataclass
class SGDPConf(OptimizerConf):
_target_: str = "torch_optimizer.SGDP"
lr: float = 1.0e-3
momentum: float = 0
dampening: float = 0
weight_decay: float = 1e-2
nesterov: bool = False
delta: float = 0.1
wd_ratio: float = 0.1
@dataclass
class SGDWConf(OptimizerConf):
_target_: str = "torch_optimizer.SGDW"
lr: float = 1.0e-3
momentum: float = 0
dampening: float = 0
weight_decay: float = 1e-2
nesterov: bool = False
@dataclass
class SWATSConf(OptimizerConf):
_target_: str = "torch_optimizer.SWATS"
lr: float = 1.0e-1
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-3
weight_decay: float = 0
amsgrad: bool = False
nesterov: bool = False
@dataclass
class ShampooConf(OptimizerConf):
_target_: str = "torch_optimizer.Shampoo"
lr: float = 1.0e-1
momentum: float = 0
weight_decay: float = 0
epsilon: float = 1e-4
update_freq: int = 1
@dataclass
class YogiConf(OptimizerConf):
_target_: str = "torch_optimizer.Yogi"
lr: float = 1.0e-2
betas: List[float] = field(default_factory=lambda: [0.9, 0.999])
eps: float = 1e-3
initial_accumulator: float = 1e-6
weight_decay: float = 0
def register_optimizer_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="optimizer",
name="adam",
node=AdamConf,
)
cs.store(
group="optimizer",
name="sgd",
node=SGDConf,
)
cs.store(
group="optimizer",
name="adahessian",
node=AdahessianConf,
)
cs.store(
group="optimizer",
name="bfgs",
node=BFGSConf,
)
cs.store(
group="optimizer",
name="adadelta",
node=AdadeltaConf,
)
cs.store(
group="optimizer",
name="adagrad",
node=AdagradConf,
)
cs.store(
group="optimizer",
name="adamw",
node=AdamWConf,
)
cs.store(
group="optimizer",
name="sparse_adam",
node=SparseAdamConf,
)
cs.store(
group="optimizer",
name="adamax",
node=AdamaxConf,
)
cs.store(
group="optimizer",
name="asgd",
node=ASGDConf,
)
cs.store(
group="optimizer",
name="nadam",
node=NAdamConf,
)
cs.store(
group="optimizer",
name="radam",
node=RAdamConf,
)
cs.store(
group="optimizer",
name="rmsprop",
node=RMSpropConf,
)
cs.store(
group="optimizer",
name="rprop",
node=RpropConf,
)
cs.store(
group="optimizer",
name="a2grad_exp",
node=A2GradExpConf,
)
cs.store(
group="optimizer",
name="a2grad_inc",
node=A2GradIncConf,
)
cs.store(
group="optimizer",
name="a2grad_uni",
node=A2GradUniConf,
)
cs.store(
group="optimizer",
name="accsgd",
node=AccSGDConf,
)
cs.store(
group="optimizer",
name="adabelief",
node=AdaBeliefConf,
)
cs.store(
group="optimizer",
name="adabound",
node=AdaBoundConf,
)
cs.store(
group="optimizer",
name="adamod",
node=AdaModConf,
)
cs.store(
group="optimizer",
name="adafactor",
node=AdafactorConf,
)
cs.store(
group="optimizer",
name="adamp",
node=AdamPConf,
)
cs.store(
group="optimizer",
name="aggmo",
node=AggMoConf,
)
cs.store(
group="optimizer",
name="apollo",
node=ApolloConf,
)
cs.store(
group="optimizer",
name="diffgrad",
node=DiffGradConf,
)
cs.store(
group="optimizer",
name="lamb",
node=LambConf,
)
cs.store(
group="optimizer",
name="madgrad",
node=MADGRADConf,
)
cs.store(
group="optimizer",
name="novograd",
node=NovoGradConf,
)
cs.store(
group="optimizer",
name="pid",
node=PIDConf,
)
cs.store(
group="optimizer",
name="qhadam",
node=QHAdamConf,
)
cs.store(
group="optimizer",
name="qhm",
node=QHMConf,
)
cs.store(
group="optimizer",
name="ranger",
node=RangerConf,
)
cs.store(
group="optimizer",
name="ranger_qh",
node=RangerQHConf,
)
cs.store(
group="optimizer",
name="ranger_va",
node=RangerVAConf,
)
cs.store(
group="optimizer",
name="sgdp",
node=SGDPConf,
)
cs.store(
group="optimizer",
name="sgdw",
node=SGDWConf,
)
cs.store(
group="optimizer",
name="swats",
node=SWATSConf,
)
cs.store(
group="optimizer",
name="shampoo",
node=ShampooConf,
)
cs.store(
group="optimizer",
name="yogi",
node=YogiConf,
)
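# Illustrative sketch (not part of the library API; the helper name is
# hypothetical): one way to turn a config above into a live optimizer is to
# map its fields onto the matching torch.optim constructor by hand. All the
# fields referenced here are visible in RMSpropConf.
def _example_build_rmsprop():
    import torch
    conf = RMSpropConf()
    model = torch.nn.Linear(4, 4)
    return torch.optim.RMSprop(
        model.parameters(),
        lr=conf.lr,
        alpha=conf.alpha,
        eps=conf.eps,
        weight_decay=conf.weight_decay,
        momentum=conf.momentum,
        centered=conf.centered,
    )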
| modulus-sym-main | modulus/sym/hydra/optimizer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported PyTorch scheduler configs
"""
import torch
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING
@dataclass
class SchedulerConf:
    _target_: str = MISSING
@dataclass
class ExponentialLRConf(SchedulerConf):
_target_: str = "torch.optim.lr_scheduler.ExponentialLR"
gamma: float = 0.99998718
@dataclass
class TFExponentialLRConf(SchedulerConf):
_target_: str = "custom"
_name_: str = "tf.ExponentialLR"
decay_rate: float = 0.95
decay_steps: int = 1000
@dataclass
class CosineAnnealingLRConf(SchedulerConf):
_target_: str = "torch.optim.lr_scheduler.CosineAnnealingLR"
T_max: int = 1000
eta_min: float = 0
last_epoch: int = -1
@dataclass
class CosineAnnealingWarmRestartsConf(SchedulerConf):
_target_: str = "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts"
T_0: int = 1000
T_mult: int = 1
eta_min: float = 0
last_epoch: int = -1
def register_scheduler_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="scheduler",
name="exponential_lr",
node=ExponentialLRConf,
)
cs.store(
group="scheduler",
name="tf_exponential_lr",
node=TFExponentialLRConf,
)
cs.store(
group="scheduler",
name="cosine_annealing",
node=CosineAnnealingLRConf,
)
cs.store(
group="scheduler",
name="cosine_annealing_warm_restarts",
node=CosineAnnealingWarmRestartsConf,
)
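# Illustrative sketch (an assumption, not the solver's actual wiring): the
# "custom" tf.ExponentialLR config corresponds to the TensorFlow-style schedule
# lr(step) = lr0 * decay_rate ** (step / decay_steps), which LambdaLR can
# reproduce directly.
def _example_tf_exponential_lr():
    conf = TFExponentialLRConf()
    optimizer = torch.optim.SGD(torch.nn.Linear(2, 2).parameters(), lr=1e-3)
    return torch.optim.lr_scheduler.LambdaLR(
        optimizer,
        lr_lambda=lambda step: conf.decay_rate ** (step / conf.decay_steps),
    )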
| modulus-sym-main | modulus/sym/hydra/scheduler.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiler config
"""
from dataclasses import dataclass, field
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, SI, II
from typing import Any, Union, List, Dict
@dataclass
class ProfilerConf:
profile: bool = MISSING
start_step: int = MISSING
end_step: int = MISSING
@dataclass
class NvtxProfiler(ProfilerConf):
name: str = "nvtx"
profile: bool = False
start_step: int = 0
end_step: int = 100
@dataclass
class TensorBoardProfiler(ProfilerConf):
name: str = "tensorboard"
profile: bool = False
start_step: int = 0
end_step: int = 100
warmup: int = 5
repeat: int = 1
filename: str = "${hydra.job.override_dirname}-${hydra.job.name}.profile"
def register_profiler_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="profiler",
name="nvtx",
node=NvtxProfiler,
)
cs.store(
group="profiler",
name="tensorboard",
node=TensorBoardProfiler,
)
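# Illustrative sketch (an assumption about how a training loop might consume
# NvtxProfiler, not the library's own code): emit one NVTX range per step
# inside the [start_step, end_step) window so `nsys profile` captures only
# that slice of training. Requires a CUDA-enabled torch build.
def _example_nvtx_window(step: int, conf: NvtxProfiler):
    import torch
    if conf.profile and conf.start_step <= step < conf.end_step:
        torch.cuda.nvtx.range_push(f"step_{step}")
        # ... forward / backward / optimizer.step() would run here ...
        torch.cuda.nvtx.range_pop()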
| modulus-sym-main | modulus/sym/hydra/profiler.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Supported Modulus training paradigms
"""
import torch
from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, II
from typing import Any
from .loss import NTKConf
@dataclass
class TrainingConf:
max_steps: int = MISSING
grad_agg_freq: int = MISSING
rec_results_freq: int = MISSING
rec_validation_freq: int = MISSING
rec_inference_freq: int = MISSING
rec_monitor_freq: int = MISSING
rec_constraint_freq: int = MISSING
save_network_freq: int = MISSING
print_stats_freq: int = MISSING
summary_freq: int = MISSING
amp: bool = MISSING
amp_dtype: str = MISSING
@dataclass
class DefaultTraining(TrainingConf):
max_steps: int = 10000
grad_agg_freq: int = 1
rec_results_freq: int = 1000
rec_validation_freq: int = II("training.rec_results_freq")
rec_inference_freq: int = II("training.rec_results_freq")
rec_monitor_freq: int = II("training.rec_results_freq")
rec_constraint_freq: int = II("training.rec_results_freq")
save_network_freq: int = 1000
print_stats_freq: int = 100
summary_freq: int = 1000
amp: bool = False
amp_dtype: str = "float16"
ntk: NTKConf = NTKConf()
@dataclass
class VariationalTraining(DefaultTraining):
test_function: str = MISSING
use_quadratures: bool = False
@dataclass
class StopCriterionConf:
metric: Any = MISSING
min_delta: Any = MISSING
patience: int = MISSING
mode: str = MISSING
freq: int = MISSING
strict: bool = MISSING
@dataclass
class DefaultStopCriterion(StopCriterionConf):
metric: Any = None
min_delta: Any = None
patience: int = 50000
mode: str = "min"
freq: int = 1000
strict: bool = False
def register_training_configs() -> None:
cs = ConfigStore.instance()
cs.store(
group="training",
name="default_training",
node=DefaultTraining,
)
cs.store(
group="training",
name="variational_training",
node=VariationalTraining,
)
cs.store(
group="stop_criterion",
name="default_stop_criterion",
node=DefaultStopCriterion,
)
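# Illustrative sketch: the II("training.rec_results_freq") defaults above are
# OmegaConf interpolations, so overriding rec_results_freq also moves every
# rec_*_freq that was left at its default.
def _example_freq_interpolation():
    from omegaconf import OmegaConf
    cfg = OmegaConf.create({"training": DefaultTraining()})
    cfg.training.rec_results_freq = 500
    assert cfg.training.rec_validation_freq == 500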
| modulus-sym-main | modulus/sym/hydra/training.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.interpolation import interpolation
import torch
import numpy as np
def test_interpolation():
# set device
device = "cuda" if torch.cuda.is_available() else "cpu"
# make context grid to do interpolation from
grid = [(-1, 2, 30), (-1, 2, 30), (-1, 2, 30)]
np_linspace = [np.linspace(x[0], x[1], x[2]) for x in grid]
np_mesh_grid = np.meshgrid(*np_linspace, indexing="ij")
np_mesh_grid = np.stack(np_mesh_grid, axis=0)
mesh_grid = torch.tensor(np_mesh_grid, dtype=torch.float32).to(device)
sin_grid = torch.sin(
mesh_grid[0:1, :, :] + mesh_grid[1:2, :, :] ** 2 + mesh_grid[2:3, :, :] ** 3
).to(device)
# make query points to evaluate on
nr_points = 100
query_points = (
torch.stack(
[
torch.linspace(0.0, 1.0, nr_points),
torch.linspace(0.0, 1.0, nr_points),
torch.linspace(0.0, 1.0, nr_points),
],
axis=-1,
)
.to(device)
.requires_grad_(True)
)
# compute interpolation
interpolation_types = [
"nearest_neighbor",
"linear",
"smooth_step_1",
"smooth_step_2",
"gaussian",
]
for i_type in interpolation_types:
# perform interpolation
computed_interpolation = interpolation(
query_points,
sin_grid,
grid=grid,
interpolation_type=i_type,
mem_speed_trade=False,
)
# compare to numpy
np_computed_interpolation = computed_interpolation.cpu().detach().numpy()
np_ground_truth = (
(
torch.sin(
query_points[:, 0:1]
+ query_points[:, 1:2] ** 2
+ query_points[:, 2:3] ** 3
)
)
.cpu()
.detach()
.numpy()
)
difference = np.linalg.norm(
(np_computed_interpolation - np_ground_truth) / nr_points
)
# verify
assert difference < 1e-2, "Test failed!"
| modulus-sym-main | test/test_interpolation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.eq.derivatives import MeshlessFiniteDerivative
from modulus.sym.node import Node
from modulus.sym.key import Key
from modulus.sym.graph import Graph
class SineNet(torch.nn.Module):
def forward(self, inputs):
return {
"y": (inputs["w"] ** 3) * torch.sin(inputs["x"]),
"z": inputs["w"] * torch.cos(inputs["x"]),
}
class ParabolaNet(torch.nn.Module):
def forward(self, inputs):
return {
"p": (inputs["nu"] ** 3) + inputs["x"],
"q": 2 * inputs["z"],
}
def test_meshless_finite_deriv():
    # Define sinusoidal function node
function_node = Node(
inputs=[Key("w"), Key("x")],
outputs=[Key("y"), Key("z")],
evaluate=SineNet(),
name="Test Node",
)
# Define finite derivative node
deriv = MeshlessFiniteDerivative.make_node(
node_model=function_node,
derivatives=[
Key("y", derivatives=[Key("x"), Key("w")]),
Key("y", derivatives=[Key("x")]),
Key("y", derivatives=[Key("w"), Key("w"), Key("w")]),
Key("z", derivatives=[Key("x"), Key("x")]),
Key("z", derivatives=[Key("x"), Key("x"), Key("x"), Key("x")]),
],
dx=0.01,
order=2,
max_batch_size=15,
)
inputs = {"x": torch.randn(5, 1).double(), "w": torch.randn(5, 1).double()}
inputs.update(function_node.evaluate(inputs)) # Forward to get y
outputs = deriv.evaluate(inputs)
assert torch.allclose(
outputs["y__x"].double(), (inputs["w"] ** 3) * torch.cos(inputs["x"]), atol=1e-3
), "First derivative test failed"
assert torch.allclose(
outputs["z__x__x"].double(), -inputs["w"] * torch.cos(inputs["x"]), atol=1e-3
), "Second derivative test failed"
assert torch.allclose(
outputs["y__x__w"].double(),
3 * inputs["w"] ** 2 * torch.cos(inputs["x"]),
atol=1e-3,
), "Mixed second derivative test failed"
assert torch.allclose(
outputs["y__w__w__w"].double(), 6 * torch.sin(inputs["x"]), atol=1e-3
), "Third derivative test failed"
assert torch.allclose(
outputs["z__x__x__x__x"].double(),
inputs["w"] * torch.cos(inputs["x"]),
atol=1e-3,
), "Forth derivative test failed"
# Testing forth order derivs
deriv = MeshlessFiniteDerivative.make_node(
node_model=function_node,
derivatives=[
Key("y", derivatives=[Key("x")]),
Key("z", derivatives=[Key("x"), Key("x")]),
],
dx=0.01,
order=4,
max_batch_size=20,
)
inputs = {"x": torch.randn(5, 1).double(), "w": torch.randn(5, 1).double()}
inputs.update(function_node.evaluate(inputs)) # Forward to get y
outputs = deriv.evaluate(inputs)
assert torch.allclose(
outputs["y__x"].double(), (inputs["w"] ** 3) * torch.cos(inputs["x"]), atol=1e-2
), "Forth order first derivative test failed"
assert torch.allclose(
outputs["z__x__x"].double(), -inputs["w"] * torch.cos(inputs["x"]), atol=1e-2
), "Forth order second derivative test failed"
# Multinode checks
function_node_2 = Node(
inputs=[Key("nu"), Key("w"), Key("z")],
outputs=[Key("p"), Key("q")],
evaluate=ParabolaNet(),
name="Test Node 2",
)
# Define finite derivative node
deriv = MeshlessFiniteDerivative.make_node(
node_model=Graph(
nodes=[function_node, function_node_2],
invar=[Key("w"), Key("x"), Key("nu")],
req_names=[Key("p"), Key("q")],
),
derivatives=[
Key("p", derivatives=[Key("nu")]),
Key("q", derivatives=[Key("x"), Key("w")]),
],
dx=0.01,
)
inputs = {
"x": torch.randn(5, 1).double(),
"w": torch.randn(5, 1).double(),
"nu": torch.randn(5, 1).double(),
}
outputs = deriv.evaluate(inputs)
assert torch.allclose(
outputs["p__nu"].double(), 3 * (inputs["nu"] ** 2), atol=1e-3
), "Multi-node first derivative test failed"
assert torch.allclose(
outputs["q__x__w"].double(), 2 * -torch.sin(inputs["x"]), atol=1e-3
), "Multi-node second derivative test failed"
# Testing callable dx
def dx_func(count: int):
# First pass should be inaccurate
if count == 1:
return 10.0
else:
return 0.01
deriv = MeshlessFiniteDerivative.make_node(
node_model=function_node,
derivatives=[
Key("y", derivatives=[Key("x")]),
],
dx=dx_func,
order=2,
)
inputs = {"x": torch.randn(5, 1).double(), "w": torch.randn(5, 1).double()}
inputs.update(function_node.evaluate(inputs)) # Forward to get y
    outputs_1 = deriv.evaluate(inputs)  # Inaccurate pass (dx = 10.0 on the first call)
    outputs_2 = deriv.evaluate(inputs)  # Accurate pass (dx = 0.01 afterwards)
assert not torch.allclose(
outputs_1["y__x"].double(),
(inputs["w"] ** 3) * torch.cos(inputs["x"]),
atol=1e-3,
), "Callable dx first derivative test failed"
assert torch.allclose(
outputs_2["y__x"].double(),
(inputs["w"] ** 3) * torch.cos(inputs["x"]),
atol=1e-3,
), "Callable dx first derivative test failed"
class GradModel(torch.nn.Module):
def forward(self, inputs):
return {"u": torch.cos(inputs["x"]), "v": torch.sin(inputs["y"])}
def test_meshless_finite_deriv_grads():
# Testing gradient calcs
# TODO: Grad tests for every grad
model = GradModel()
dx = 0.01
deriv = MeshlessFiniteDerivative.make_node(
node_model=model,
derivatives=[
Key("u", derivatives=[Key("x")]),
Key("v", derivatives=[Key("y"), Key("y")]),
],
dx=dx,
)
# == First derivative test ==
inputs_mfd = {"x": torch.randn(5, 1).double(), "y": torch.randn(5, 1).double()}
inputs_mfd["x"].requires_grad = True
inputs_mfd["y"].requires_grad = True
inputs_mfd.update(model.forward(inputs_mfd))
outputs = deriv.evaluate(inputs_mfd)
loss = outputs["u__x"].sum()
loss.backward()
# Auto diff calc
inputs_auto = inputs_mfd["x"].detach().clone()
inputs_auto.requires_grad = True
inputs_up1 = torch.cos(inputs_auto + dx)
inputs_um1 = torch.cos(inputs_auto - dx)
grad = (inputs_up1 - inputs_um1) / (2.0 * dx)
loss = grad.sum()
loss.backward()
assert torch.allclose(
inputs_auto.grad,
inputs_mfd["x"].grad,
atol=1e-3,
), "First derivative gradient test failed"
# == Second derivative test ==
loss = outputs["v__y__y"].sum()
loss.backward()
# Auto diff calc
inputs_auto = inputs_mfd["y"].detach().clone()
inputs_auto.requires_grad = True
inputs = torch.sin(inputs_auto)
inputs_up1 = torch.sin(inputs_auto + dx)
inputs_um1 = torch.sin(inputs_auto - dx)
grad = (inputs_up1 - 2 * inputs + inputs_um1) / (dx * dx)
loss = grad.sum()
loss.backward()
assert torch.allclose(
inputs_auto.grad,
inputs_mfd["y"].grad,
atol=1e-3,
), "Second derivative gradient test failed"
if __name__ == "__main__":
test_meshless_finite_deriv()
test_meshless_finite_deriv_grads()
| modulus-sym-main | test/test_meshless_finite_dirv.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import GPUtil
import os
import pytest
import argparse
from pytest import ExitCode
from termcolor import colored
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--testdir", default=".")
args = parser.parse_args()
os.system("nvidia-smi")
    available_gpus = GPUtil.getAvailable(limit=8)
    if len(available_gpus) == 0:
        print(colored("No free GPUs found on DGX 4850", "red"))
        raise RuntimeError("No free GPUs available")
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(available_gpus[-1])
        print(colored(f"=== Using GPU {available_gpus[-1]} ===", "blue"))
retcode = pytest.main(["-x", args.testdir])
if ExitCode.OK == retcode:
print(colored("UNIT TESTS PASSED! :D", "green"))
else:
print(colored("UNIT TESTS FAILED!", "red"))
raise ValueError(f"Pytest returned error code {retcode}")
| modulus-sym-main | test/run_tests.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-sym-main | test/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from modulus.sym.models.layers import SpectralConv1d, SpectralConv2d, SpectralConv3d
class SpectralConv1d_old(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, modes1: int):
        """
        1D Fourier layer. It does FFT, linear transform, and inverse FFT.
        """
        super(SpectralConv1d_old, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat)
)
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
bsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant normalization factor)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
bsize,
self.out_channels,
x.size(-1) // 2 + 1,
device=x.device,
dtype=torch.cfloat,
)
out_ft[:, :, : self.modes1] = self.compl_mul1d(
x_ft[:, :, : self.modes1], self.weights1
)
# Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
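# Illustrative shape check (not part of the original test): rfft of a
# length-32 signal yields floor(32/2) + 1 = 17 complex coefficients; the layer
# mixes only the lowest `modes1` of them and zero-pads the rest before the
# inverse FFT, so the spatial length is preserved.
def _example_spectral_conv1d_shapes():
    layer = SpectralConv1d_old(in_channels=2, out_channels=3, modes1=4)
    assert layer(torch.randn(5, 2, 32)).shape == (5, 3, 32)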
class SpectralConv2d_old(nn.Module):
    def __init__(self, in_channels, out_channels, modes1, modes2):
        """
        2D Fourier layer. It does FFT, linear transform, and inverse FFT.
        """
        super(SpectralConv2d_old, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(
in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat
)
)
self.weights2 = nn.Parameter(
self.scale
* torch.rand(
in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat
)
)
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant normalization factor)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
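        # Note: rfft2 halves only the last axis; the second-to-last axis is a
        # full FFT axis whose negative frequencies sit at its end, so weights1
        # covers the leading [:modes1] block and weights2 the trailing
        # [-modes1:] block below.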
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2] = self.compl_mul2d(
x_ft[:, :, : self.modes1, : self.modes2], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2] = self.compl_mul2d(
x_ft[:, :, -self.modes1 :, : self.modes2], self.weights2
)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
class SpectralConv3d_old(nn.Module):
    def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
        """
        3D Fourier layer. It does FFT, linear transform, and inverse FFT.
        """
        super(SpectralConv3d_old, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.modes3 = modes3
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights2 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights3 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights4 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
# Complex multiplication
def compl_mul3d(self, input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients (up to a constant normalization factor)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-3),
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, : self.modes2, : self.modes3], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3], self.weights2
)
out_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3], self.weights3
)
out_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3], self.weights4
)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
def test_spectral_convs():
in_channels = 2
out_channels = 3
modes = 4
sc1d_old = SpectralConv1d_old(in_channels, out_channels, modes)
# Init weights
sc1d_old.weights1.data = torch.complex(
torch.randn(in_channels, out_channels, modes),
torch.randn(in_channels, out_channels, modes),
)
sc1d = SpectralConv1d(in_channels, out_channels, modes)
# Copy to new model
sc1d.weights1.data = torch.stack(
[sc1d_old.weights1.real, sc1d_old.weights1.imag], dim=-1
)
inputs = torch.randn(5, in_channels, 32)
# Forward pass of spectral conv
output_old = sc1d_old(inputs)
output = sc1d(inputs)
assert torch.allclose(
output_old, output, rtol=1e-3, atol=1e-3
), "Spectral conv 1d mismatch"
sc2d_old = SpectralConv2d_old(in_channels, out_channels, modes, modes)
sc2d_old.weights1.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes),
torch.randn(in_channels, out_channels, modes, modes),
)
sc2d_old.weights2.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes),
torch.randn(in_channels, out_channels, modes, modes),
)
sc2d = SpectralConv2d(in_channels, out_channels, modes, modes)
# Copy to new model
sc2d.weights1.data = torch.stack(
[sc2d_old.weights1.real, sc2d_old.weights1.imag], dim=-1
)
sc2d.weights2.data = torch.stack(
[sc2d_old.weights2.real, sc2d_old.weights2.imag], dim=-1
)
inputs = torch.randn(5, in_channels, 32, 32)
# Forward pass of spectral conv
output_old = sc2d_old(inputs)
output = sc2d(inputs)
assert torch.allclose(
output_old, output, rtol=1e-3, atol=1e-3
), "Spectral conv 2d mismatch"
sc3d_old = SpectralConv3d_old(in_channels, out_channels, modes, modes, modes)
sc3d_old.weights1.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes, modes),
torch.randn(in_channels, out_channels, modes, modes, modes),
)
sc3d_old.weights2.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes, modes),
torch.randn(in_channels, out_channels, modes, modes, modes),
)
sc3d_old.weights3.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes, modes),
torch.randn(in_channels, out_channels, modes, modes, modes),
)
sc3d_old.weights4.data = torch.complex(
torch.randn(in_channels, out_channels, modes, modes, modes),
torch.randn(in_channels, out_channels, modes, modes, modes),
)
sc3d = SpectralConv3d(in_channels, out_channels, modes, modes, modes)
# Copy to new model
sc3d.weights1.data = torch.stack(
[sc3d_old.weights1.real, sc3d_old.weights1.imag], dim=-1
)
sc3d.weights2.data = torch.stack(
[sc3d_old.weights2.real, sc3d_old.weights2.imag], dim=-1
)
sc3d.weights3.data = torch.stack(
[sc3d_old.weights3.real, sc3d_old.weights3.imag], dim=-1
)
sc3d.weights4.data = torch.stack(
[sc3d_old.weights4.real, sc3d_old.weights4.imag], dim=-1
)
inputs = torch.randn(5, in_channels, 32, 32, 32)
# Forward pass of spectral conv
output_old = sc3d_old(inputs)
output = sc3d(inputs)
assert torch.allclose(
output_old, output, rtol=1e-3, atol=1e-3
), "Spectral conv 3d mismatch"
if __name__ == "__main__":
    test_spectral_convs()
| modulus-sym-main | test/test_spectral_convs.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol
import numpy as np
from pathlib import Path
from modulus.sym.geometry.tessellation import Tessellation
from modulus.sym.geometry import Parameterization
dir_path = Path(__file__).parent
def test_tesselated_geometry():
# read in cube file
cube = Tessellation.from_stl(dir_path / "stls/cube.stl")
# sample boundary
boundary = cube.sample_boundary(
1000, parameterization=Parameterization({Symbol("fake_param"): 1})
)
# sample interior
interior = cube.sample_interior(
1000, parameterization=Parameterization({Symbol("fake_param"): 1})
)
# check if surface area is right for boundary
assert np.isclose(np.sum(boundary["area"]), 6.0)
    # check if volume is right for interior (volume weights are stored under the "area" key)
assert np.isclose(np.sum(interior["area"]), 1.0)
| modulus-sym-main | test/test_tesselated_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from modulus.sym.utils.sympy import SympyToTorch
import sympy
def test_sympy_node():
# Define SymPy symbol and expression
x = sympy.Symbol("x")
y = sympy.Symbol("y")
expr = sympy.Max(sympy.sin(x), sympy.cos(y))
# Get numpy reference
x_np = np.random.random(10)
y_np = np.random.random(10)
expr_np = np.maximum(np.sin(x_np), np.cos(y_np))
sn = SympyToTorch(expr, "node")
# Choose device to run on and copy data from numpy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
x_th = torch.tensor(x_np, dtype=torch.float32, device=device)
y_th = torch.tensor(y_np, dtype=torch.float32, device=device)
assert np.allclose(x_th.cpu().detach().numpy(), x_np)
assert np.allclose(y_th.cpu().detach().numpy(), y_np)
# Run the compiled function on input tensors
var = {"x": x_th, "y": y_th}
expr_th = sn(var)
expr_th_out = expr_th["node"].cpu().detach().numpy()
assert np.allclose(expr_th_out, expr_np, rtol=1.0e-3), "SymPy printer test failed!"
if __name__ == "__main__":
test_sympy_node()
| modulus-sym-main | test/test_sympy_node.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.key import Key
from modulus.sym.constants import diff
from modulus.sym.eq.derivatives import Derivative
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y, z):
return (
1.5 * x * x + torch.sin(y) + torch.exp(z),
2 * x * x + torch.cos(y) + torch.exp(-z),
1.5 * x * x + torch.sin(y) + torch.exp(z),
2 * x * x + torch.cos(y) + torch.exp(-z),
)
def validate_gradients(
x, y, z, dudx, dudy, dudz, dvdx, dvdy, dvdz, dwdx, dwdy, dwdz, dpdx, dpdy, dpdz
):
# Check against exact solution
assert torch.allclose(dudx, 3 * x), "x derivative of u failed"
assert torch.allclose(dudy, torch.cos(y)), "y derivative of u failed"
assert torch.allclose(dudz, torch.exp(z)), "z derivative of u failed"
assert torch.allclose(dvdx, 4 * x), "x derivative of v failed"
assert torch.allclose(dvdy, -torch.sin(y)), "y derivative of v failed"
assert torch.allclose(dvdz, -torch.exp(-z)), "z derivative of v failed"
assert torch.allclose(dwdx, 3 * x), "x derivative of w failed"
assert torch.allclose(dwdy, torch.cos(y)), "y derivative of w failed"
assert torch.allclose(dwdz, torch.exp(z)), "z derivative of w failed"
assert torch.allclose(dpdx, 4 * x), "x derivative of p failed"
assert torch.allclose(dpdy, -torch.sin(y)), "y derivative of p failed"
assert torch.allclose(dpdz, -torch.exp(-z)), "z derivative of p failed"
def test_derivative_node():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set up input coordinates
batch_size = 128
x = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
y = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
z = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
# Instantiate the model and compute outputs
model = torch.jit.script(Model()).to(device)
u, v, w, p = model(x, y, z)
input_vars = [
Key.from_str("x"),
Key.from_str("y"),
Key.from_str("z"),
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
]
derivs = [
Key.from_str(diff("u", "x")),
Key.from_str(diff("u", "y")),
Key.from_str(diff("u", "z")),
Key.from_str(diff("v", "x")),
Key.from_str(diff("v", "y")),
Key.from_str(diff("v", "z")),
Key.from_str(diff("w", "x")),
Key.from_str(diff("w", "y")),
Key.from_str(diff("w", "z")),
Key.from_str(diff("p", "x")),
Key.from_str(diff("p", "y")),
Key.from_str(diff("p", "z")),
]
dnode = Derivative.make_node(input_vars, derivs, jit=False)
input_dict = dict(zip((str(v) for v in input_vars), [x, y, z, u, v, w, p]))
derivs_dict = dnode.evaluate(input_dict)
validate_gradients(x, y, z, *(derivs_dict[str(d)] for d in derivs))
if __name__ == "__main__":
test_derivative_node()
| modulus-sym-main | test/test_derivatives.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.loss import (
PointwiseLossNorm,
DecayedPointwiseLossNorm,
IntegralLossNorm,
DecayedIntegralLossNorm,
)
def test_loss_norm():
# make pointwise test values
invar = {"x": torch.arange(10)[:, None], "area": torch.ones(10)[:, None] / 10}
pred_outvar = {"u": torch.arange(10)[:, None]}
true_outvar = {"u": torch.arange(10)[:, None] + 2}
lambda_weighting = {"u": torch.ones(10)[:, None]}
# Test Pointwise l2
loss = PointwiseLossNorm(2)
l = loss.forward(invar, pred_outvar, true_outvar, lambda_weighting, step=0)
assert torch.isclose(l["u"], torch.tensor(4.0))
# Test Pointwise l1
loss = PointwiseLossNorm(1)
l = loss.forward(invar, pred_outvar, true_outvar, lambda_weighting, step=0)
assert torch.isclose(l["u"], torch.tensor(2.0))
# Test Decayed Pointwise l2
loss = DecayedPointwiseLossNorm(2, 1, decay_steps=1000, decay_rate=0.5)
l = loss.forward(invar, pred_outvar, true_outvar, lambda_weighting, step=0)
assert torch.isclose(l["u"], torch.tensor(4.0))
l = loss.forward(invar, pred_outvar, true_outvar, lambda_weighting, step=1000)
assert torch.isclose(l["u"], torch.tensor(2.82842712))
l = loss.forward(invar, pred_outvar, true_outvar, lambda_weighting, step=1000000)
assert torch.isclose(l["u"], torch.tensor(2.0))
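    # The three values are consistent with an exponent that decays from 2
    # toward 1 as p(step) = 1 + (2 - 1) * decay_rate ** (step / decay_steps):
    # p(0) = 2 gives 2**2 = 4, p(1000) = 1.5 gives 2**1.5 ~= 2.8284, and
    # p -> 1 recovers the plain L1 value of 2 (an inference from the asserted
    # numbers, not from the loss module's source).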
# make Integral test values
list_invar = [
{"x": torch.arange(10)[:, None], "area": torch.ones(10)[:, None] / 10}
]
list_pred_outvar = [{"u": torch.arange(10)[:, None]}]
list_true_outvar = [{"u": torch.tensor(2.5)[None, None]}]
list_lambda_weighting = [{"u": torch.ones(1)[None, None]}]
# Test Integral l2
loss = IntegralLossNorm(2)
l = loss.forward(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting, step=0
)
assert torch.isclose(l["u"], torch.tensor(4.0))
# Test Integral l1
loss = IntegralLossNorm(1)
l = loss.forward(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting, step=0
)
assert torch.isclose(l["u"], torch.tensor(2.0))
# Test Decayed Integral l2
loss = DecayedIntegralLossNorm(2, 1, decay_steps=1000, decay_rate=0.5)
l = loss.forward(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting, step=0
)
assert torch.isclose(l["u"], torch.tensor(4.0))
l = loss.forward(
list_invar, list_pred_outvar, list_true_outvar, list_lambda_weighting, step=1000
)
assert torch.isclose(l["u"], torch.tensor(2.82842712))
l = loss.forward(
list_invar,
list_pred_outvar,
list_true_outvar,
list_lambda_weighting,
step=1000000,
)
assert torch.isclose(l["u"], torch.tensor(2.0))
| modulus-sym-main | test/test_loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict, List, Optional
from modulus.sym.key import Key
from modulus.sym.constants import diff
from modulus.sym.node import Node
from modulus.sym.graph import Graph
from modulus.sym.eq.derivatives import MeshlessFiniteDerivative
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
x, y, z = inputs["x"], inputs["y"], inputs["z"]
return {
"u": 1.5 * x * x + torch.sin(y) + torch.exp(z),
"v": 2 * x * x + torch.cos(y) + torch.exp(-z),
"w": 1.5 * x * x + torch.sin(y) + torch.exp(z),
"p": 2 * x * x + torch.cos(y) + torch.exp(-z),
}
class Loss(torch.nn.Module):
def __init__(self):
super().__init__()
self.input_keys: List[str] = [diff("u", "x"), diff("v", "y"), diff("w", "z")]
def forward(self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
divergence = (
inputs[self.input_keys[0]]
+ inputs[self.input_keys[1]]
+ inputs[self.input_keys[2]]
)
return {"divergence_loss": torch.square(divergence).mean()}
def validate_divergence_loss(x, y, z, divergence_loss, rtol=1e-5, atol=1e-8):
dudx = 3 * x
dvdy = -torch.sin(y)
dwdz = torch.exp(z)
divergence_loss_exact = torch.square(dudx + dvdy + dwdz).mean()
assert torch.allclose(divergence_loss, divergence_loss_exact, rtol, atol)
def test_graph():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set up input coordinates
batch_size = 128
x = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
y = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
z = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
# Instantiate the model and compute outputs
model = torch.jit.script(Model()).to(device)
model_node = Node(["x", "y", "z"], ["u", "v", "w", "p"], model, name="Model")
loss = torch.jit.script(Loss()).to(device)
loss_node = Node(
[diff("u", "x"), diff("v", "y"), diff("w", "z")],
["divergence_loss"],
loss,
name="Loss",
)
nodes = [model_node, loss_node]
input_vars = [Key.from_str("x"), Key.from_str("y"), Key.from_str("z")]
output_vars = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
Key.from_str("divergence_loss"),
]
graph = Graph(nodes, input_vars, output_vars)
input_dict = dict(zip((str(v) for v in input_vars), [x, y, z]))
output_dict = graph(input_dict)
validate_divergence_loss(x, y, z, output_dict["divergence_loss"])
def test_graph_no_loss_node():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set up input coordinates
batch_size = 128
x = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
y = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
z = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
# Instantiate the model and compute outputs
model = torch.jit.script(Model()).to(device)
model_node = Node(["x", "y", "z"], ["u", "v", "w", "p"], model, name="Model")
loss = torch.jit.script(Loss()).to(device)
loss_node = Node(
[diff("u", "x"), diff("v", "y"), diff("w", "z")],
["divergence_loss"],
loss,
name="Loss",
)
nodes = [model_node]
input_vars = [Key.from_str("x"), Key.from_str("y"), Key.from_str("z")]
output_vars = [
Key.from_str("u__x"),
Key.from_str("v__y"),
Key.from_str("w__z"),
]
graph = Graph(nodes, input_vars, output_vars)
input_dict = dict(zip((str(v) for v in input_vars), [x, y, z]))
output_dict = graph(input_dict)
# Calc loss manually
loss = Loss()
output_dict.update(loss(output_dict))
validate_divergence_loss(x, y, z, output_dict["divergence_loss"])
def test_mfd_graph():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Set up input coordinates
batch_size = 32
x = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
y = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
z = torch.rand(batch_size, 1, dtype=torch.float32, requires_grad=True).to(device)
# Instantiate the model and compute outputs
model = torch.jit.script(Model()).to(device)
model_node = Node(["x", "y", "z"], ["u", "v", "w", "p"], model, name="Model")
loss = torch.jit.script(Loss()).to(device)
loss_node = Node(
[diff("u", "x"), diff("v", "y"), diff("w", "z")],
["divergence_loss"],
loss,
name="Loss",
)
nodes = [model_node, loss_node]
input_vars = [Key.from_str("x"), Key.from_str("y"), Key.from_str("z")]
output_vars = [
Key.from_str("u"),
Key.from_str("v"),
Key.from_str("w"),
Key.from_str("p"),
Key.from_str("divergence_loss"),
]
# Test meshless finite derivative node in graph
mfd_node = MeshlessFiniteDerivative.make_node(
node_model=model,
derivatives=[
Key("u", derivatives=[Key("x")]),
Key("v", derivatives=[Key("y")]),
Key("w", derivatives=[Key("z")]),
],
dx=0.001,
)
graph = Graph(nodes + [mfd_node], input_vars, output_vars)
input_dict = dict(zip((str(v) for v in input_vars), [x, y, z]))
output_dict = graph(input_dict)
# Need to raise allclose atol here because finite diff is approximate
validate_divergence_loss(x, y, z, output_dict["divergence_loss"], atol=1e-3)
if __name__ == "__main__":
test_graph()
test_graph_no_loss_node()
test_mfd_graph()
| modulus-sym-main | test/test_graph.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pathlib import Path
from modulus.sym.geometry import Parameterization, Parameter, Bounds
from modulus.sym.geometry.primitives_1d import Point1D, Line1D
from modulus.sym.geometry.primitives_2d import (
Line,
Channel2D,
Rectangle,
Circle,
Triangle,
Ellipse,
Polygon,
)
from modulus.sym.geometry.primitives_3d import (
Plane,
Channel,
Box,
Sphere,
Cylinder,
Torus,
Cone,
TriangularPrism,
Tetrahedron,
IsoTriangularPrism,
ElliCylinder,
)
from modulus.sym.geometry.tessellation import Tessellation
from modulus.sym.utils.io.vtk import var_to_polyvtk
dir_path = Path(__file__).parent
def check_geometry(
geo,
criteria=None,
parameterization=None,
bounds=None,
boundary_area=None,
interior_area=None,
max_sdf=None,
compute_sdf_derivatives=True,
check_bounds=None,
debug=False,
):
if debug:
print("checking geo: " + str(geo))
# check boundary
if boundary_area is not None:
boundary = geo.sample_boundary(
1000, criteria=criteria, parameterization=parameterization
)
if debug:
var_to_polyvtk(boundary, "boundary.vtp")
assert np.isclose(np.sum(boundary["area"]), boundary_area, rtol=1e-1)
# check interior
if interior_area is not None:
interior = geo.sample_interior(
1000,
criteria=criteria,
parameterization=parameterization,
bounds=bounds,
compute_sdf_derivatives=compute_sdf_derivatives,
)
if debug:
var_to_polyvtk(interior, "interior.vtp")
assert np.isclose(np.sum(interior["area"]), interior_area, rtol=1e-1)
if max_sdf is not None:
assert np.max(interior["sdf"]) < max_sdf
if compute_sdf_derivatives:
sdf_diff = np.concatenate(
[interior["sdf__" + d] for d in geo.dims], axis=-1
)
assert np.all(
np.isclose(np.mean(np.linalg.norm(sdf_diff, axis=1)), 1.0, rtol=1e-1)
)
def test_primitives():
# point 1d
g = Point1D(1)
check_geometry(g, boundary_area=1)
# line 1d
g = Line1D(1, 2.5)
check_geometry(g, boundary_area=2, interior_area=1.5, max_sdf=0.75)
# line
g = Line((1, 0), (1, 2.5), normal=1)
check_geometry(g, boundary_area=2.5)
# channel
g = Channel2D((0, 0), (2, 3))
check_geometry(g, boundary_area=4, interior_area=6, max_sdf=1.5)
# rectangle
g = Rectangle((0, 0), (2, 3))
check_geometry(g, boundary_area=10, interior_area=6, max_sdf=1.0)
# circle
g = Circle((0, 2), 2)
check_geometry(g, boundary_area=4 * np.pi, interior_area=4 * np.pi, max_sdf=2.0)
# triangle
g = Triangle((0, 0.5), 1, 1)
check_geometry(
g,
boundary_area=1.0 + 2 * np.sqrt(0.5**2 + 1.0),
interior_area=0.5,
max_sdf=0.30897,
)
# ellipse
g = Ellipse((0, 2), 1, 2)
check_geometry(g, boundary_area=9.688448, interior_area=2 * np.pi, max_sdf=1.0)
# polygon
g = Polygon([(0, 0), (2, 0), (2, 1), (1, 2), (0, 1)])
check_geometry(g, boundary_area=4 + 2 * np.sqrt(2), interior_area=3.0)
# plane
g = Plane((0, -1, 0), (0, 1, 2))
check_geometry(g, boundary_area=4)
# channel
g = Channel((0, 0, -1), (2, 3, 4))
check_geometry(g, boundary_area=32, interior_area=30, max_sdf=1.5)
# box
g = Box((0, 0, -1), (2, 3, 4))
check_geometry(g, boundary_area=62, interior_area=30, max_sdf=1)
# sphere
g = Sphere((0, 1, 2), 2)
check_geometry(g, boundary_area=16 * np.pi, interior_area=np.pi * 8 * 4 / 3.0)
# cylinder
g = Cylinder((0, 1, 2), 2, 3)
check_geometry(g, boundary_area=20 * np.pi, interior_area=12 * np.pi, max_sdf=1.5)
# torus
g = Torus((0, 1, 2), 2, 1)
check_geometry(
g, boundary_area=8 * np.pi**2, interior_area=4 * np.pi**2, max_sdf=1
)
"""
# cone
g = Cone((0, 1, 2), 1, 3)
checks.append((g, np.pi*(1+np.sqrt(10)), np.pi, 0, None))
# triangular prism
g = TriangularPrism((0, 1, 2), 1, 2)
checks.append((g, 6*np.sqrt(2) + 1, 2, 0, None))
# tetrahedron
g = Tetrahedron((0, 1, 2), 1)
checks.append((g, np.sqrt(3), 1.0/(6.0*np.sqrt(2)), 0, None))
"""
# box scale
g = Box((0, 0, 0), (1, 2, 3))
g = g.scale(2)
check_geometry(g, boundary_area=88, interior_area=48, max_sdf=1)
# box translate
g = Box((0, 0, 0), (1, 2, 3))
g = g.translate((0, 1, 2))
check_geometry(g, boundary_area=22, interior_area=6, max_sdf=0.5)
# box rotate
g = Box((0, 0, 0), (1, 2, 3))
g = g.rotate(np.pi / 4.0, axis="x", center=(10, -1, 20))
g = g.rotate(np.pi / 4.0, axis="y")
g = g.rotate(np.pi / 4.0, axis="z", center=(10, -10, 20))
check_geometry(g, boundary_area=22, interior_area=6, max_sdf=0.5)
# repeat operation
g = Sphere((0, 0, 0), 0.5)
g = g.repeat(1.5, [-1, -1, -1], [3, 3, 3])
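    # repeat spans indices -1..3 per axis, i.e. a 5 x 5 x 5 grid of 125 spheres;
    # each sphere of radius 0.5 contributes 4 * pi * 0.5**2 = pi of surface and
    # (1 / 6) * pi of volume, giving the pi * 5**3 and (1 / 6) * pi * 5**3
    # totals asserted below.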
check_geometry(
g,
boundary_area=np.pi * 5**3,
interior_area=(1.0 / 6.0) * np.pi * 5**3,
max_sdf=0.5,
)
# tessellated geometry
g = Tessellation.from_stl(dir_path / "stls/cube.stl")
check_geometry(g, boundary_area=6, interior_area=1.0, max_sdf=0.5)
# tessellated with primitives geometry
g = Tessellation.from_stl(dir_path / "stls/cube.stl") - Box(
(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)
)
check_geometry(g, boundary_area=6, interior_area=0.875)
# Integral plane
sdf_fn = Tessellation.from_stl(dir_path / "stls/cube.stl") - Box(
(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)
)
def _interior_criteria(sdf_fn):
def interior_criteria(invar, params):
sdf = sdf_fn.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
return interior_criteria
g = Plane((0.25, 0, 0), (0.25, 1, 1))
check_geometry(g, boundary_area=0.75, criteria=_interior_criteria(sdf_fn))
# test parameterization
radius = Parameter("radius")
angle = Parameter("angle")
g = Circle((0, 0, 0), radius, parameterization=Parameterization({radius: (1, 2)}))
g = Rectangle((-2, -2, -2), (2, 2, 2)) - g
g = g.rotate(
angle=angle, parameterization=Parameterization({angle: (0, 2.0 * np.pi)})
)
check_geometry(g, boundary_area=16 + 3 * np.pi)
check_geometry(
g,
boundary_area=16 + 2 * np.pi,
parameterization=Parameterization({radius: 1, angle: np.pi}),
)
if __name__ == "__main__":
    test_primitives()
| modulus-sym-main | test/test_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from modulus.sym.utils.sympy import torch_lambdify
import sympy
def test_lambdify():
# Define SymPy symbol and expression
x = sympy.Symbol("x")
y = sympy.Symbol("y")
expr = sympy.Max(sympy.sin(x), sympy.cos(y))
# Get numpy reference
x_np = np.random.random(10)
y_np = np.random.random(10)
expr_np = np.maximum(np.sin(x_np), np.cos(y_np))
# Compile SymPy expression to the framework
lam_tf = torch_lambdify(expr, ["x", "y"])
# Choose device to run on and copy data from numpy
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
x_th = torch.tensor(x_np, dtype=torch.float32, device=device)
y_th = torch.tensor(y_np, dtype=torch.float32, device=device)
assert np.allclose(x_th.cpu().detach().numpy(), x_np)
assert np.allclose(y_th.cpu().detach().numpy(), y_np)
# Run the compiled function on input tensors
expr_th = lam_tf([x_th, y_th])
expr_th_out = expr_th.cpu().detach().numpy()
assert np.allclose(expr_th_out, expr_np, rtol=1.0e-3), "SymPy printer test failed!"
if __name__ == "__main__":
test_lambdify()
| modulus-sym-main | test/test_sympy_printer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.eq.pdes.basic import GradNormal, Curl
import torch
import numpy as np
def test_normal_gradient_equation():
# test data for normal gradient
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
normal_z = np.random.rand(1024, 1)
u = np.exp(2 * x + y + z + t)
u__x = 2 * np.exp(2 * x + y + z + t)
u__y = 1 * np.exp(2 * x + y + z + t)
u__z = 1 * np.exp(2 * x + y + z + t)
normal_gradient_u_true = normal_x * u__x + normal_y * u__y + normal_z * u__z
normal_gradient_eq = GradNormal(T="u", dim=3, time=True)
evaluations = normal_gradient_eq.make_nodes()[0].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"u__y": torch.tensor(u__y, dtype=torch.float32),
"u__z": torch.tensor(u__z, dtype=torch.float32),
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
}
)
normal_gradient_u_eval_pred = evaluations["normal_gradient_u"].numpy()
# verify PDE computation
assert np.allclose(
normal_gradient_u_eval_pred, normal_gradient_u_true
), "Test Failed!"
def test_curl():
# test data for curl equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
a = np.exp(2 * x + y + z)
b = np.exp(x + 2 * y + z)
c = np.exp(x + y + 2 * z)
a__x = 2 * np.exp(2 * x + y + z)
a__y = 1 * np.exp(2 * x + y + z)
a__z = 1 * np.exp(2 * x + y + z)
b__x = 1 * np.exp(x + 2 * y + z)
b__y = 2 * np.exp(x + 2 * y + z)
b__z = 1 * np.exp(x + 2 * y + z)
c__x = 1 * np.exp(x + y + 2 * z)
c__y = 1 * np.exp(x + y + 2 * z)
c__z = 2 * np.exp(x + y + 2 * z)
u_true = c__y - b__z
v_true = a__z - c__x
w_true = b__x - a__y
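    # curl(a, b, c) = (c_y - b_z, a_z - c_x, b_x - a_y); each component is
    # exposed as a separate node, so u, v, w are evaluated independently below.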
curl_eq = Curl(("a", "b", "c"), ("u", "v", "w"))
evaluations_u = curl_eq.make_nodes()[0].evaluate(
{
"c__y": torch.tensor(c__y, dtype=torch.float32),
"b__z": torch.tensor(b__z, dtype=torch.float32),
}
)
evaluations_v = curl_eq.make_nodes()[1].evaluate(
{
"a__z": torch.tensor(a__z, dtype=torch.float32),
"c__x": torch.tensor(c__x, dtype=torch.float32),
}
)
evaluations_w = curl_eq.make_nodes()[2].evaluate(
{
"b__x": torch.tensor(b__x, dtype=torch.float32),
"a__y": torch.tensor(a__y, dtype=torch.float32),
}
)
u_eval_pred = evaluations_u["u"].numpy()
v_eval_pred = evaluations_v["v"].numpy()
w_eval_pred = evaluations_w["w"].numpy()
# verify PDE computation
assert np.allclose(u_eval_pred, u_true, atol=1e-4), "Test Failed!"
assert np.allclose(v_eval_pred, v_true, atol=1e-4), "Test Failed!"
assert np.allclose(w_eval_pred, w_true, atol=1e-4), "Test Failed!"
if __name__ == "__main__":
test_normal_gradient_equation()
test_curl()
| modulus-sym-main | test/test_pdes/test_basic.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
import torch
import numpy as np
import os
def test_navier_stokes_equation():
# test data for navier stokes equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
u = np.exp(2 * x + y + z + t)
v = np.exp(x + 2 * y + z + t)
w = np.exp(x + y + 2 * z + t)
p = np.exp(x + y + z + t)
rho = 1.0
nu = 0.2
u__t = 1 * np.exp(2 * x + y + z + t)
u__x = 2 * np.exp(2 * x + y + z + t)
u__y = 1 * np.exp(2 * x + y + z + t)
u__z = 1 * np.exp(2 * x + y + z + t)
u__x__x = 2 * 2 * np.exp(2 * x + y + z + t)
u__y__y = 1 * 1 * np.exp(2 * x + y + z + t)
u__z__z = 1 * 1 * np.exp(2 * x + y + z + t)
u__x__y = 1 * 2 * np.exp(2 * x + y + z + t)
u__x__z = 1 * 2 * np.exp(2 * x + y + z + t)
u__y__z = 1 * 1 * np.exp(2 * x + y + z + t)
u__y__x = u__x__y
u__z__x = u__x__z
u__z__y = u__y__z
v__t = 1 * np.exp(x + 2 * y + z + t)
v__x = 1 * np.exp(x + 2 * y + z + t)
v__y = 2 * np.exp(x + 2 * y + z + t)
v__z = 1 * np.exp(x + 2 * y + z + t)
v__x__x = 1 * 1 * np.exp(x + 2 * y + z + t)
v__y__y = 2 * 2 * np.exp(x + 2 * y + z + t)
v__z__z = 1 * 1 * np.exp(x + 2 * y + z + t)
v__x__y = 2 * 1 * np.exp(x + 2 * y + z + t)
v__x__z = 1 * 1 * np.exp(x + 2 * y + z + t)
v__y__z = 1 * 2 * np.exp(x + 2 * y + z + t)
v__y__x = v__x__y
v__z__x = v__x__z
v__z__y = v__y__z
w__t = 1 * np.exp(x + y + 2 * z + t)
w__x = 1 * np.exp(x + y + 2 * z + t)
w__y = 1 * np.exp(x + y + 2 * z + t)
w__z = 2 * np.exp(x + y + 2 * z + t)
w__x__x = 1 * 1 * np.exp(x + y + 2 * z + t)
w__y__y = 1 * 1 * np.exp(x + y + 2 * z + t)
w__z__z = 2 * 2 * np.exp(x + y + 2 * z + t)
w__x__y = 1 * 1 * np.exp(x + y + 2 * z + t)
w__x__z = 2 * 1 * np.exp(x + y + 2 * z + t)
w__y__z = 2 * 1 * np.exp(x + y + 2 * z + t)
w__y__x = w__x__y
w__z__x = w__x__z
w__z__y = w__y__z
p__x = 1 * np.exp(x + y + z + t)
p__y = 1 * np.exp(x + y + z + t)
p__z = 1 * np.exp(x + y + z + t)
continuity_equation_true = 0 + rho * u__x + rho * v__y + rho * w__z
momentum_x_equation_true = (
rho * u__t
+ u * rho * u__x
+ v * rho * u__y
+ w * rho * u__z
+ p__x
- rho * nu * u__x__x
- rho * nu * u__y__y
- rho * nu * u__z__z
)
momentum_y_equation_true = (
rho * v__t
+ u * rho * v__x
+ v * rho * v__y
+ w * rho * v__z
+ p__y
- rho * nu * v__x__x
- rho * nu * v__y__y
- rho * nu * v__z__z
)
momentum_z_equation_true = (
rho * w__t
+ u * rho * w__x
+ v * rho * w__y
+ w * rho * w__z
+ p__z
- rho * nu * w__x__x
- rho * nu * w__y__y
- rho * nu * w__z__z
)
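    # Reference residuals for incompressible Navier-Stokes with constant rho and nu:
    #   continuity: rho * div(u)
    #   momentum_i: rho * du_i/dt + rho * (u . grad)u_i + dp/dx_i - rho * nu * lap(u_i)
    # The nodes are fed the analytic derivatives directly, so no autograd is involved.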
navier_stokes_eq = NavierStokes(nu=nu, rho=rho, dim=3, time=True)
evaluations_continuity = navier_stokes_eq.make_nodes()[0].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
}
)
evaluations_momentum_x = navier_stokes_eq.make_nodes()[1].evaluate(
{
"u__t": torch.tensor(u__t, dtype=torch.float32),
"u__x": torch.tensor(u__x, dtype=torch.float32),
"u__y": torch.tensor(u__y, dtype=torch.float32),
"u__z": torch.tensor(u__z, dtype=torch.float32),
"u__x__x": torch.tensor(u__x__x, dtype=torch.float32),
"u__y__y": torch.tensor(u__y__y, dtype=torch.float32),
"u__z__z": torch.tensor(u__z__z, dtype=torch.float32),
"p__x": torch.tensor(p__x, dtype=torch.float32),
"u": torch.tensor(u, dtype=torch.float32),
"v": torch.tensor(v, dtype=torch.float32),
"w": torch.tensor(w, dtype=torch.float32),
}
)
evaluations_momentum_y = navier_stokes_eq.make_nodes()[2].evaluate(
{
"v__t": torch.tensor(v__t, dtype=torch.float32),
"v__x": torch.tensor(v__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"v__z": torch.tensor(v__z, dtype=torch.float32),
"v__x__x": torch.tensor(v__x__x, dtype=torch.float32),
"v__y__y": torch.tensor(v__y__y, dtype=torch.float32),
"v__z__z": torch.tensor(v__z__z, dtype=torch.float32),
"p__y": torch.tensor(p__y, dtype=torch.float32),
"u": torch.tensor(u, dtype=torch.float32),
"v": torch.tensor(v, dtype=torch.float32),
"w": torch.tensor(w, dtype=torch.float32),
}
)
evaluations_momentum_z = navier_stokes_eq.make_nodes()[3].evaluate(
{
"w__t": torch.tensor(w__t, dtype=torch.float32),
"w__x": torch.tensor(w__x, dtype=torch.float32),
"w__y": torch.tensor(w__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
"w__x__x": torch.tensor(w__x__x, dtype=torch.float32),
"w__y__y": torch.tensor(w__y__y, dtype=torch.float32),
"w__z__z": torch.tensor(w__z__z, dtype=torch.float32),
"p__z": torch.tensor(p__z, dtype=torch.float32),
"u": torch.tensor(u, dtype=torch.float32),
"v": torch.tensor(v, dtype=torch.float32),
"w": torch.tensor(w, dtype=torch.float32),
}
)
continuity_eq_eval_pred = evaluations_continuity["continuity"].numpy()
momentum_x_eq_eval_pred = evaluations_momentum_x["momentum_x"].numpy()
momentum_y_eq_eval_pred = evaluations_momentum_y["momentum_y"].numpy()
momentum_z_eq_eval_pred = evaluations_momentum_z["momentum_z"].numpy()
# verify PDE computation
assert np.allclose(
continuity_eq_eval_pred, continuity_equation_true
), "Test Failed!"
assert np.allclose(
momentum_x_eq_eval_pred, momentum_x_equation_true
), "Test Failed!"
assert np.allclose(
momentum_y_eq_eval_pred, momentum_y_equation_true
), "Test Failed!"
assert np.allclose(
momentum_z_eq_eval_pred, momentum_z_equation_true
), "Test Failed!"
if __name__ == "__main__":
test_navier_stokes_equation()
| modulus-sym-main | test/test_pdes/test_navier_stokes.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from modulus.sym.eq.pdes.wave_equation import WaveEquation, HelmholtzEquation
def test_wave_equation():
# test data for wave equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
u = np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
c = 0.1
u__t__t = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__x__x = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__y__y = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__z__z = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
wave_equation_true = u__t__t - c * c * u__x__x - c * c * u__y__y - c * c * u__z__z
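    # With u = sin(x)sin(y)sin(z)cos(t) every second derivative equals -u, so the
    # residual u_tt - c^2 * lap(u) reduces to (3*c^2 - 1) * u.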
# evaluate the equation
eq = WaveEquation(u="u", c=c, dim=3, time=True)
evaluations = eq.make_nodes()[0].evaluate(
{
"u__x__x": torch.tensor(u__x__x, dtype=torch.float32),
"u__y__y": torch.tensor(u__y__y, dtype=torch.float32),
"u__z__z": torch.tensor(u__z__z, dtype=torch.float32),
"u__t__t": torch.tensor(u__t__t, dtype=torch.float32),
}
)
eq_eval = evaluations["wave_equation"].numpy()
# verify PDE computation
assert np.allclose(eq_eval, wave_equation_true), "Test Failed!"
def test_helmholtz_equation():
# test data for helmholtz equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
u = np.sin(x) * np.sin(y) * np.sin(z)
k = 0.1
u__x__x = -np.sin(x) * np.sin(y) * np.sin(z)
u__y__y = -np.sin(x) * np.sin(y) * np.sin(z)
u__z__z = -np.sin(x) * np.sin(y) * np.sin(z)
helmholtz_equation_true = -(k**2 * u + u__x__x + u__y__y + u__z__z)
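    # The Helmholtz residual is defined with an overall minus sign,
    # -(k^2 * u + lap(u)), which the reference above mirrors.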
# evaluate the equation
eq = HelmholtzEquation(u="u", k=k, dim=3)
evaluations = eq.make_nodes()[0].evaluate(
{
"u": torch.tensor(u, dtype=torch.float32),
"u__x__x": torch.tensor(u__x__x, dtype=torch.float32),
"u__y__y": torch.tensor(u__y__y, dtype=torch.float32),
"u__z__z": torch.tensor(u__z__z, dtype=torch.float32),
}
)
eq_eval = evaluations["helmholtz"].numpy()
# verify PDE computation
assert np.allclose(eq_eval, helmholtz_equation_true), "Test Failed!"
if __name__ == "__main__":
test_wave_equation()
test_helmholtz_equation()
| modulus-sym-main | test/test_pdes/test_wave_equation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
def test_advection_diffusion():
# test data for advection diffusion equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
T = np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u = np.exp(2 * x + y + z)
v = np.exp(x + 2 * y + z)
w = np.exp(x + y + 2 * z)
rho = 1.0
D = 0.1
T__t = -np.sin(x) * np.sin(y) * np.sin(z) * np.sin(t)
T__x = np.cos(x) * np.sin(y) * np.sin(z) * np.cos(t)
T__y = np.sin(x) * np.cos(y) * np.sin(z) * np.cos(t)
T__z = np.sin(x) * np.sin(y) * np.cos(z) * np.cos(t)
T__x__x = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
T__y__y = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
T__z__z = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
advection = u * T__x + v * T__y + w * T__z
diffusion = D * T__x__x + D * T__y__y + D * T__z__z
curl = 0
advection_diffusion_equation_true = T__t + advection + T * curl - diffusion
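    # The "curl" term is the velocity-divergence contribution T * div(u); for the
    # constant-density form tested here it is dropped (set to 0), leaving the
    # residual T_t + u . grad(T) - D * lap(T).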
# evaluate the equation
eq = AdvectionDiffusion(T="T", D=D, rho=float(rho), dim=3, time=True)
evaluations = eq.make_nodes()[0].evaluate(
{
"T__t": torch.tensor(T__t, dtype=torch.float32),
"T__x": torch.tensor(T__x, dtype=torch.float32),
"T__y": torch.tensor(T__y, dtype=torch.float32),
"T__z": torch.tensor(T__z, dtype=torch.float32),
"T__x__x": torch.tensor(T__x__x, dtype=torch.float32),
"T__y__y": torch.tensor(T__y__y, dtype=torch.float32),
"T__z__z": torch.tensor(T__z__z, dtype=torch.float32),
"u": torch.tensor(u, dtype=torch.float32),
"v": torch.tensor(v, dtype=torch.float32),
"w": torch.tensor(w, dtype=torch.float32),
}
)
eq_eval = evaluations["advection_diffusion_T"].numpy()
# verify PDE computation
assert np.allclose(eq_eval, advection_diffusion_equation_true), "Test Failed!"
if __name__ == "__main__":
test_advection_diffusion()
| modulus-sym-main | test/test_pdes/test_advection_diffusion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from modulus.sym.eq.pdes.signed_distance_function import ScreenedPoissonDistance
def test_screened_poisson_distance_equation():
# test data for screened poisson distance
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
distance = np.exp(x + y + z)
distance__x = np.exp(x + y + z)
distance__y = np.exp(x + y + z)
distance__z = np.exp(x + y + z)
distance__x__x = np.exp(x + y + z)
distance__y__y = np.exp(x + y + z)
distance__z__z = np.exp(x + y + z)
tau = 0.1
sdf_grad = 1 - distance__x**2 - distance__y**2 - distance__z**2
poisson = np.sqrt(tau) * (distance__x__x + distance__y__y + distance__z__z)
screened_poisson_distance_true = sdf_grad + poisson
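    # Screened-Poisson SDF residual: 1 - |grad(d)|^2 + sqrt(tau) * lap(d); the
    # sqrt(tau) Laplacian term regularizes the eikonal equation |grad(d)| = 1.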
# evaluate the equation
screened_poisson_distance_eq = ScreenedPoissonDistance(
distance="distance", tau=tau, dim=3
)
evaluations = screened_poisson_distance_eq.make_nodes()[0].evaluate(
{
"distance__x": torch.tensor(distance__x, dtype=torch.float32),
"distance__y": torch.tensor(distance__y, dtype=torch.float32),
"distance__z": torch.tensor(distance__z, dtype=torch.float32),
"distance__x__x": torch.tensor(distance__x__x, dtype=torch.float32),
"distance__y__y": torch.tensor(distance__y__y, dtype=torch.float32),
"distance__z__z": torch.tensor(distance__z__z, dtype=torch.float32),
}
)
screened_poisson_distance_eq_eval_pred = evaluations[
"screened_poisson_distance"
].numpy()
# verify PDE computation
assert np.allclose(
screened_poisson_distance_eq_eval_pred, screened_poisson_distance_true
), "Test Failed!"
if __name__ == "__main__":
test_screened_poisson_distance_equation()
| modulus-sym-main | test/test_pdes/test_screened_poisson_distance.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import os
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
def test_diffusion_equation():
# test data for diffusion equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
u = np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
D = 0.1
Q = 0.1
u__t = -np.sin(x) * np.sin(y) * np.sin(z) * np.sin(t)
u__x = np.cos(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__y = np.sin(x) * np.cos(y) * np.sin(z) * np.cos(t)
u__z = np.sin(x) * np.sin(y) * np.cos(z) * np.cos(t)
u__x__x = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__y__y = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u__z__z = -np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
diffusion_equation_true = u__t - D * u__x__x - D * u__y__y - D * u__z__z - Q
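    # Diffusion residual with a constant source: T_t - D * lap(T) - Q, assembled
    # here from analytic derivatives of the manufactured solution.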
# evaluate the equation
eq = Diffusion(T="u", D=D, Q=Q, dim=3, time=True)
evaluations = eq.make_nodes()[0].evaluate(
{
"u__x__x": torch.tensor(u__x__x, dtype=torch.float32),
"u__y__y": torch.tensor(u__y__y, dtype=torch.float32),
"u__z__z": torch.tensor(u__z__z, dtype=torch.float32),
"u__t": torch.tensor(u__t, dtype=torch.float32),
}
)
eq_eval = evaluations["diffusion_u"].numpy()
# verify PDE computation
assert np.allclose(eq_eval, diffusion_equation_true), "Test Failed!"
def test_diffusion_interface():
# test data for diffusion interface
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
normal_z = np.random.rand(1024, 1)
u_1 = np.sin(x) * np.sin(y) * np.sin(z) * np.cos(t)
u_2 = np.cos(x) * np.cos(y) * np.cos(z) * np.sin(t)
D_1 = 0.1
D_2 = 100
u_1__x = np.cos(x) * np.sin(y) * np.sin(z) * np.cos(t)
u_1__y = np.sin(x) * np.cos(y) * np.sin(z) * np.cos(t)
u_1__z = np.sin(x) * np.sin(y) * np.cos(z) * np.cos(t)
u_2__x = -np.sin(x) * np.cos(y) * np.cos(z) * np.sin(t)
u_2__y = -np.cos(x) * np.sin(y) * np.cos(z) * np.sin(t)
u_2__z = -np.cos(x) * np.cos(y) * np.sin(z) * np.sin(t)
diffusion_interface_dirichlet_u_1_u_2_true = u_1 - u_2
diffusion_interface_neumann_u_1_u_2_true = D_1 * (
normal_x * u_1__x + normal_y * u_1__y + normal_z * u_1__z
) - D_2 * (normal_x * u_2__x + normal_y * u_2__y + normal_z * u_2__z)
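    # Interface conditions between the two domains: continuity of the field
    # (Dirichlet, u_1 - u_2) and continuity of the diffusive flux
    # (Neumann, D_1 * n . grad(u_1) - D_2 * n . grad(u_2)).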
# evaluate the equation
eq = DiffusionInterface(T_1="u_1", T_2="u_2", D_1=D_1, D_2=D_2, dim=3, time=True)
evaluations = eq.make_nodes()[0].evaluate(
{
"u_1": torch.tensor(u_1, dtype=torch.float32),
"u_2": torch.tensor(u_2, dtype=torch.float32),
}
)
eq_1_eval = evaluations["diffusion_interface_dirichlet_u_1_u_2"].numpy()
evaluations = eq.make_nodes()[1].evaluate(
{
"u_1": torch.tensor(u_1, dtype=torch.float32),
"u_2": torch.tensor(u_2, dtype=torch.float32),
"u_1__x": torch.tensor(u_1__x, dtype=torch.float32),
"u_1__y": torch.tensor(u_1__y, dtype=torch.float32),
"u_1__z": torch.tensor(u_1__z, dtype=torch.float32),
"u_2__x": torch.tensor(u_2__x, dtype=torch.float32),
"u_2__y": torch.tensor(u_2__y, dtype=torch.float32),
"u_2__z": torch.tensor(u_2__z, dtype=torch.float32),
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
}
)
eq_2_eval = evaluations["diffusion_interface_neumann_u_1_u_2"].numpy()
# verify PDE computation
assert np.allclose(
eq_1_eval, diffusion_interface_dirichlet_u_1_u_2_true
), "Test Failed!"
assert np.allclose(
eq_2_eval, diffusion_interface_neumann_u_1_u_2_true
), "Test Failed!"
if __name__ == "__main__":
test_diffusion_equation()
test_diffusion_interface()
| modulus-sym-main | test/test_pdes/test_diffusion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
import torch
import numpy as np
import os
def test_zero_equation():
# test data for zero equation
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
u = np.exp(2 * x + y + z + t)
v = np.exp(x + 2 * y + z + t)
w = np.exp(x + y + 2 * z + t)
u__x = 2 * np.exp(2 * x + y + z + t)
u__y = 1 * np.exp(2 * x + y + z + t)
u__z = 1 * np.exp(2 * x + y + z + t)
v__x = 1 * np.exp(x + 2 * y + z + t)
v__y = 2 * np.exp(x + 2 * y + z + t)
v__z = 1 * np.exp(x + 2 * y + z + t)
w__x = 1 * np.exp(x + y + 2 * z + t)
w__y = 1 * np.exp(x + y + 2 * z + t)
w__z = 2 * np.exp(x + y + 2 * z + t)
normal_distance = np.exp(x + y + z)
rho = 1.0
nu = 0.2
max_distance = 0.5
mixing_length = np.minimum(0.419 * normal_distance, 0.09 * max_distance)
G = (
2 * u__x**2
+ 2 * v__y**2
+ 2 * w__z**2
+ (u__y + v__x) ** 2
+ (u__z + w__x) ** 2
+ (v__z + w__y) ** 2
)
nu_true = nu + rho * mixing_length**2 * np.sqrt(G)
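    # Zero-equation (mixing-length) turbulence model: nu_eff = nu + rho * l_m^2 * sqrt(G),
    # with l_m = min(0.419 * d_wall, 0.09 * d_max) and G the mean shear production
    # term built from the velocity gradients above.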
zero_eq = ZeroEquation(nu=nu, max_distance=max_distance, rho=rho, dim=3, time=True)
evaluations_zero_eq = zero_eq.make_nodes()[0].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"u__y": torch.tensor(u__y, dtype=torch.float32),
"u__z": torch.tensor(u__z, dtype=torch.float32),
"v__x": torch.tensor(v__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"v__z": torch.tensor(v__z, dtype=torch.float32),
"w__x": torch.tensor(w__x, dtype=torch.float32),
"w__y": torch.tensor(w__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
"sdf": torch.tensor(normal_distance, dtype=torch.float32),
}
)
zero_eq_eval_pred = evaluations_zero_eq["nu"].numpy()
# verify PDE computation
assert np.allclose(zero_eq_eval_pred, nu_true), "Test Failed!"
if __name__ == "__main__":
test_zero_equation()
| modulus-sym-main | test/test_pdes/test_zero_equation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.eq.pdes.linear_elasticity import (
LinearElasticity,
LinearElasticityPlaneStress,
)
import torch
import numpy as np
import os
def test_linear_elasticity_equations():
# test data for linear elasticity equations
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
normal_z = np.random.rand(1024, 1)
u = np.exp(2 * x + y + z + t)
v = np.exp(x + 2 * y + z + t)
w = np.exp(x + y + 2 * z + t)
u__t__t = 1 * np.exp(2 * x + y + z + t)
v__t__t = 1 * np.exp(x + 2 * y + z + t)
w__t__t = 1 * np.exp(x + y + 2 * z + t)
u__x = 2 * np.exp(2 * x + y + z + t)
u__y = 1 * np.exp(2 * x + y + z + t)
u__z = 1 * np.exp(2 * x + y + z + t)
u__x__x = 2 * 2 * np.exp(2 * x + y + z + t)
u__y__y = 1 * 1 * np.exp(2 * x + y + z + t)
u__z__z = 1 * 1 * np.exp(2 * x + y + z + t)
u__x__y = 1 * 2 * np.exp(2 * x + y + z + t)
u__x__z = 1 * 2 * np.exp(2 * x + y + z + t)
u__y__z = 1 * 1 * np.exp(2 * x + y + z + t)
u__y__x = u__x__y
u__z__x = u__x__z
u__z__y = u__y__z
v__x = 1 * np.exp(x + 2 * y + z + t)
v__y = 2 * np.exp(x + 2 * y + z + t)
v__z = 1 * np.exp(x + 2 * y + z + t)
v__x__x = 1 * 1 * np.exp(x + 2 * y + z + t)
v__y__y = 2 * 2 * np.exp(x + 2 * y + z + t)
v__z__z = 1 * 1 * np.exp(x + 2 * y + z + t)
v__x__y = 2 * 1 * np.exp(x + 2 * y + z + t)
v__x__z = 1 * 1 * np.exp(x + 2 * y + z + t)
v__y__z = 1 * 2 * np.exp(x + 2 * y + z + t)
v__y__x = v__x__y
v__z__x = v__x__z
v__z__y = v__y__z
w__x = 1 * np.exp(x + y + 2 * z + t)
w__y = 1 * np.exp(x + y + 2 * z + t)
w__z = 2 * np.exp(x + y + 2 * z + t)
w__x__x = 1 * 1 * np.exp(x + y + 2 * z + t)
w__y__y = 1 * 1 * np.exp(x + y + 2 * z + t)
w__z__z = 2 * 2 * np.exp(x + y + 2 * z + t)
w__x__y = 1 * 1 * np.exp(x + y + 2 * z + t)
w__x__z = 2 * 1 * np.exp(x + y + 2 * z + t)
w__y__z = 2 * 1 * np.exp(x + y + 2 * z + t)
w__y__x = w__x__y
w__z__x = w__x__z
w__z__y = w__y__z
sigma_xx = np.sin(x) * np.cos(y) * np.cos(z)
sigma_yy = np.cos(x) * np.sin(y) * np.cos(z)
sigma_zz = np.cos(x) * np.cos(y) * np.sin(z)
sigma_xy = np.sin(x) * np.sin(y) * np.cos(z)
sigma_xz = np.sin(x) * np.cos(y) * np.sin(z)
sigma_yz = np.cos(x) * np.sin(y) * np.sin(z)
sigma_xx__x = np.cos(x) * np.cos(y) * np.cos(z)
sigma_yy__y = np.cos(x) * np.cos(y) * np.cos(z)
sigma_zz__z = np.cos(x) * np.cos(y) * np.cos(z)
sigma_xy__x = np.cos(x) * np.sin(y) * np.cos(z)
sigma_xy__y = np.sin(x) * np.cos(y) * np.cos(z)
sigma_xz__x = np.cos(x) * np.cos(y) * np.sin(z)
sigma_xz__z = np.sin(x) * np.cos(y) * np.cos(z)
sigma_yz__y = np.cos(x) * np.cos(y) * np.sin(z)
sigma_yz__z = np.cos(x) * np.sin(y) * np.cos(z)
E = 1.0
nu = 0.1
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
rho = 10.0
stress_disp_xx_true = lambda_ * (u__x + v__y + w__z) + 2 * mu * u__x - sigma_xx
stress_disp_yy_true = lambda_ * (u__x + v__y + w__z) + 2 * mu * v__y - sigma_yy
stress_disp_zz_true = lambda_ * (u__x + v__y + w__z) + 2 * mu * w__z - sigma_zz
stress_disp_xy_true = mu * (u__y + v__x) - sigma_xy
stress_disp_xz_true = mu * (u__z + w__x) - sigma_xz
stress_disp_yz_true = mu * (v__z + w__y) - sigma_yz
equilibrium_x_true = rho * u__t__t - (sigma_xx__x + sigma_xy__y + sigma_xz__z)
equilibrium_y_true = rho * v__t__t - (sigma_xy__x + sigma_yy__y + sigma_yz__z)
equilibrium_z_true = rho * w__t__t - (sigma_xz__x + sigma_yz__y + sigma_zz__z)
traction_x_true = normal_x * sigma_xx + normal_y * sigma_xy + normal_z * sigma_xz
traction_y_true = normal_x * sigma_xy + normal_y * sigma_yy + normal_z * sigma_yz
traction_z_true = normal_x * sigma_xz + normal_y * sigma_yz + normal_z * sigma_zz
navier_x_true = (
rho * u__t__t
- (lambda_ + mu) * (u__x__x + v__y__x + w__z__x)
- mu * (u__x__x + u__y__y + u__z__z)
)
navier_y_true = (
rho * v__t__t
- (lambda_ + mu) * (u__x__y + v__y__y + w__z__y)
- mu * (v__x__x + v__y__y + v__z__z)
)
navier_z_true = (
rho * w__t__t
- (lambda_ + mu) * (u__x__z + v__y__z + w__z__z)
- mu * (w__x__x + w__y__y + w__z__z)
)
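    # Four families of residuals are checked: the constitutive stress-displacement
    # relations (Hooke's law with Lame parameters lambda_ and mu), the dynamic
    # equilibrium equations rho * a_i - div(sigma)_i, the boundary tractions
    # t_i = sigma_ij * n_j, and the displacement-form Navier equations.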
linear_elasticity_eq = LinearElasticity(nu=nu, E=E, rho=rho, dim=3, time=True)
evaluations_stress_disp_xx = linear_elasticity_eq.make_nodes()[0].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
"sigma_xx": torch.tensor(sigma_xx, dtype=torch.float32),
}
)
evaluations_stress_disp_yy = linear_elasticity_eq.make_nodes()[1].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
"sigma_yy": torch.tensor(sigma_yy, dtype=torch.float32),
}
)
evaluations_stress_disp_zz = linear_elasticity_eq.make_nodes()[2].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"w__z": torch.tensor(w__z, dtype=torch.float32),
"sigma_zz": torch.tensor(sigma_zz, dtype=torch.float32),
}
)
evaluations_stress_disp_xy = linear_elasticity_eq.make_nodes()[3].evaluate(
{
"u__y": torch.tensor(u__y, dtype=torch.float32),
"v__x": torch.tensor(v__x, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
}
)
evaluations_stress_disp_xz = linear_elasticity_eq.make_nodes()[4].evaluate(
{
"u__z": torch.tensor(u__z, dtype=torch.float32),
"w__x": torch.tensor(w__x, dtype=torch.float32),
"sigma_xz": torch.tensor(sigma_xz, dtype=torch.float32),
}
)
evaluations_stress_disp_yz = linear_elasticity_eq.make_nodes()[5].evaluate(
{
"v__z": torch.tensor(v__z, dtype=torch.float32),
"w__y": torch.tensor(w__y, dtype=torch.float32),
"sigma_yz": torch.tensor(sigma_yz, dtype=torch.float32),
}
)
evaluations_equilibrium_x = linear_elasticity_eq.make_nodes()[6].evaluate(
{
"u__t__t": torch.tensor(u__t__t, dtype=torch.float32),
"sigma_xx__x": torch.tensor(sigma_xx__x, dtype=torch.float32),
"sigma_xy__y": torch.tensor(sigma_xy__y, dtype=torch.float32),
"sigma_xz__z": torch.tensor(sigma_xz__z, dtype=torch.float32),
}
)
evaluations_equilibrium_y = linear_elasticity_eq.make_nodes()[7].evaluate(
{
"v__t__t": torch.tensor(v__t__t, dtype=torch.float32),
"sigma_xy__x": torch.tensor(sigma_xy__x, dtype=torch.float32),
"sigma_yy__y": torch.tensor(sigma_yy__y, dtype=torch.float32),
"sigma_yz__z": torch.tensor(sigma_yz__z, dtype=torch.float32),
}
)
evaluations_equilibrium_z = linear_elasticity_eq.make_nodes()[8].evaluate(
{
"w__t__t": torch.tensor(w__t__t, dtype=torch.float32),
"sigma_xz__x": torch.tensor(sigma_xz__x, dtype=torch.float32),
"sigma_yz__y": torch.tensor(sigma_yz__y, dtype=torch.float32),
"sigma_zz__z": torch.tensor(sigma_zz__z, dtype=torch.float32),
}
)
evaluations_traction_x = linear_elasticity_eq.make_nodes()[9].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"sigma_xx": torch.tensor(sigma_xx, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
"sigma_xz": torch.tensor(sigma_xz, dtype=torch.float32),
}
)
evaluations_traction_y = linear_elasticity_eq.make_nodes()[10].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"sigma_yy": torch.tensor(sigma_yy, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
"sigma_yz": torch.tensor(sigma_yz, dtype=torch.float32),
}
)
evaluations_traction_z = linear_elasticity_eq.make_nodes()[11].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"sigma_zz": torch.tensor(sigma_zz, dtype=torch.float32),
"sigma_xz": torch.tensor(sigma_xz, dtype=torch.float32),
"sigma_yz": torch.tensor(sigma_yz, dtype=torch.float32),
}
)
evaluations_navier_x = linear_elasticity_eq.make_nodes()[12].evaluate(
{
"u__t__t": torch.tensor(u__t__t, dtype=torch.float32),
"u__x__x": torch.tensor(u__x__x, dtype=torch.float32),
"v__x__y": torch.tensor(v__x__y, dtype=torch.float32),
"w__x__z": torch.tensor(w__x__z, dtype=torch.float32),
"u__y__y": torch.tensor(u__y__y, dtype=torch.float32),
"u__z__z": torch.tensor(u__z__z, dtype=torch.float32),
}
)
evaluations_navier_y = linear_elasticity_eq.make_nodes()[13].evaluate(
{
"v__t__t": torch.tensor(v__t__t, dtype=torch.float32),
"u__x__y": torch.tensor(u__x__y, dtype=torch.float32),
"v__y__y": torch.tensor(v__y__y, dtype=torch.float32),
"w__y__z": torch.tensor(w__y__z, dtype=torch.float32),
"v__x__x": torch.tensor(v__x__x, dtype=torch.float32),
"v__z__z": torch.tensor(v__z__z, dtype=torch.float32),
}
)
evaluations_navier_z = linear_elasticity_eq.make_nodes()[14].evaluate(
{
"w__t__t": torch.tensor(w__t__t, dtype=torch.float32),
"u__x__z": torch.tensor(u__x__z, dtype=torch.float32),
"v__y__z": torch.tensor(v__y__z, dtype=torch.float32),
"w__x__x": torch.tensor(w__x__x, dtype=torch.float32),
"w__y__y": torch.tensor(w__y__y, dtype=torch.float32),
"w__z__z": torch.tensor(w__z__z, dtype=torch.float32),
}
)
stress_disp_xx_eval_pred = evaluations_stress_disp_xx["stress_disp_xx"].numpy()
stress_disp_yy_eval_pred = evaluations_stress_disp_yy["stress_disp_yy"].numpy()
stress_disp_zz_eval_pred = evaluations_stress_disp_zz["stress_disp_zz"].numpy()
stress_disp_xy_eval_pred = evaluations_stress_disp_xy["stress_disp_xy"].numpy()
stress_disp_xz_eval_pred = evaluations_stress_disp_xz["stress_disp_xz"].numpy()
stress_disp_yz_eval_pred = evaluations_stress_disp_yz["stress_disp_yz"].numpy()
equilibrium_x_eval_pred = evaluations_equilibrium_x["equilibrium_x"].numpy()
equilibrium_y_eval_pred = evaluations_equilibrium_y["equilibrium_y"].numpy()
equilibrium_z_eval_pred = evaluations_equilibrium_z["equilibrium_z"].numpy()
traction_x_eval_pred = evaluations_traction_x["traction_x"].numpy()
traction_y_eval_pred = evaluations_traction_y["traction_y"].numpy()
traction_z_eval_pred = evaluations_traction_z["traction_z"].numpy()
navier_x_eval_pred = evaluations_navier_x["navier_x"].numpy()
navier_y_eval_pred = evaluations_navier_y["navier_y"].numpy()
navier_z_eval_pred = evaluations_navier_z["navier_z"].numpy()
# verify PDE computation
assert np.allclose(stress_disp_xx_eval_pred, stress_disp_xx_true), "Test Failed!"
assert np.allclose(stress_disp_yy_eval_pred, stress_disp_yy_true), "Test Failed!"
assert np.allclose(stress_disp_zz_eval_pred, stress_disp_zz_true), "Test Failed!"
assert np.allclose(stress_disp_xy_eval_pred, stress_disp_xy_true), "Test Failed!"
assert np.allclose(stress_disp_xz_eval_pred, stress_disp_xz_true), "Test Failed!"
assert np.allclose(stress_disp_yz_eval_pred, stress_disp_yz_true), "Test Failed!"
assert np.allclose(equilibrium_x_eval_pred, equilibrium_x_true), "Test Failed!"
assert np.allclose(equilibrium_y_eval_pred, equilibrium_y_true), "Test Failed!"
assert np.allclose(equilibrium_z_eval_pred, equilibrium_z_true), "Test Failed!"
assert np.allclose(traction_x_eval_pred, traction_x_true), "Test Failed!"
assert np.allclose(traction_y_eval_pred, traction_y_true), "Test Failed!"
assert np.allclose(traction_z_eval_pred, traction_z_true), "Test Failed!"
assert np.allclose(navier_x_eval_pred, navier_x_true, rtol=1e-3), "Test Failed!"
assert np.allclose(navier_y_eval_pred, navier_y_true, rtol=1e-3), "Test Failed!"
assert np.allclose(navier_z_eval_pred, navier_z_true, rtol=1e-3), "Test Failed!"
def test_linear_elasticity_plane_stress_equations():
# test data for linear elasticity plane stress
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
t = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
u = np.exp(2 * x + y + t)
v = np.exp(x + 2 * y + t)
sigma_xx = np.sin(x) * np.cos(y)
sigma_yy = np.cos(x) * np.sin(y)
sigma_xy = np.sin(x) * np.sin(y)
u__t__t = 1 * np.exp(2 * x + y + t)
v__t__t = 1 * np.exp(x + 2 * y + t)
u__x = 2 * np.exp(2 * x + y + t)
u__y = 1 * np.exp(2 * x + y + t)
u__x__x = 2 * 2 * np.exp(2 * x + y + t)
u__y__y = 1 * 1 * np.exp(2 * x + y + t)
u__x__y = 1 * 2 * np.exp(2 * x + y + t)
u__y__x = u__x__y
v__x = 1 * np.exp(x + 2 * y + t)
v__y = 2 * np.exp(x + 2 * y + t)
v__x__x = 1 * 1 * np.exp(x + 2 * y + t)
v__y__y = 2 * 2 * np.exp(x + 2 * y + t)
v__x__y = 2 * 1 * np.exp(x + 2 * y + t)
v__y__x = v__x__y
sigma_xx__x = np.cos(x) * np.cos(y)
sigma_yy__y = np.cos(x) * np.cos(y)
sigma_xy__x = np.cos(x) * np.sin(y)
sigma_xy__y = np.sin(x) * np.cos(y)
E = 1.0
nu = 0.1
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
rho = 10.0
w_z = -lambda_ / (lambda_ + 2 * mu) * (u__x + v__y)
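    # Plane stress: enforcing sigma_zz = 0 gives the out-of-plane strain
    # w_z = -lambda / (lambda + 2*mu) * (u_x + v_y), which is substituted back
    # into the in-plane constitutive relations below.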
stress_disp_xx_true = lambda_ * (u__x + v__y + w_z) + 2 * mu * u__x - sigma_xx
stress_disp_yy_true = lambda_ * (u__x + v__y + w_z) + 2 * mu * v__y - sigma_yy
stress_disp_xy_true = mu * (u__y + v__x) - sigma_xy
equilibrium_x_true = rho * u__t__t - (sigma_xx__x + sigma_xy__y)
equilibrium_y_true = rho * v__t__t - (sigma_xy__x + sigma_yy__y)
traction_x_true = normal_x * sigma_xx + normal_y * sigma_xy
traction_y_true = normal_x * sigma_xy + normal_y * sigma_yy
linear_elasticity_eq = LinearElasticityPlaneStress(nu=nu, E=E, rho=rho, time=True)
evaluations_stress_disp_xx = linear_elasticity_eq.make_nodes()[0].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"sigma_xx": torch.tensor(sigma_xx, dtype=torch.float32),
}
)
evaluations_stress_disp_yy = linear_elasticity_eq.make_nodes()[1].evaluate(
{
"u__x": torch.tensor(u__x, dtype=torch.float32),
"v__y": torch.tensor(v__y, dtype=torch.float32),
"sigma_yy": torch.tensor(sigma_yy, dtype=torch.float32),
}
)
evaluations_stress_disp_xy = linear_elasticity_eq.make_nodes()[2].evaluate(
{
"u__y": torch.tensor(u__y, dtype=torch.float32),
"v__x": torch.tensor(v__x, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
}
)
evaluations_equilibrium_x = linear_elasticity_eq.make_nodes()[3].evaluate(
{
"u__t__t": torch.tensor(u__t__t, dtype=torch.float32),
"sigma_xx__x": torch.tensor(sigma_xx__x, dtype=torch.float32),
"sigma_xy__y": torch.tensor(sigma_xy__y, dtype=torch.float32),
}
)
evaluations_equilibrium_y = linear_elasticity_eq.make_nodes()[4].evaluate(
{
"v__t__t": torch.tensor(v__t__t, dtype=torch.float32),
"sigma_xy__x": torch.tensor(sigma_xy__x, dtype=torch.float32),
"sigma_yy__y": torch.tensor(sigma_yy__y, dtype=torch.float32),
}
)
evaluations_traction_x = linear_elasticity_eq.make_nodes()[5].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"sigma_xx": torch.tensor(sigma_xx, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
}
)
evaluations_traction_y = linear_elasticity_eq.make_nodes()[6].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"sigma_yy": torch.tensor(sigma_yy, dtype=torch.float32),
"sigma_xy": torch.tensor(sigma_xy, dtype=torch.float32),
}
)
stress_disp_xx_eval_pred = evaluations_stress_disp_xx["stress_disp_xx"].numpy()
stress_disp_yy_eval_pred = evaluations_stress_disp_yy["stress_disp_yy"].numpy()
stress_disp_xy_eval_pred = evaluations_stress_disp_xy["stress_disp_xy"].numpy()
equilibrium_x_eval_pred = evaluations_equilibrium_x["equilibrium_x"].numpy()
equilibrium_y_eval_pred = evaluations_equilibrium_y["equilibrium_y"].numpy()
traction_x_eval_pred = evaluations_traction_x["traction_x"].numpy()
traction_y_eval_pred = evaluations_traction_y["traction_y"].numpy()
# verify PDE computation
assert np.allclose(stress_disp_xx_eval_pred, stress_disp_xx_true), "Test Failed!"
assert np.allclose(stress_disp_yy_eval_pred, stress_disp_yy_true), "Test Failed!"
assert np.allclose(stress_disp_xy_eval_pred, stress_disp_xy_true), "Test Failed!"
assert np.allclose(equilibrium_x_eval_pred, equilibrium_x_true), "Test Failed!"
assert np.allclose(equilibrium_y_eval_pred, equilibrium_y_true), "Test Failed!"
assert np.allclose(traction_x_eval_pred, traction_x_true), "Test Failed!"
assert np.allclose(traction_y_eval_pred, traction_y_true), "Test Failed!"
if __name__ == "__main__":
test_linear_elasticity_equations()
test_linear_elasticity_plane_stress_equations()
| modulus-sym-main | test/test_pdes/test_linear_elasticity.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.eq.pdes.electromagnetic import MaxwellFreqReal, SommerfeldBC, PEC
import torch
import numpy as np
import os
def test_maxwell_freq_real():
# test data for frequency domain Maxwell's equations
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
ux = np.exp(1 * x + 1 * y + 1 * z)
uy = np.exp(2 * x + 2 * y + 2 * z)
uz = np.exp(3 * x + 3 * y + 3 * z)
ux__x = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__x = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__x = 3 * np.exp(3 * x + 3 * y + 3 * z)
ux__y = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__y = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__y = 3 * np.exp(3 * x + 3 * y + 3 * z)
ux__z = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__z = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__z = 3 * np.exp(3 * x + 3 * y + 3 * z)
ux__x__x = 1 * np.exp(1 * x + 1 * y + 1 * z)
ux__x__y = 1 * np.exp(1 * x + 1 * y + 1 * z)
ux__x__z = 1 * np.exp(1 * x + 1 * y + 1 * z)
ux__y__x = ux__x__y
ux__y__y = 1 * np.exp(1 * x + 1 * y + 1 * z)
ux__y__z = 1 * np.exp(1 * x + 1 * y + 1 * z)
ux__z__x = ux__x__z
ux__z__y = ux__y__z
ux__z__z = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__x__x = 4 * np.exp(2 * x + 2 * y + 2 * z)
uy__x__y = 4 * np.exp(2 * x + 2 * y + 2 * z)
uy__x__z = 4 * np.exp(2 * x + 2 * y + 2 * z)
uy__y__x = uy__x__y
uy__y__y = 4 * np.exp(2 * x + 2 * y + 2 * z)
uy__y__z = 4 * np.exp(2 * x + 2 * y + 2 * z)
uy__z__x = uy__x__z
uy__z__y = uy__y__z
uy__z__z = 4 * np.exp(2 * x + 2 * y + 2 * z)
uz__x__x = 9 * np.exp(3 * x + 3 * y + 3 * z)
uz__x__y = 9 * np.exp(3 * x + 3 * y + 3 * z)
uz__x__z = 9 * np.exp(3 * x + 3 * y + 3 * z)
uz__y__x = uz__x__y
uz__y__y = 9 * np.exp(3 * x + 3 * y + 3 * z)
uz__y__z = 9 * np.exp(3 * x + 3 * y + 3 * z)
uz__z__x = uz__x__z
uz__z__y = uz__y__z
uz__z__z = 9 * np.exp(3 * x + 3 * y + 3 * z)
curlux = uz__y - uy__z # 3*np.exp(3*x + 3*y + 3*z) - 2*np.exp(2*x + 2*y + 2*z)
curluy = ux__z - uz__x # 1*np.exp(1*x + 1*y + 1*z) - 3*np.exp(3*x + 3*y + 3*z)
curluz = uy__x - ux__y # 2*np.exp(2*x + 2*y + 2*z) - 1*np.exp(1*x + 1*y + 1*z)
curlcurlux = (
4 * np.exp(2 * x + 2 * y + 2 * z)
- 1 * np.exp(1 * x + 1 * y + 1 * z)
- 1 * np.exp(1 * x + 1 * y + 1 * z)
+ 9 * np.exp(3 * x + 3 * y + 3 * z)
    )  # = uy__x__y - ux__y__y - ux__z__z + uz__x__z  (curluz__y - curluy__z)
curlcurluy = (
9 * np.exp(3 * x + 3 * y + 3 * z)
- 4 * np.exp(2 * x + 2 * y + 2 * z)
- 4 * np.exp(2 * x + 2 * y + 2 * z)
+ 1 * np.exp(1 * x + 1 * y + 1 * z)
    )  # = uz__y__z - uy__z__z - uy__x__x + ux__y__x  (curlux__z - curluz__x)
curlcurluz = (
1 * np.exp(1 * x + 1 * y + 1 * z)
- 9 * np.exp(3 * x + 3 * y + 3 * z)
- 9 * np.exp(3 * x + 3 * y + 3 * z)
+ 4 * np.exp(2 * x + 2 * y + 2 * z)
    )  # = ux__z__x - uz__x__x - uz__y__y + uy__z__y  (curluy__x - curlux__y)
k = 0.1
Maxwell_Freq_real_x_true = curlcurlux - k**2 * ux
Maxwell_Freq_real_y_true = curlcurluy - k**2 * uy
Maxwell_Freq_real_z_true = curlcurluz - k**2 * uz
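    # Time-harmonic, source-free Maxwell equations (real part):
    # curl(curl(E)) - k^2 * E = 0, assembled component-wise from the second
    # derivatives above.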
maxwell_eq = MaxwellFreqReal(k=k)
evaluations_MaxwellFreqReal_x = maxwell_eq.make_nodes()[0].evaluate(
{
"ux": torch.tensor(ux, dtype=torch.float32),
"uy__x__y": torch.tensor(uy__x__y, dtype=torch.float32),
"ux__y__y": torch.tensor(ux__y__y, dtype=torch.float32),
"ux__z__z": torch.tensor(ux__z__z, dtype=torch.float32),
"uz__x__z": torch.tensor(uz__x__z, dtype=torch.float32),
}
)
evaluations_MaxwellFreqReal_y = maxwell_eq.make_nodes()[1].evaluate(
{
"uy": torch.tensor(uy, dtype=torch.float32),
"uz__y__z": torch.tensor(uz__y__z, dtype=torch.float32),
"uy__z__z": torch.tensor(uy__z__z, dtype=torch.float32),
"uy__x__x": torch.tensor(uy__x__x, dtype=torch.float32),
"ux__x__y": torch.tensor(ux__x__y, dtype=torch.float32),
}
)
evaluations_MaxwellFreqReal_z = maxwell_eq.make_nodes()[2].evaluate(
{
"uz": torch.tensor(uz, dtype=torch.float32),
"ux__x__z": torch.tensor(ux__x__z, dtype=torch.float32),
"uz__x__x": torch.tensor(uz__x__x, dtype=torch.float32),
"uz__y__y": torch.tensor(uz__y__y, dtype=torch.float32),
"uy__y__z": torch.tensor(uy__y__z, dtype=torch.float32),
}
)
Maxwell_Freq_real_x_eval_pred = evaluations_MaxwellFreqReal_x[
"Maxwell_Freq_real_x"
].numpy()
Maxwell_Freq_real_y_eval_pred = evaluations_MaxwellFreqReal_y[
"Maxwell_Freq_real_y"
].numpy()
Maxwell_Freq_real_z_eval_pred = evaluations_MaxwellFreqReal_z[
"Maxwell_Freq_real_z"
].numpy()
# verify PDE computation
assert np.allclose(
Maxwell_Freq_real_x_eval_pred, Maxwell_Freq_real_x_true
), "Test Failed!"
assert np.allclose(
Maxwell_Freq_real_y_eval_pred, Maxwell_Freq_real_y_true
), "Test Failed!"
assert np.allclose(
Maxwell_Freq_real_z_eval_pred, Maxwell_Freq_real_z_true
), "Test Failed!"
def test_sommerfeld_bc():
# test data for SommerfeldBC
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
normal_z = np.random.rand(1024, 1)
ux = np.exp(1 * x + 1 * y + 1 * z)
uy = np.exp(2 * x + 2 * y + 2 * z)
uz = np.exp(3 * x + 3 * y + 3 * z)
ux__x = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__x = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__x = 3 * np.exp(3 * x + 3 * y + 3 * z)
ux__y = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__y = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__y = 3 * np.exp(3 * x + 3 * y + 3 * z)
ux__z = 1 * np.exp(1 * x + 1 * y + 1 * z)
uy__z = 2 * np.exp(2 * x + 2 * y + 2 * z)
uz__z = 3 * np.exp(3 * x + 3 * y + 3 * z)
curlux = uz__y - uy__z # 3*np.exp(3*x + 3*y + 3*z) - 2*np.exp(2*x + 2*y + 2*z)
curluy = ux__z - uz__x # 1*np.exp(1*x + 1*y + 1*z) - 3*np.exp(3*x + 3*y + 3*z)
curluz = uy__x - ux__y # 2*np.exp(2*x + 2*y + 2*z) - 1*np.exp(1*x + 1*y + 1*z)
SommerfeldBC_real_x_true = normal_y * curluz - normal_z * curluy
SommerfeldBC_real_y_true = normal_z * curlux - normal_x * curluz
SommerfeldBC_real_z_true = normal_x * curluy - normal_y * curlux
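    # Sommerfeld radiation condition residual: the tangential curl n x curl(E),
    # computed component-wise from the first derivatives above.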
sommerfeld_bc = SommerfeldBC()
evaluations_SommerfeldBC_real_x = sommerfeld_bc.make_nodes()[0].evaluate(
{
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"ux__y": torch.tensor(ux__y, dtype=torch.float32),
"uy__x": torch.tensor(uy__x, dtype=torch.float32),
"ux__z": torch.tensor(ux__z, dtype=torch.float32),
"uz__x": torch.tensor(uz__x, dtype=torch.float32),
}
)
evaluations_SommerfeldBC_real_y = sommerfeld_bc.make_nodes()[1].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"ux__y": torch.tensor(ux__y, dtype=torch.float32),
"uy__x": torch.tensor(uy__x, dtype=torch.float32),
"uy__z": torch.tensor(uy__z, dtype=torch.float32),
"uz__y": torch.tensor(uz__y, dtype=torch.float32),
}
)
evaluations_SommerfeldBC_real_z = sommerfeld_bc.make_nodes()[2].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"ux__z": torch.tensor(ux__z, dtype=torch.float32),
"uz__x": torch.tensor(uz__x, dtype=torch.float32),
"uy__z": torch.tensor(uy__z, dtype=torch.float32),
"uz__y": torch.tensor(uz__y, dtype=torch.float32),
}
)
SommerfeldBC_real_x_eval_pred = evaluations_SommerfeldBC_real_x[
"SommerfeldBC_real_x"
].numpy()
SommerfeldBC_real_y_eval_pred = evaluations_SommerfeldBC_real_y[
"SommerfeldBC_real_y"
].numpy()
SommerfeldBC_real_z_eval_pred = evaluations_SommerfeldBC_real_z[
"SommerfeldBC_real_z"
].numpy()
# verify PDE computation
assert np.allclose(
SommerfeldBC_real_x_eval_pred, SommerfeldBC_real_x_true, atol=1e-4
), "Test Failed!"
assert np.allclose(
SommerfeldBC_real_y_eval_pred, SommerfeldBC_real_y_true, atol=1e-4
), "Test Failed!"
assert np.allclose(
SommerfeldBC_real_z_eval_pred, SommerfeldBC_real_z_true, atol=1e-4
), "Test Failed!"
def test_pec():
# test data for PEC
x = np.random.rand(1024, 1)
y = np.random.rand(1024, 1)
z = np.random.rand(1024, 1)
normal_x = np.random.rand(1024, 1)
normal_y = np.random.rand(1024, 1)
normal_z = np.random.rand(1024, 1)
ux = np.exp(1 * x + 1 * y + 1 * z)
uy = np.exp(2 * x + 2 * y + 2 * z)
uz = np.exp(3 * x + 3 * y + 3 * z)
PEC_x_true = normal_y * uz - normal_z * uy
PEC_y_true = normal_z * ux - normal_x * uz
PEC_z_true = normal_x * uy - normal_y * ux
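    # PEC (perfect electric conductor) boundary: the tangential field must vanish,
    # so the residual is n x E, again checked component by component.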
pec = PEC()
evaluations_PEC_x = pec.make_nodes()[0].evaluate(
{
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"uz": torch.tensor(uz, dtype=torch.float32),
"uy": torch.tensor(uy, dtype=torch.float32),
}
)
evaluations_PEC_y = pec.make_nodes()[1].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_z": torch.tensor(normal_z, dtype=torch.float32),
"ux": torch.tensor(ux, dtype=torch.float32),
"uz": torch.tensor(uz, dtype=torch.float32),
}
)
evaluations_PEC_z = pec.make_nodes()[2].evaluate(
{
"normal_x": torch.tensor(normal_x, dtype=torch.float32),
"normal_y": torch.tensor(normal_y, dtype=torch.float32),
"ux": torch.tensor(ux, dtype=torch.float32),
"uy": torch.tensor(uy, dtype=torch.float32),
}
)
PEC_x_eval_pred = evaluations_PEC_x["PEC_x"].numpy()
PEC_y_eval_pred = evaluations_PEC_y["PEC_y"].numpy()
PEC_z_eval_pred = evaluations_PEC_z["PEC_z"].numpy()
# verify PDE computation
assert np.allclose(PEC_x_eval_pred, PEC_x_true, atol=1e-4), "Test Failed!"
assert np.allclose(PEC_y_eval_pred, PEC_y_true, atol=1e-4), "Test Failed!"
assert np.allclose(PEC_z_eval_pred, PEC_z_true, atol=1e-4), "Test Failed!"
if __name__ == "__main__":
test_maxwell_freq_real()
test_sommerfeld_bc()
test_pec()
| modulus-sym-main | test/test_pdes/test_electromagnetic.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.deeponet import DeepONetArch
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.pix2pix import Pix2PixArch
import torch
import numpy as np
from modulus.sym.key import Key
import pytest
from modulus.sym.graph import Graph
from modulus.sym.models.arch import FuncArch
from .model_test_utils import validate_func_arch_net
# ensure torch.rand() is deterministic
_ = torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# disable tf32 for accuracy
torch.backends.cuda.matmul.allow_tf32 = False
@pytest.mark.parametrize(
"branch_input_keys", [[Key("a", 100)], [Key("a", 100, scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
@pytest.mark.parametrize("dim", [1, 2])
def test_func_arch_deeponet(branch_input_keys, validate_with_dict_forward, dim):
deriv_keys = [Key.from_str("u__x"), Key.from_str("u__x__x")]
branch_net = FullyConnectedArch(
input_keys=branch_input_keys,
output_keys=[Key("branch", 128)],
nr_layers=4,
layer_size=128,
)
trunk_net = FourierNetArch(
input_keys=[Key("x", 1)],
output_keys=[Key("trunk", 128)],
nr_layers=4,
layer_size=128,
frequencies=("axis", [i for i in range(5)]),
)
ref_net = DeepONetArch(
branch_net=branch_net,
trunk_net=trunk_net,
output_keys=[Key("u")],
)
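    # DeepONet pairs a branch net (encoding the input function "a") with a trunk
    # net (encoding the query coordinate "x"); both emit 128-wide features that
    # DeepONetArch combines into the scalar output "u".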
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_deeponet_with_pix2pix(validate_with_dict_forward):
"""
    DeepONet does not support FuncArch when the branch net is a Pix2PixArch.
"""
deriv_keys = [Key.from_str("sol__x"), Key.from_str("sol__x__x")]
branch_input_keys = [Key("coeff")]
output_keys = [Key("sol")]
branch_net = Pix2PixArch(
input_keys=branch_input_keys,
output_keys=[Key("branch")], # hard set in deeponet
dimension=2,
conv_layer_size=32,
)
trunk_net = FourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("trunk", 256)], # hard set in deeponet
nr_layers=5,
layer_size=128,
frequencies=("axis", [i for i in range(5)]),
)
ref_net = DeepONetArch(
branch_net=branch_net,
trunk_net=trunk_net,
output_keys=output_keys,
branch_dim=1024,
)
if validate_with_dict_forward:
ref_net.forward = ref_net._dict_forward
ref_graph = Graph(
[
ref_net.make_node("ref_net", jit=False),
],
ref_net.input_keys,
deriv_keys + [Key("sol")],
func_arch=False,
).to(device)
# deeponet with pix2pix should not support func_arch
assert not ref_net.supports_func_arch
    # nothing happens even if func_arch is enabled
ft_graph = Graph(
[
ref_net.make_node("ref_net", jit=False),
],
ref_net.input_keys,
deriv_keys + [Key("sol")],
func_arch=True,
).to(device)
# there should be no FuncArch instance
for node in ft_graph.node_evaluation_order:
evaluate = node.evaluate
assert not isinstance(evaluate, FuncArch)
# check result
x = torch.rand([100, 1], device=device).requires_grad_()
y = torch.rand([100, 1], device=device).requires_grad_()
coeff = torch.rand(
[100, branch_input_keys[0].size, 32, 32], device=device
).requires_grad_()
in_vars = {"x": x, "y": y, "coeff": coeff}
ft_out = ft_graph(in_vars)
ref_out = ref_graph(in_vars)
for k in ref_out.keys():
assert torch.allclose(ref_out[k], ft_out[k], atol=6e-5)
| modulus-sym-main | test/test_models/test_deeponet.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.fully_connected import FullyConnectedArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
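# Map PyTorch parameter names of the weight-normalized linear layers to the
# TensorFlow-style names ("fc{i}/weights:0", ...) under which the reference
# weights are stored in the .npz file.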
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["_impl.layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["_impl.final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
@pytest.mark.parametrize("jit", [True, False])
def test_fully_connected(jit):
filename = dir_path / "data/test_fully_connected.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
# create graph
arch = FullyConnectedArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
if jit:
arch = torch.jit.script(arch)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
def validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
):
ref_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
periodicity=periodicity,
nr_layers=3,
)
ft_net = validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
return ft_net
@pytest.mark.parametrize(
"input_keys",
[
[Key("x"), Key("y")],
[Key("x"), Key("y", scale=(1.0, 2.0))], # input scale
[Key("x"), Key("z", size=100), Key("y")], # input size larger than 1
],
)
@pytest.mark.parametrize(
"output_keys",
[
[Key("u"), Key("v"), Key("p")],
# output scale and output size larger than 1
[Key("u"), Key("v"), Key("p", scale=(1.0, 2.0)), Key("w", size=100)],
],
)
@pytest.mark.parametrize(
"periodicity",
[
{},
{"x": (0.0, 2 * torch.pi)},
{"x": (0.0, 2 * torch.pi), "y": (torch.pi, 4 * torch.pi)},
],
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_fully_connected(
input_keys, output_keys, periodicity, validate_with_dict_forward
):
# need full jacobian
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("v__y"),
Key.from_str("p__x"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([0, 1, 2]))
# need partial jacobian
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("p__x"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([0, 2]))
# need partial jacobian
deriv_keys = [
Key.from_str("v__y"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([1]))
# need full hessian
deriv_keys = [
Key.from_str("u__x__x"),
Key.from_str("v__y__y"),
Key.from_str("p__x__x"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([0, 1, 2]))
# need full hessian
deriv_keys = [
Key.from_str("u__x__x"),
Key.from_str("v__y__y"),
Key.from_str("p__x"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([0, 1, 2]))
# need partial hessian
deriv_keys = [
Key.from_str("u__x__x"),
Key.from_str("p__x__x"),
]
ft_net = validate_func_arch_fully_connected(
input_keys, output_keys, periodicity, deriv_keys, validate_with_dict_forward
)
assert torch.allclose(ft_net.needed_output_dims, torch.tensor([0, 2]))
if __name__ == "__main__":
test_fully_connected(True)
test_fully_connected(False)
| modulus-sym-main | test/test_models/test_fully_connected.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import torch
import pytest
from packaging import version
from modulus.sym.manager import JitManager
from modulus.sym.utils.benchmark import profile, timeit
from modulus.sym.models.layers.activation import Activation, get_activation_fn
# Allow fusing single nodes, and prevent tiny autodiff graphs from being
# inlined/reverted. These flags are set automatically when jit_manager.enabled
# is True. Users need to set these flags manually if they would like to fuse
# activation functions in standalone code.
#
# torch._C._jit_set_nvfuser_single_node_mode(True)
# torch._C._debug_set_autodiff_subgraph_inlining(False)
skip_if_no_gpu = pytest.mark.skipif(
not torch.cuda.is_available(), reason="There is no GPU to run this test"
)
def test_activation_jit():
jit_manager = JitManager()
jit_manager.enabled = True
jit_manager.arch_mode = "only_activation"
for act in Activation:
act_scripted = get_activation_fn(act)
assert isinstance(
act_scripted, (torch.jit.ScriptFunction, torch.jit.ScriptModule)
)
def sin(x):
return torch.sin(x)
sin_scripted = get_activation_fn(sin)
assert isinstance(sin_scripted, torch.jit.ScriptFunction)
@skip_if_no_gpu
def test_activation_fused_silu():
"""
Make sure SiLU derivative kernels are fused when jit_manager.arch_mode == "only_activation".
We need to rely on the fused SiLU derivative kernels for AMP, because the unfused path
may have intermediate results that overflow the FP16 dynamic range.
"""
jit_manager = JitManager()
jit_manager.enabled = True
jit_manager.arch_mode = "only_activation"
jit_manager.use_nvfuser = True
silu_scripted = get_activation_fn(Activation.SILU)
assert isinstance(silu_scripted, torch.jit.ScriptFunction)
device = "cuda"
batch_size = 10000
x = torch.rand([batch_size, 512], device=device, requires_grad=True)
I_N = torch.ones_like(x)
def run(func, order, *args):
torch.cuda.nvtx.range_push("forward")
y = func(*args)
torch.cuda.nvtx.range_pop()
if order >= 1:
torch.cuda.nvtx.range_push("1st order")
(y__x,) = torch.autograd.grad(y, [x], I_N, create_graph=True)
torch.cuda.nvtx.range_pop()
if order >= 2:
torch.cuda.nvtx.range_push("2nd order")
(y__x__x,) = torch.autograd.grad(y__x, [x], I_N, create_graph=True)
torch.cuda.nvtx.range_pop()
if order >= 3:
torch.cuda.nvtx.range_push("3rd order")
(y__x__x__x,) = torch.autograd.grad(y__x__x, [x], I_N, create_graph=True)
torch.cuda.nvtx.range_pop()
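    # Passing I_N (all ones) as grad_outputs makes each autograd.grad call a
    # vector-Jacobian product with an all-ones cotangent, which for this
    # pointwise activation amounts to the elementwise derivative.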
def cleanup_events(event_keys):
keys = ["cuLaunchKernel", "cudaLaunchKernel", "cudaDeviceSynchronize"]
for evt in keys:
if evt in event_keys:
event_keys.remove(evt)
return event_keys
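    # cleanup_events drops host-side launch/synchronize entries so that only
    # device kernels are counted when checking the fusion results below.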
# benchmark
silu = torch.nn.functional.silu
timeit(run, silu, 1, x, label="silu_1st", verbose=True)
timeit(run, silu_scripted, 1, x, label="silu_scripted_1st", verbose=True)
timeit(run, silu, 2, x, label="silu_2nd", verbose=True)
timeit(run, silu_scripted, 2, x, label="silu_scripted_2nd", verbose=True)
timeit(run, silu, 3, x, label="silu_3rd", verbose=True)
timeit(run, silu_scripted, 3, x, label="silu_scripted_3rd", verbose=True)
# profile and get the number of kernels
verbose = False # set to True to debug
_, events = profile(
run, silu_scripted, 1, x, label="silu_scripted_1st", verbose=verbose
)
event_keys = cleanup_events([evt.key for evt in events])
num_kernels = len(event_keys)
print("silu_scripted_1st num_events: ", num_kernels)
if version.parse(torch.__version__) >= version.parse("1.12.9"):
# this depends on the SiLU autodiff PR: https://github.com/pytorch/pytorch/pull/81724
# fwd + 1st_deriv kernels
assert num_kernels == 2
else:
warnings.warn(f"Fused SiLU is not supported for torch {torch.__version__}")
_, events = profile(
run, silu_scripted, 2, x, label="silu_scripted_2nd", verbose=verbose
)
event_keys = cleanup_events([evt.key for evt in events])
num_kernels = len(event_keys)
print("silu_scripted_2nd num_events: ", num_kernels)
if version.parse(torch.__version__) >= version.parse("1.12.9"):
# fwd + 1st_deriv + 2nd_deriv kernels
assert num_kernels == 3
else:
warnings.warn(f"Fused SiLU is not supported for torch {torch.__version__}")
_, events = profile(
run, silu_scripted, 3, x, label="silu_scripted_3rd", verbose=verbose
)
event_keys = cleanup_events([evt.key for evt in events])
num_kernels = len(event_keys)
print("silu_scripted_3rd num_events: ", num_kernels)
if version.parse(torch.__version__) >= version.parse("1.12.9"):
# fwd + 1st_deriv + 2nd_deriv + 3rd_deriv kernels
assert num_kernels <= 6
else:
warnings.warn(f"Fused SiLU is not supported for torch {torch.__version__}")
if __name__ == "__main__":
test_activation_jit()
test_activation_fused_silu()
| modulus-sym-main | test/test_models/test_activation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.graph import Graph
from modulus.sym.key import Key
from modulus.sym.models.arch import FuncArch, Arch
from typing import List
# ensure torch.rand() is deterministic
_ = torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# disable tf32 for accuracy
torch.backends.cuda.matmul.allow_tf32 = False
def validate_func_arch_net(
ref_net: Arch,
deriv_keys: List[Key],
validate_with_dict_forward: bool,
):
"""
Using double precision for testing.
"""
if validate_with_dict_forward:
ref_net.forward = ref_net._dict_forward
ref_graph = (
Graph(
[
ref_net.make_node("ref_net", jit=False),
],
ref_net.input_keys,
deriv_keys + ref_net.output_keys,
func_arch=False,
)
.double()
.to(device)
)
ft_net = FuncArch(arch=ref_net, deriv_keys=deriv_keys).double().to(device)
# check result
batch_size = 20
in_vars = {
v.name: torch.rand(
[batch_size, v.size], device=device, dtype=torch.double
).requires_grad_()
for v in ref_net.input_keys
}
ft_out = ft_net(in_vars)
ref_out = ref_graph(in_vars)
for k in ref_out.keys():
assert torch.allclose(ref_out[k], ft_out[k])
return ft_net
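# A minimal usage sketch (assuming FullyConnectedArch, as the tests in this
# directory do; any Arch subclass should work the same way):
#
#   from modulus.sym.models.fully_connected import FullyConnectedArch
#   net = FullyConnectedArch(input_keys=[Key("x")], output_keys=[Key("u")])
#   validate_func_arch_net(net, [Key.from_str("u__x")], False)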
| modulus-sym-main | test/test_models/model_test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.constants import diff
from modulus.sym.key import Key
from modulus.sym.models.arch import Arch
# ensure torch.rand() is deterministic
torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test_slice_input():
# prepare inputs
x = torch.rand([100, 1])
y = torch.rand([100, 2])
z = torch.rand([100, 1])
input_variables = {"x": x, "y": y, "z": z}
input_keys = [Key("x", 1), Key("y", 2), Key("z", 1)]
input_key_dict = {str(var): var.size for var in input_keys}
ipt = Arch.prepare_input(input_variables, input_key_dict.keys(), {}, dim=-1)
slice_keys = ["x", "z"]
# expected result
expected = Arch.prepare_input(input_variables, slice_keys, {}, dim=-1)
# sliced result
slice_index = Arch.prepare_slice_index(input_key_dict, slice_keys)
result = Arch.slice_input(ipt, slice_index, dim=-1)
assert torch.allclose(result, expected)
slice_keys = ["y", "z"]
# expected result
expected = Arch.prepare_input(input_variables, slice_keys, {}, dim=-1)
# sliced result
slice_index = Arch.prepare_slice_index(input_key_dict, slice_keys)
result = Arch.slice_input(ipt, slice_index, dim=-1)
assert torch.allclose(result, expected)
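    # For the key layout above, x occupies column 0, y columns 1-2, and z
    # column 3 of the concatenated input, so prepare_slice_index presumably
    # yields indices [0, 3] for ["x", "z"] and [1, 2, 3] for ["y", "z"].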
def validate_process_input_output(input_variables, arch):
# -------------------------- input --------------------------
# expected
expected = Arch.prepare_input(
input_variables,
arch.input_key_dict.keys(),
{},
dim=-1,
input_scales=arch.input_scales,
periodicity=arch.periodicity,
)
# result
result = Arch.concat_input(input_variables, arch.input_key_dict.keys(), {}, dim=-1)
result = Arch.process_input(
result, arch.input_scales_tensor, arch.periodicity, arch.input_key_dict, dim=-1
)
# check result
assert torch.allclose(expected, result)
# -------------------------- output --------------------------
batch_size, output_size = expected.shape[0], sum(arch.output_key_dict.values())
y = torch.rand([batch_size, output_size])
# expected
expected = Arch.prepare_output(
y,
arch.output_key_dict,
dim=-1,
output_scales=arch.output_scales,
)
# result
result = Arch.process_output(y, output_scales_tensor=arch.output_scales_tensor)
result = Arch.split_output(result, output_dict=arch.output_key_dict, dim=-1)
# check result
assert expected.keys() == result.keys()
for key in expected:
assert torch.allclose(expected[key], result[key])
def test_process_input_output():
# prepare inputs
x = torch.ones([100, 1])
y = torch.ones([100, 2])
z = torch.ones([100, 1])
input_variables = {"x": x, "y": y, "z": z}
# no input scales
input_keys = [Key("x", 1), Key("y", 2), Key("z", 1)]
output_keys = [Key("u", 1), Key("v", 1)]
arch = Arch(input_keys, output_keys)
validate_process_input_output(input_variables, arch)
assert arch.input_scales_tensor is None
assert arch.output_scales_tensor is None
# input scales
input_keys = [
Key("x", 1, scale=(0.0, 1.0)),
Key("y", 2, scale=(0.0, 2.0)),
Key("z", 1, scale=(0.0, 3.0)),
]
output_keys = [Key("u", 1, scale=(1.0, 2.0)), Key("v", 1)]
arch = Arch(input_keys, output_keys)
validate_process_input_output(input_variables, arch)
assert torch.allclose(
arch.input_scales_tensor,
torch.tensor([[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 2.0, 3.0]]),
)
assert torch.allclose(
arch.output_scales_tensor, torch.tensor([[1.0, 0.0], [2.0, 1.0]])
)
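    # Row 0 of a scales tensor apparently collects the first component of each
    # key's scale and row 1 the second, with the size-2 key "y" contributing
    # two identical columns.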
# input scales and also periodicity
arch = Arch(
input_keys,
output_keys,
periodicity={"x": (0.0, 2 * torch.pi), "y": (torch.pi, 4 * torch.pi)},
)
validate_process_input_output(input_variables, arch)
if __name__ == "__main__":
test_slice_input()
test_process_input_output()
| modulus-sym-main | test/test_models/test_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.key import Key
from modulus.sym.models.super_res_net import SRResNetArch
def test_srresnet():
    # check 3D, scaling factor 8
model = SRResNetArch(
input_keys=[Key("x", size=4)],
output_keys=[Key("y", size=4), Key("z", size=2)],
n_resid_blocks=8,
scaling_factor=8,
)
bsize = 4
x = {"x": torch.randn((bsize, 4, 32, 20, 8))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 4, 256, 160, 64)
assert outvar["z"].shape == (bsize, 2, 256, 160, 64)
    # check 3D, scaling factor 2
model = SRResNetArch(
input_keys=[Key("x", size=4)],
output_keys=[Key("y", size=3), Key("z", size=1)],
n_resid_blocks=8,
scaling_factor=2,
)
bsize = 2
x = {"x": torch.randn((bsize, 4, 24, 24, 20))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 3, 48, 48, 40)
assert outvar["z"].shape == (bsize, 1, 48, 48, 40)
    # check 3D, scaling factor 2 with equal-sized outputs
model = SRResNetArch(
input_keys=[Key("x", size=4)],
output_keys=[Key("y", size=3), Key("z", size=3)],
n_resid_blocks=8,
scaling_factor=2,
)
bsize = 5
x = {"x": torch.randn((bsize, 4, 16, 16, 32))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 3, 32, 32, 64)
assert outvar["z"].shape == (bsize, 3, 32, 32, 64)
| modulus-sym-main | test/test_models/test_super_res.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.siren import SirenArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases")]
for i in range(nr_layers + 1):
for pt_name, tf_name in names:
_dict["layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
return _dict
def test_siren():
filename = dir_path / "data/test_siren.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
# create graph
arch = SirenArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
first_omega=params["first_omega"],
omega=params["omega"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
def validate_tensor_normalize(input_variables, arch):
# expected
expected = arch._normalize(input_variables, arch.normalization)
expected = SirenArch.concat_input(expected, arch.input_key_dict.keys(), dim=-1)
# result
result = SirenArch.concat_input(input_variables, arch.input_key_dict.keys(), dim=-1)
result = SirenArch._tensor_normalize(result, arch.normalization_tensor)
# check result
assert torch.allclose(expected, result)
def test_tensor_normalize():
# prepare inputs
x = torch.ones([100, 1])
y = torch.ones([100, 2])
z = torch.ones([100, 1])
input_variables = {"x": x, "y": y, "z": z}
input_keys = [Key("x", 1), Key("y", 2), Key("z", 1)]
output_keys = [Key("u", 1), Key("v", 1)]
# normalization is None
normalization = None
arch = SirenArch(input_keys, output_keys, normalization=normalization)
validate_tensor_normalize(input_variables, arch)
assert arch.normalization_tensor is None
# normalization for part of the inputs, z will use no_op_norm
normalization = {"x": (-2.5, 2.5), "y": (-2.5, 2.5)}
arch = SirenArch(input_keys, output_keys, normalization=normalization)
validate_tensor_normalize(input_variables, arch)
assert torch.allclose(
arch.normalization_tensor,
torch.tensor([[-2.5, -2.5, -2.5, -1.0], [2.5, 2.5, 2.5, 1.0]]),
)
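    # Row 0 holds the per-column lower bounds and row 1 the upper bounds;
    # "z" falls back to the default (-1.0, 1.0) no-op range.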
# normalization for all inputs
normalization = {"x": (-2.5, 2.5), "y": (-2.5, 2.5), "z": (-3.5, 3.5)}
arch = SirenArch(input_keys, output_keys, normalization=normalization)
validate_tensor_normalize(input_variables, arch)
assert torch.allclose(
arch.normalization_tensor,
torch.tensor([[-2.5, -2.5, -2.5, -3.5], [2.5, 2.5, 2.5, 3.5]]),
)
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
@pytest.mark.parametrize("normalization", [None, {"x": (-2.5, 2.5), "y": (-2.5, 2.5)}])
def test_func_arch_siren(input_keys, validate_with_dict_forward, normalization):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = SirenArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
normalization=normalization,
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
test_siren()
test_tensor_normalize()
| modulus-sym-main | test/test_models/test_siren.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.fourier_net import FourierNetArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["fc.layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["fc.final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
def test_fourier_net():
filename = dir_path / "data/test_fourier.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
frequencies = test_data["frequencies"]
frequencies_params = test_data["frequencies_params"]
# create graph
arch = FourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=("axis,diagonal", frequencies),
frequencies_params=("axis,diagonal", frequencies_params),
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
arch.fourier_layer_xyzt.frequencies = torch.from_numpy(
Wbs["fourier_layer_xyzt:0"].T
)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_fourier_net(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = FourierNetArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
    test_fourier_net()
| modulus-sym-main | test/test_models/test_fourier_net.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
from modulus.sym.models.arch import FuncArch
from modulus.sym.key import Key
from modulus.sym.graph import Graph
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.manager import JitManager
# ensure torch.rand() is deterministic
torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Disable tf32 for accuracy:
# FuncArch uses BatchedTensors, and the floating point calculation
# results could be different.
torch.backends.cuda.matmul.allow_tf32 = False
@pytest.mark.parametrize("jit_activation", [True, False])
def test_func_arch_graph_1(jit_activation):
"""
Explicitly specify the needed derivative terms as Graph argument.
"""
# setup jit_manager
jit_manager = JitManager()
jit_manager.enabled = jit_activation
jit_manager.arch_mode = "only_activation"
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
network = FullyConnectedArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v")],
)
nodes = [network.make_node("ref_net", jit=False)]
ft_graph = Graph(
nodes,
[Key("x"), Key("y")],
req_names=deriv_keys,
func_arch=True,
).to(device)
ref_graph = Graph(
nodes,
[Key("x"), Key("y")],
req_names=deriv_keys,
func_arch=False,
).to(device)
if jit_activation:
# ensure we are using fused SiLU from torchscript
assert isinstance(
network._impl.layers[0].callable_activation_fn, torch.jit.ScriptFunction
)
# check FuncArch presence
func_arch_node = None
for node in ft_graph.node_evaluation_order:
evaluate = node.evaluate
if isinstance(evaluate, FuncArch):
func_arch_node = node
assert func_arch_node is not None, "No FuncArch found in the graph"
# check result
x = torch.rand([100, 1], device=device).requires_grad_()
y = torch.rand([100, 1], device=device).requires_grad_()
in_vars = {"x": x, "y": y}
ft_out = ft_graph(in_vars)
ref_out = ref_graph(in_vars)
for k in ref_out.keys():
assert torch.allclose(ref_out[k], ft_out[k], atol=5e-5)
@pytest.mark.parametrize("func_arch_allow_partial_hessian", [True, False])
def test_func_arch_graph_2(func_arch_allow_partial_hessian):
"""
Test the graph could automatically add intermediate derivatives to
FuncArch.
"""
    # mirror the lid-driven cavity (LDC) example setup
flow_net = FullyConnectedArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
)
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
nodes = ns.make_nodes() + [flow_net.make_node(name="flow_network", jit=False)]
ft_graph = Graph(
nodes,
[Key("x"), Key("y")],
req_names=Key.convert_list(["continuity", "momentum_x", "momentum_y"]),
func_arch=True,
func_arch_allow_partial_hessian=func_arch_allow_partial_hessian,
).to(device)
ref_graph = Graph(
nodes,
[Key("x"), Key("y")],
req_names=Key.convert_list(["continuity", "momentum_x", "momentum_y"]),
func_arch=False,
).to(device)
# check FuncArch presence
func_arch_node = None
for node in ft_graph.node_evaluation_order:
evaluate = node.evaluate
if isinstance(evaluate, FuncArch):
func_arch_node = node
assert func_arch_node is not None, "No FuncArch found in the graph"
# check allow_partial_hessian flag
expected_outputs = [
"u",
"v",
"p",
"u__y",
"v__x",
"u__x",
"v__y",
"u__x__x",
"v__y__y",
"u__y__y",
"v__x__x",
]
if not func_arch_allow_partial_hessian:
expected_outputs += [
"p__y",
"p__x",
]
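    # With func_arch_allow_partial_hessian enabled, the first-order pressure
    # gradients are left to the regular autograd path, so FuncArch only needs
    # to produce the velocity Jacobian/Hessian terms listed above.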
ft_outputs = [str(key) for key in func_arch_node.outputs]
assert len(ft_outputs) == len(expected_outputs)
assert sorted(ft_outputs) == sorted(expected_outputs)
# check result
x = torch.rand([100, 1], device=device).requires_grad_()
y = torch.rand([100, 1], device=device).requires_grad_()
in_vars = {"x": x, "y": y}
ft_out = ft_graph(in_vars)
ref_out = ref_graph(in_vars)
for k in ref_out.keys():
assert torch.allclose(ref_out[k], ft_out[k], atol=1e-4)
def test_get_key_dim():
input_keys = [Key("x", 1), Key("y", 1), Key("z", 1)]
key_dims = FuncArch._get_key_dim(input_keys)
expected = {"x": 0, "y": 1, "z": 2}
for key in key_dims:
assert expected[key] == key_dims[key]
input_keys = [Key("x", 1), Key("y", 2), Key("z", 1)]
key_dims = FuncArch._get_key_dim(input_keys)
expected = {"x": 0, "z": 3}
for key in key_dims:
assert expected[key] == key_dims[key]
input_keys = [Key("x", 100), Key("y", 1), Key("z", 1)]
key_dims = FuncArch._get_key_dim(input_keys)
expected = {"y": 100, "z": 101}
for key in key_dims:
assert expected[key] == key_dims[key]
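# Note: _get_key_dim appears to record the column offset of each size-1 key in
# the concatenated input and to omit keys with size > 1, as the three cases
# above demonstrate.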
if __name__ == "__main__":
test_func_arch_graph_1(True)
test_func_arch_graph_1(False)
test_func_arch_graph_2(True)
test_func_arch_graph_2(False)
test_get_key_dim()
| modulus-sym-main | test/test_models/test_func_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| modulus-sym-main | test/test_models/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.key import Key
from modulus.sym.models.afno import AFNOArch
########################
# load & verify
########################
def test_afno():
    # Construct AFNO model
model = AFNOArch(
input_keys=[Key("x", size=2)],
output_keys=[Key("u", size=2), Key("p")],
img_shape=(240, 240),
patch_size=16,
embed_dim=256,
depth=4,
num_blocks=8,
)
# Testing JIT
node = model.make_node(name="AFNO", jit=True)
bsize = 5
invar = {
"x": torch.randn(bsize, 2, 240, 240),
}
# Model forward
outvar = node.evaluate(invar)
# Check output size
assert outvar["u"].shape == (bsize, 2, 240, 240)
assert outvar["p"].shape == (bsize, 1, 240, 240)
if __name__ == "__main__":
    test_afno()
| modulus-sym-main | test/test_models/test_afno.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.fused_mlp import (
FusedMLPArch,
FusedFourierNetArch,
FusedGridEncodingNetArch,
)
import torch
import numpy as np
from modulus.sym.key import Key
import pytest
layer_size_params = [
pytest.param(128, id="fused_128"),
pytest.param(256, id="cutlass_256"),
]
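# The fully fused kernels (presumably from tinycudann) only support layer
# widths in {16, 32, 64, 128}; wider layers such as 256 fall back to the
# CUTLASS path, which is why each test toggles `fully_fused` on layer_size.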
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["_impl.layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["_impl.final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
@pytest.mark.parametrize("layer_size", layer_size_params)
def test_fully_fused_mlp(layer_size):
batch_size = 1024
data_in = np.random.random((batch_size, 2))
fully_fused = False
if layer_size in set([16, 32, 64, 128]):
fully_fused = True
# create graph
arch = FusedMLPArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=layer_size,
nr_layers=6,
fully_fused=fully_fused,
)
data_out2 = arch(
{
"x": torch.from_numpy(data_in[:, 0:1]).cuda(),
"y": torch.from_numpy(data_in[:, 1:2]).cuda(),
}
)
data_out2 = data_out2["u"].cpu().detach().numpy()
# TODO: Figure out arch.params slicing to initialize pytorch model
# and compare TCNN output to that
# assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
@pytest.mark.parametrize("layer_size", layer_size_params)
def test_fused_fourier_net(layer_size):
batch_size = 1024
data_in = np.random.random((batch_size, 2))
fully_fused = False
if layer_size in set([16, 32, 64, 128]):
fully_fused = True
# create graph
arch = FusedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=layer_size,
nr_layers=6,
fully_fused=fully_fused,
n_frequencies=12,
)
data_out2 = arch(
{
"x": torch.from_numpy(data_in[:, 0:1]).cuda(),
"y": torch.from_numpy(data_in[:, 1:2]).cuda(),
}
)
data_out2 = data_out2["u"].cpu().detach().numpy()
# TODO: Figure out arch.params slicing to initialize pytorch model
# and compare TCNN output to that
# assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
@pytest.mark.parametrize("layer_size", layer_size_params)
def test_fused_grid_encoding_net(layer_size):
batch_size = 1024
data_in = np.random.random((batch_size, 2))
fully_fused = False
if layer_size in set([16, 32, 64, 128]):
fully_fused = True
# create graph
arch = FusedGridEncodingNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=layer_size,
nr_layers=6,
fully_fused=fully_fused,
indexing="Hash",
n_levels=16,
n_features_per_level=2,
log2_hashmap_size=19,
base_resolution=16,
per_level_scale=2.0,
interpolation="Smoothstep",
)
data_out2 = arch(
{
"x": torch.from_numpy(data_in[:, 0:1]).cuda(),
"y": torch.from_numpy(data_in[:, 1:2]).cuda(),
}
)
data_out2 = data_out2["u"].cpu().detach().numpy()
# TODO: Figure out arch.params slicing to initialize pytorch model
# and compare TCNN output to that
# assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
if __name__ == "__main__":
# Fused MLP tests
test_fully_fused_mlp(128) # Fully Fused MLP
test_fully_fused_mlp(256) # Cutlass MLP
# Fused Fourier Net tests
test_fused_fourier_net(128) # Fully Fused MLP
test_fused_fourier_net(256) # Cutlass MLP
# Fused Grid encoding tests
test_fused_grid_encoding_net(128) # Fully Fused MLP
test_fused_grid_encoding_net(256) # Cutlass MLP
| modulus-sym-main | test/test_models/test_fused_mlp.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.dgm import DGMArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
dgm_name = ["z", "g", "r", "h"]
# start layer
for pt_name, tf_name in names:
_dict["fc_start.linear." + pt_name] = "fc_start/" + tf_name + ":0"
# end layer
for pt_name, tf_name in names[:2]:
_dict["fc_end.linear." + pt_name] = "fc_end/" + tf_name + ":0"
# middle layers
for i in range(nr_layers - 1):
for dn in dgm_name:
_dict["dgm_layers." + str(i) + "." + dn + ".bias"] = (
"dgm_" + dn + str(i + 1) + "/biases:0"
)
_dict["dgm_layers." + str(i) + "." + dn + ".linear_1.weight"] = (
"dgm_" + dn + str(i + 1) + "/weights1:0"
)
_dict["dgm_layers." + str(i) + "." + dn + ".linear_2.weight"] = (
"dgm_" + dn + str(i + 1) + "/weights2:0"
)
_dict["dgm_layers." + str(i) + "." + dn + ".linear_1.weight_g"] = (
"dgm_" + dn + str(i + 1) + "/alphas1:0"
)
_dict["dgm_layers." + str(i) + "." + dn + ".linear_2.weight_g"] = (
"dgm_" + dn + str(i + 1) + "/alphas2:0"
)
return _dict
def test_dgm():
filename = dir_path / "data/test_dgm.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
# create graph
arch = DGMArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_dgm(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = DGMArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
test_dgm()
| modulus-sym-main | test/test_models/test_dgm.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.multiplicative_filter_net import (
MultiplicativeFilterNetArch,
FilterType,
)
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
tri_names = ("frequency", "phase")
for tri_name in tri_names:
_dict["first_filter." + tri_name] = "fourier_filter_first_" + tri_name + ":0"
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["fc_layers." + str(i) + ".linear." + pt_name] = (
"fc_" + str(i) + "/" + tf_name + ":0"
)
for tri_name in tri_names:
_dict["filters." + str(i) + "." + tri_name] = (
"fourier_filter_layer" + str(i) + "_" + tri_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
def test_multiplicative_filter():
filename = dir_path / "data/test_multiplicative_filter.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
# create graph
arch = MultiplicativeFilterNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
if "filter" in _name:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]])
else:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, atol=1e-4), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
@pytest.mark.parametrize("normalization", [None, {"x": (-2.5, 2.5), "y": (-2.5, 2.5)}])
@pytest.mark.parametrize("filter_type", [FilterType.FOURIER, FilterType.GABOR])
def test_func_arch_multiplicative_filter(
input_keys, validate_with_dict_forward, normalization, filter_type
):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = MultiplicativeFilterNetArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
normalization=normalization,
filter_type=filter_type,
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
test_multiplicative_filter()
| modulus-sym-main | test/test_models/test_multiplicative_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.key import Key
from modulus.sym.models.fno import FNOArch
from modulus.sym.models.fully_connected import FullyConnectedArch
########################
# load & verify
########################
def test_fno_1d():
# Construct FNO model
decoder = FullyConnectedArch(
input_keys=[Key("z", size=32)],
output_keys=[Key("u", size=2), Key("p")],
nr_layers=1,
layer_size=8,
)
model = FNOArch(
input_keys=[Key("x", size=2)],
decoder_net=decoder,
dimension=1,
fno_modes=4,
padding=0,
)
# Testing JIT
model.make_node(name="FNO1d", jit=True)
bsize = 5
invar = {
"x": torch.randn(bsize, 2, 64),
}
# Model forward
outvar = model(invar)
# Check output size
assert outvar["u"].shape == (bsize, 2, 64)
assert outvar["p"].shape == (bsize, 1, 64)
def test_fno_2d():
# Construct FNO model
decoder = FullyConnectedArch(
input_keys=[Key("z", size=32)],
output_keys=[Key("u", size=2), Key("p")],
nr_layers=2,
layer_size=16,
)
model = FNOArch(
input_keys=[Key("x"), Key("y"), Key("rho", size=2)],
decoder_net=decoder,
dimension=2,
fno_modes=16,
)
# Testing JIT
model.make_node(name="FNO2d", jit=True)
bsize = 5
invar = {
"x": torch.randn(bsize, 1, 32, 32),
"y": torch.randn(bsize, 1, 32, 32),
"rho": torch.randn(bsize, 2, 32, 32),
}
# Model forward
outvar = model(invar)
# Check output size
assert outvar["u"].shape == (bsize, 2, 32, 32)
assert outvar["p"].shape == (bsize, 1, 32, 32)
def test_fno_3d():
# Construct FNO model
decoder = FullyConnectedArch(
input_keys=[Key("z", size=32)],
output_keys=[Key("u"), Key("v")],
nr_layers=1,
layer_size=8,
)
model = FNOArch(
input_keys=[Key("x", size=3), Key("y")],
decoder_net=decoder,
dimension=3,
fno_modes=16,
)
# Testing JIT
model.make_node(name="FNO3d", jit=True)
bsize = 5
invar = {
"x": torch.randn(bsize, 3, 32, 32, 32),
"y": torch.randn(bsize, 1, 32, 32, 32),
}
# Model forward
outvar = model(invar)
# Check output size
assert outvar["u"].shape == (bsize, 1, 32, 32, 32)
assert outvar["v"].shape == (bsize, 1, 32, 32, 32)
def test_fno():
test_fno_1d()
test_fno_2d()
test_fno_3d()
if __name__ == "__main__":
    test_fno()
| modulus-sym-main | test/test_models/test_fno.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.highway_fourier_net import HighwayFourierNetArch
from pathlib import Path
import torch
import numpy as np
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for layer_name in ("fc_t", "fc_v"):
for pt_name, tf_name in names:
_dict[layer_name + ".linear." + pt_name] = layer_name + "/" + tf_name + ":0"
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["fc_layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
def test_highway_fourier_net():
filename = dir_path / "data/test_highway_fourier.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
frequencies = test_data["frequencies"]
frequencies_params = test_data["frequencies_params"]
# create graph
arch = HighwayFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=("axis,diagonal", frequencies),
frequencies_params=("axis,diagonal", frequencies_params),
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
arch.fourier_layer_xyzt.frequencies = torch.from_numpy(
Wbs["fourier_layer_xyzt:0"].T
)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_highway_fourier(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = HighwayFourierNetArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
    test_highway_fourier_net()
| modulus-sym-main | test/test_models/test_highway_fourier.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from modulus.sym.key import Key
from modulus.sym.models.pix2pix import Pix2PixArch
def test_pix2pix():
# check 1D
model = Pix2PixArch(
input_keys=[Key("x", size=4)],
output_keys=[Key("y", size=4), Key("z", size=2)],
dimension=1,
scaling_factor=2,
)
bsize = 4
x = {"x": torch.randn((bsize, 4, 32))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 4, 64)
assert outvar["z"].shape == (bsize, 2, 64)
# check 2D
model = Pix2PixArch(
input_keys=[Key("x", size=2)],
output_keys=[Key("y", size=2), Key("z", size=1)],
dimension=2,
n_downsampling=1,
scaling_factor=4,
)
bsize = 4
x = {"x": torch.randn((bsize, 2, 28, 28))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 2, 112, 112)
assert outvar["z"].shape == (bsize, 1, 112, 112)
# check 3D
model = Pix2PixArch(
input_keys=[Key("x", size=1)],
output_keys=[Key("y", size=2), Key("z", size=2)],
dimension=3,
)
bsize = 4
x = {"x": torch.randn((bsize, 1, 64, 64, 64))}
outvar = model.forward(x)
# Check output size
assert outvar["y"].shape == (bsize, 2, 64, 64, 64)
assert outvar["z"].shape == (bsize, 2, 64, 64, 64)
| modulus-sym-main | test/test_models/test_pix2pix.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.radial_basis import RadialBasisArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict():
_dict = dict()
_dict["fc_layer.linear.weight"] = "fc_final/weights:0"
_dict["fc_layer.linear.bias"] = "fc_final/biases:0"
return _dict
def test_radial_basis():
filename = dir_path / "data/test_radial_basis.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
# create graph
arch = RadialBasisArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
bounds={"x": [0.0, 1.0], "y": [0.0, 1.0]},
nr_centers=params["nr_centers"],
sigma=params["sigma"],
)
name_dict = make_dict()
center_data = np.hstack(
(Wbs["c_x:0"].reshape((-1, 1)), Wbs["c_y:0"].reshape((-1, 1)))
)
for _name, _tensor in arch.named_parameters():
if _name == "centers":
_tensor.data = torch.from_numpy(center_data)
else:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_radial_basis(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = RadialBasisArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
bounds={"x": [0.0, 1.0], "y": [0.0, 1.0]},
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
test_radial_basis()
| modulus-sym-main | test/test_models/test_radial_basis.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.multiscale_fourier_net import MultiscaleFourierNetArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for i in range(nr_layers):
for pt_name, tf_name in names:
_dict["fc_layers." + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
def test_multiscale_fourier_net():
filename = dir_path / "data/test_multiscale_fourier.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
frequency_1 = tuple(
[test_data["frequency_1_name"][()]] + list(test_data["frequency_1_data"])
)
frequency_2 = tuple(
[test_data["frequency_2_name"][()]] + list(test_data["frequency_2_data"])
)
frequencies = test_data["frequencies"]
frequencies_params = test_data["frequencies_params"]
# create graph
arch = MultiscaleFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=(frequency_1, frequency_2),
frequencies_params=(frequency_1, frequency_2),
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
arch.fourier_layers_xyzt[0].frequencies = torch.from_numpy(
Wbs["fourier_layer_xyzt_0:0"].T
)
arch.fourier_layers_xyzt[1].frequencies = torch.from_numpy(
Wbs["fourier_layer_xyzt_1:0"].T
)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_multiscale_fourier_net(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = MultiscaleFourierNetArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
frequencies=(("gaussian", 1, 256), ("gaussian", 10, 256)),
frequencies_params=(("gaussian", 1, 256), ("gaussian", 10, 256)),
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
    test_multiscale_fourier_net()
| modulus-sym-main | test/test_models/test_multiscale_fourier.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.models.modified_fourier_net import ModifiedFourierNetArch
import torch
import numpy as np
from pathlib import Path
from modulus.sym.key import Key
import pytest
from .model_test_utils import validate_func_arch_net
dir_path = Path(__file__).parent
def make_dict(nr_layers):
_dict = dict()
names = [("weight", "weights"), ("bias", "biases"), ("weight_g", "alphas")]
for layer_name in ("fc_u", "fc_v"):
for pt_name, tf_name in names:
_dict[layer_name + ".linear." + pt_name] = layer_name + "/" + tf_name + ":0"
for i in range(nr_layers):
for pt_name, tf_name in names:
if i == 0:
_dict["fc_" + str(i) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
else:
_dict["fc_layers." + str(i - 1) + ".linear." + pt_name] = (
"fc" + str(i) + "/" + tf_name + ":0"
)
for pt_name, tf_name in names[:2]:
_dict["final_layer.linear." + pt_name] = "fc_final/" + tf_name + ":0"
return _dict
def test_modified_fourier_net():
filename = dir_path / "data/test_modified_fourier.npz"
test_data = np.load(filename, allow_pickle=True)
data_in = test_data["data_in"]
Wbs = test_data["Wbs"][()]
params = test_data["params"][()]
frequencies = test_data["frequencies"]
frequencies_params = test_data["frequencies_params"]
# create graph
arch = ModifiedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=("axis,diagonal", frequencies),
frequencies_params=("axis,diagonal", frequencies_params),
layer_size=params["layer_size"],
nr_layers=params["nr_layers"],
)
name_dict = make_dict(params["nr_layers"])
for _name, _tensor in arch.named_parameters():
if _tensor.requires_grad:
_tensor.data = torch.from_numpy(Wbs[name_dict[_name]].T)
arch.fourier_layer_xyzt.frequencies = torch.from_numpy(
Wbs["fourier_layer_xyzt:0"].T
)
data_out2 = arch(
{"x": torch.from_numpy(data_in[:, 0:1]), "y": torch.from_numpy(data_in[:, 1:2])}
)
data_out2 = data_out2["u"].detach().numpy()
# load outputs
data_out1 = test_data["data_out"]
# verify
assert np.allclose(data_out1, data_out2, rtol=1e-3), "Test failed!"
print("Success!")
@pytest.mark.parametrize(
"input_keys", [[Key("x"), Key("y")], [Key("x"), Key("y", scale=(1.0, 2.0))]]
)
@pytest.mark.parametrize("validate_with_dict_forward", [True, False])
def test_func_arch_modified_fourier_net(input_keys, validate_with_dict_forward):
deriv_keys = [
Key.from_str("u__x"),
Key.from_str("u__x__x"),
Key.from_str("v__y"),
Key.from_str("v__y__y"),
]
ref_net = ModifiedFourierNetArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v")],
)
validate_func_arch_net(ref_net, deriv_keys, validate_with_dict_forward)
if __name__ == "__main__":
    test_modified_fourier_net()
| modulus-sym-main | test/test_models/test_modified_fourier.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
np.random.seed(0)
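# note: despite the name, reference data generation runs on CPU for determinism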
cuda_device = torch.device("cpu:0")
################################################################
# 2d fourier neural operator
# Based on: https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_2d.py
################################################################
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super().__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(
in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat
)
)
self.weights2 = nn.Parameter(
self.scale
* torch.rand(
in_channels, out_channels, self.modes1, self.modes2, dtype=torch.cfloat
)
)
# Complex multiplication
def compl_mul2d(self, input, weights):
# (batch, in_channel, x,y ), (in_channel, out_channel, x,y) -> (batch, out_channel, x,y)
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2] = self.compl_mul2d(
x_ft[:, :, : self.modes1, : self.modes2], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2] = self.compl_mul2d(
x_ft[:, :, -self.modes1 :, : self.modes2], self.weights2
)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
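# A minimal sanity sketch of the layer above: only the lowest modes1 x modes2
# frequencies (plus their negative-frequency mirrors along the first axis) are
# mixed across channels, while the spatial resolution is preserved.
def _sketch_spectral_conv2d():
    layer = SpectralConv2d(in_channels=3, out_channels=4, modes1=5, modes2=5)
    x = torch.randn(2, 3, 16, 16)
    assert layer(x).shape == (2, 4, 16, 16)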
class FNO2d(nn.Module):
def __init__(self, modes1, modes2, width):
super().__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
        1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the coefficient function and locations (a(x, y), x, y)
input shape: (batchsize, x=s, y=s, c=3)
output: the solution
output shape: (batchsize, x=s, y=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.width = width
self.padding = 9 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(3, self.width) # input channel is 3: (a(x, y), x, y)
self.conv0 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv1 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv2 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.conv3 = SpectralConv2d(self.width, self.width, self.modes1, self.modes2)
self.w0 = nn.Conv2d(self.width, self.width, 1)
self.w1 = nn.Conv2d(self.width, self.width, 1)
self.w2 = nn.Conv2d(self.width, self.width, 1)
self.w3 = nn.Conv2d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
batchsize = x.shape[0]
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 3, 1, 2)
x = F.pad(x, [0, self.padding, 0, self.padding])
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x[..., : -self.padding, : -self.padding]
x = x.permute(0, 2, 3, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
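# A minimal shape sketch of the model above: a channels-last scalar field goes
# in, the (x, y) grid is appended internally, and a channels-last scalar field
# comes out at the same resolution.
def _sketch_fno2d():
    net = FNO2d(modes1=4, modes2=4, width=8)
    assert net(torch.randn(2, 20, 20, 1)).shape == (2, 20, 20, 1)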
################################################################
# configurations
################################################################
modes = 12
width = 32
model = FNO2d(modes, modes, width).to(cuda_device)
x_numpy = np.random.rand(100, 50, 50, 1).astype(np.float32)
x_tensor = torch.from_numpy(x_numpy).to(cuda_device)
y_tensor = model(x_tensor)
y_numpy = y_tensor.detach().numpy()
Wbs = {
_name: _value.data.detach().numpy() for _name, _value in model.named_parameters()
}
params = {"modes": [modes, modes], "width": width, "padding": [9, 9]}
np.savez_compressed(
"test_fno2d.npz", data_in=x_numpy, data_out=y_numpy, params=params, Wbs=Wbs
)
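# A minimal sketch of how the archive written above is read back by the test
# suite (assuming the file exists on disk): the object arrays need
# allow_pickle=True and the [()] unwrap used below.
def _sketch_load_archive(path="test_fno2d.npz"):
    data = np.load(path, allow_pickle=True)
    return data["data_in"], data["data_out"], data["params"][()], data["Wbs"][()]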
| modulus-sym-main | test/test_models/data/fno2d_generate_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
torch.manual_seed(0)
np.random.seed(0)
cuda_device = torch.device("cpu:0")
################################################################
# Baseline AFNO implementation from Jaideep's original wind dataset implementation
# Based on: https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
################################################################
def compl_mul_add_act(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmp = torch.einsum("bxykis,kiot->stbxyko", a, b)
res = (
torch.stack(
[tmp[0, 0, ...] - tmp[1, 1, ...], tmp[1, 0, ...] + tmp[0, 1, ...]], dim=-1
)
+ c
)
return res
def compl_mul_add_act_c(
a: torch.Tensor, b: torch.Tensor, c: torch.Tensor
) -> torch.Tensor:
tmp = torch.einsum("bxyki,kio->bxyko", a, b)
res = tmp + c
return res
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.0))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
r"""Fills the input Tensor with values drawn from a truncated
normal distribution.
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def drop_path(
x: torch.Tensor, drop_prob: float = 0.0, training: bool = False
) -> torch.Tensor:
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1.0 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob=None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
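# A minimal sketch of the stochastic-depth behaviour above: in eval mode the
# input passes through unchanged; in training mode whole samples are zeroed and
# survivors are rescaled by 1/keep_prob so the expectation is preserved.
def _sketch_drop_path():
    x = torch.ones(4, 3)
    assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)
    y = drop_path(x, drop_prob=0.5, training=True)
    assert set(y[:, 0].tolist()) <= {0.0, 2.0}  # each sample dropped, or scaled by 1/0.5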
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop) if drop > 0.0 else nn.Identity()
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class AFNO2D(nn.Module):
def __init__(
self,
hidden_size,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1,
hidden_size_factor=1,
):
super().__init__()
assert (
hidden_size % num_blocks == 0
), f"hidden_size {hidden_size} should be divisble by num_blocks {num_blocks}"
self.hidden_size = hidden_size
self.sparsity_threshold = sparsity_threshold
self.num_blocks = num_blocks
self.block_size = self.hidden_size // self.num_blocks
self.hard_thresholding_fraction = hard_thresholding_fraction
self.hidden_size_factor = hidden_size_factor
self.scale = 0.02
# new
self.w1 = nn.Parameter(
self.scale
* torch.randn(
self.num_blocks,
self.block_size,
self.block_size * self.hidden_size_factor,
2,
)
)
self.b1 = nn.Parameter(
self.scale
* torch.randn(self.num_blocks, self.block_size * self.hidden_size_factor, 2)
)
self.w2 = nn.Parameter(
self.scale
* torch.randn(
self.num_blocks,
self.block_size * self.hidden_size_factor,
self.block_size,
2,
)
)
self.b2 = nn.Parameter(
self.scale * torch.randn(self.num_blocks, self.block_size, 2)
)
def forward(self, x):
bias = x
dtype = x.dtype
x = x.float()
B, H, W, C = x.shape
total_modes = H // 2 + 1
kept_modes = int(total_modes * self.hard_thresholding_fraction)
x = torch.fft.rfft2(x, dim=(1, 2), norm="ortho")
x = x.view(B, H, W // 2 + 1, self.num_blocks, self.block_size)
# new
x = torch.view_as_real(x)
o2 = torch.zeros(x.shape, device=x.device)
o1 = F.relu(
compl_mul_add_act(
x[
:,
total_modes - kept_modes : total_modes + kept_modes,
:kept_modes,
...,
],
self.w1,
self.b1,
)
)
o2[
:, total_modes - kept_modes : total_modes + kept_modes, :kept_modes, ...
] = compl_mul_add_act(o1, self.w2, self.b2)
# finalize
x = F.softshrink(o2, lambd=self.sparsity_threshold)
x = torch.view_as_complex(x)
x = x.reshape(B, H, W // 2 + 1, C)
x = torch.fft.irfft2(x, s=(H, W), dim=(1, 2), norm="ortho")
x = x.type(dtype)
return x + bias
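# A minimal shape sketch of the filter above (channels-last input): channels are
# split into num_blocks blocks, the kept Fourier modes pass through a two-layer
# complex MLP, and the residual `x + bias` keeps the output shape equal to the
# input shape.
def _sketch_afno2d():
    filt = AFNO2D(hidden_size=16, num_blocks=4)
    x = torch.randn(2, 8, 8, 16)  # (B, H, W, C)
    assert filt(x).shape == x.shape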
class Block(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.0,
drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
double_skip=True,
num_blocks=8,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
):
super().__init__()
# print("LN normalized shape", dim)
self.norm1 = norm_layer(dim)
self.filter = AFNO2D(
dim, num_blocks, sparsity_threshold, hard_thresholding_fraction
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
# original
self.norm2 = norm_layer(dim)
# new
# self.norm2 = norm_layer((h, w, dim))
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
self.double_skip = double_skip
def forward(self, x):
residual = x
x = self.norm1(x)
x = self.filter(x)
if self.double_skip:
x = x + residual
residual = x
x = self.norm2(x)
x = self.mlp(x)
x = self.drop_path(x)
x = x + residual
return x
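# A minimal shape sketch of the block above: with double_skip=True there is a
# residual around the spectral filter and another around the MLP; either way the
# token grid shape is unchanged.
def _sketch_block():
    blk = Block(dim=16, num_blocks=4)
    assert blk(torch.randn(2, 8, 8, 16)).shape == (2, 8, 8, 16)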
class AFNONet(nn.Module):
def __init__(
self,
img_size=(720, 1440),
patch_size=(16, 16),
in_chans=2,
out_chans=2,
embed_dim=768,
depth=12,
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
num_blocks=16,
sparsity_threshold=0.01,
hard_thresholding_fraction=1.0,
):
super().__init__()
self.img_size = img_size
self.patch_size = patch_size
self.in_chans = in_chans
self.out_chans = out_chans
self.embed_dim = embed_dim
self.num_features = self.embed_dim = embed_dim
self.num_blocks = num_blocks
norm_layer = partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=self.patch_size,
in_chans=self.in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
# new: x = B, C, H*W
self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, num_patches))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
mlp_ratio=mlp_ratio,
drop=drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
num_blocks=self.num_blocks,
sparsity_threshold=sparsity_threshold,
hard_thresholding_fraction=hard_thresholding_fraction,
)
for i in range(depth)
]
)
# new
self.head = nn.Conv2d(
embed_dim,
self.out_chans * self.patch_size[0] * self.patch_size[1],
1,
bias=False,
)
trunc_normal_(self.pos_embed, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def forward_features(self, x):
b, h, w = x.shape[0], x.shape[-2], x.shape[-1]
x = self.patch_embed(x)
x = x + self.pos_embed
x = self.pos_drop(x)
# new
x = x.reshape(
b, self.embed_dim, h // self.patch_size[0], w // self.patch_size[1]
)
# transpose here to see if rest is OK: (B, H, W, C)
x = x.permute((0, 2, 3, 1)).contiguous()
for blk in self.blocks:
x = blk(x)
# permute back: (B, C, H, W)
x = x.permute((0, 3, 1, 2)).contiguous()
return x
def forward(self, x):
# new: B, C, H, W
b, h, w = x.shape[0], x.shape[-2], x.shape[-1]
x = self.forward_features(x)
x = self.head(x)
xv = x.view(
b,
self.patch_size[0],
self.patch_size[1],
-1,
h // self.patch_size[0],
w // self.patch_size[1],
)
xvt = torch.permute(xv, (0, 3, 4, 1, 5, 2)).contiguous()
x = xvt.view(b, -1, h, w)
return x
class PatchEmbed(nn.Module):
def __init__(
self, img_size=(224, 224), patch_size=(16, 16), in_chans=3, embed_dim=768
):
super().__init__()
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=patch_size, stride=patch_size
)
def forward(self, x):
B, C, H, W = x.shape
assert (
H == self.img_size[0] and W == self.img_size[1]
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2)
return x
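# A minimal sketch of the embedding above: a patch_size-strided conv turns a
# (B, C, H, W) image into (B, embed_dim, num_patches) flattened patch tokens.
def _sketch_patch_embed():
    pe = PatchEmbed(img_size=(64, 64), patch_size=(16, 16), in_chans=2, embed_dim=8)
    assert pe(torch.randn(1, 2, 64, 64)).shape == (1, 8, 16)  # 4 * 4 patches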
################################################################
# configurations
################################################################
img_size = (64, 64)
patch_size = (16, 16)
in_channels = 2
out_channels = 5
n_layers = 4
modes = 16
embed_dim = 64
model = AFNONet(
img_size=img_size,
patch_size=patch_size,
in_chans=in_channels,
out_chans=out_channels,
embed_dim=embed_dim,
depth=n_layers, # Number of model layers
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
num_blocks=modes, # Number of modes
).to(cuda_device)
x_numpy = np.random.rand(2, in_channels, img_size[0], img_size[1]).astype(np.float32)
x_tensor = torch.from_numpy(x_numpy).to(cuda_device)
y_tensor = model(x_tensor)
y_numpy = y_tensor.detach().numpy()
Wbs = {
_name: _value.data.detach().numpy() for _name, _value in model.named_parameters()
}
params = {
"modes": modes,
"img_size": img_size,
"patch_size": patch_size,
"in_channels": in_channels,
"out_channels": out_channels,
"n_layers": n_layers,
"modes": modes,
"embed_dim": embed_dim,
}
np.savez_compressed(
"test_ano.npz", data_in=x_numpy, data_out=y_numpy, params=params, Wbs=Wbs
)
| modulus-sym-main | test/test_models/data/ano_generate_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
np.random.seed(0)
cuda_device = torch.device("cpu:0")
################################################################
# 3d fourier neural operator
# Based on: https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_3d.py
################################################################
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super().__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.modes3 = modes3
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights2 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights3 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
self.weights4 = nn.Parameter(
self.scale
* torch.rand(
in_channels,
out_channels,
self.modes1,
self.modes2,
self.modes3,
dtype=torch.cfloat,
)
)
# Complex multiplication
def compl_mul3d(self, input, weights):
# (batch, in_channel, x,y,t ), (in_channel, out_channel, x,y,t) -> (batch, out_channel, x,y,t)
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-3),
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, : self.modes2, : self.modes3], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3], self.weights2
)
out_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3], self.weights3
)
out_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3], self.weights4
)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
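# A minimal sketch of why four weight tensors are needed above: rfftn halves
# only the last axis, so the retained low-frequency corner appears at both ends
# of the first two axes and each (+-modes1, +-modes2) corner gets its own weights.
def _sketch_spectral_conv3d():
    layer = SpectralConv3d(in_channels=2, out_channels=2, modes1=3, modes2=3, modes3=3)
    assert layer(torch.randn(1, 2, 10, 10, 10)).shape == (1, 2, 10, 10, 10)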
class FNO3d(nn.Module):
def __init__(self, modes1, modes2, modes3, width):
super().__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
        1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
        input: the coefficient function and 3 locations (a(x, y, z), x, y, z)
        input shape: (batchsize, x=s, y=s, z=s, c=4)
        output: the solution
        output shape: (batchsize, x=s, y=s, z=s, c=1)
"""
self.modes1 = modes1
self.modes2 = modes2
self.modes3 = modes3
self.width = width
self.padding = 6 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(4, self.width)
        # input channel is 4: the coefficient value and 3 locations (a(x, y, z), x, y, z)
self.conv0 = SpectralConv3d(
self.width, self.width, self.modes1, self.modes2, self.modes3
)
self.conv1 = SpectralConv3d(
self.width, self.width, self.modes1, self.modes2, self.modes3
)
self.conv2 = SpectralConv3d(
self.width, self.width, self.modes1, self.modes2, self.modes3
)
self.conv3 = SpectralConv3d(
self.width, self.width, self.modes1, self.modes2, self.modes3
)
self.w0 = nn.Conv3d(self.width, self.width, 1)
self.w1 = nn.Conv3d(self.width, self.width, 1)
self.w2 = nn.Conv3d(self.width, self.width, 1)
self.w3 = nn.Conv3d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
batchsize = x.shape[0]
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 4, 1, 2, 3)
x = F.pad(x, [0, self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
x = x[..., : -self.padding]
        x = x.permute(0, 2, 3, 4, 1)  # move channels back to the last axis
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x, size_y, size_z = shape[0], shape[1], shape[2], shape[3]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1, 1).repeat(
[batchsize, 1, size_y, size_z, 1]
)
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1, 1).repeat(
[batchsize, size_x, 1, size_z, 1]
)
gridz = torch.tensor(np.linspace(0, 1, size_z), dtype=torch.float)
gridz = gridz.reshape(1, 1, 1, size_z, 1).repeat(
[batchsize, size_x, size_y, 1, 1]
)
return torch.cat((gridx, gridy, gridz), dim=-1).to(device)
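# A minimal shape sketch of the model above: a channels-last scalar field goes
# in, the (x, y, z) grid is appended internally, and a scalar field comes out.
def _sketch_fno3d():
    net = FNO3d(modes1=4, modes2=4, modes3=4, width=8)
    assert net(torch.randn(2, 10, 10, 10, 1)).shape == (2, 10, 10, 10, 1)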
################################################################
# configurations
################################################################
modes = 5
width = 5
model = FNO3d(modes, modes, modes, width).to(cuda_device)
x_numpy = np.random.rand(5, 10, 10, 10, 1).astype(np.float32)
x_tensor = torch.from_numpy(x_numpy).to(cuda_device)
y_tensor = model(x_tensor)
y_numpy = y_tensor.detach().numpy()
Wbs = {
_name: _value.data.detach().numpy() for _name, _value in model.named_parameters()
}
params = {"modes": [modes, modes, modes], "width": width, "padding": [6, 0, 0]}
np.savez_compressed(
"test_fno3d.npz", data_in=x_numpy, data_out=y_numpy, params=params, Wbs=Wbs
)
| modulus-sym-main | test/test_models/data/fno3d_generate_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.manual_seed(0)
np.random.seed(0)
cuda_device = torch.device("cpu:0")
################################################################
# 1d fourier neural operator
# Based on: https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_1d.py
################################################################
class SpectralConv1d(nn.Module):
def __init__(self, in_channels, out_channels, modes1):
super().__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
self.scale
* torch.rand(in_channels, out_channels, self.modes1, dtype=torch.cfloat)
)
# Complex multiplication
def compl_mul1d(self, input, weights):
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-1) // 2 + 1,
device=x.device,
dtype=torch.cfloat,
)
out_ft[:, :, : self.modes1] = self.compl_mul1d(
x_ft[:, :, : self.modes1], self.weights1
)
# Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
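# A minimal sketch of the layer above: rfft of a length-N signal keeps
# N // 2 + 1 modes, of which only the first modes1 are mixed across channels.
def _sketch_spectral_conv1d():
    layer = SpectralConv1d(in_channels=2, out_channels=3, modes1=8)
    assert layer(torch.randn(4, 2, 100)).shape == (4, 3, 100)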
class FNO1d(nn.Module):
def __init__(self, modes, width):
super().__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
        1. Lift the input to the desired channel dimension by self.fc0 .
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the initial condition and location (a(x), x)
input shape: (batchsize, x=s, c=2)
output: the solution of a later timestep
output shape: (batchsize, x=s, c=1)
"""
self.modes1 = modes
self.width = width
self.padding = 2 # pad the domain if input is non-periodic
self.fc0 = nn.Linear(2, self.width) # input channel is 2: (a(x), x)
self.conv0 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv1 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv2 = SpectralConv1d(self.width, self.width, self.modes1)
self.conv3 = SpectralConv1d(self.width, self.width, self.modes1)
self.w0 = nn.Conv1d(self.width, self.width, 1)
self.w1 = nn.Conv1d(self.width, self.width, 1)
self.w2 = nn.Conv1d(self.width, self.width, 1)
self.w3 = nn.Conv1d(self.width, self.width, 1)
self.fc1 = nn.Linear(self.width, 128)
self.fc2 = nn.Linear(128, 1)
def forward(self, x):
grid = self.get_grid(x.shape, x.device)
batchsize = x.shape[0]
x = torch.cat((x, grid), dim=-1)
x = self.fc0(x)
x = x.permute(0, 2, 1)
x = F.pad(x, [0, self.padding]) # pad the domain if input is non-periodic
x1 = self.conv0(x)
x2 = self.w0(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv1(x)
x2 = self.w1(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv2(x)
x2 = self.w2(x)
x = x1 + x2
x = F.gelu(x)
x1 = self.conv3(x)
x2 = self.w3(x)
x = x1 + x2
        x = x[..., : -self.padding]  # remove the padding added for non-periodic inputs
x = x.permute(0, 2, 1)
x = self.fc1(x)
x = F.gelu(x)
x = self.fc2(x)
return x
def get_grid(self, shape, device):
batchsize, size_x = shape[0], shape[1]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1).repeat([batchsize, 1, 1])
return gridx.to(device)
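# A minimal shape sketch of the model above, mirroring the data generated below.
def _sketch_fno1d():
    net = FNO1d(modes=8, width=16)
    assert net(torch.randn(3, 50, 1)).shape == (3, 50, 1)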
################################################################
# configurations
################################################################
modes = 16
width = 64
model = FNO1d(modes, width).to(cuda_device)
x_numpy = np.random.rand(100, 100, 1).astype(np.float32)
x_tensor = torch.from_numpy(x_numpy).to(cuda_device)
y_tensor = model(x_tensor)
y_numpy = y_tensor.detach().numpy()
Wbs = {
_name: _value.data.detach().numpy() for _name, _value in model.named_parameters()
}
params = {"modes": modes, "width": width, "padding": 2}
np.savez_compressed(
"test_fno1d.npz", data_in=x_numpy, data_out=y_numpy, params=params, Wbs=Wbs
)
| modulus-sym-main | test/test_models/data/fno1d_generate_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from modulus.sym.utils.benchmark import timeit
skip_if_no_gpu = pytest.mark.skipif(
not torch.cuda.is_available(), reason="There is no GPU to run this test"
)
@skip_if_no_gpu
def test_timeit():
def func():
torch.zeros(2**20, device="cuda").exp().cos().sin()
    cuda_event_timing_ms = timeit(func, cpu_timing=False)
    cpu_timing_ms = timeit(func, cpu_timing=True)
    assert cuda_event_timing_ms - cpu_timing_ms < 0.1
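# A minimal usage sketch for the helper exercised above (assuming the signature
# used in this test): cpu_timing=True times with host-side timers and
# cpu_timing=False with CUDA events; both return milliseconds per call.
def _sketch_timeit_usage():
    return timeit(lambda: torch.ones(2**10, device="cuda").sum(), cpu_timing=True)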
if __name__ == "__main__":
test_timeit()
| modulus-sym-main | test/test_utils/test_benchmark.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
from modulus.sym.key import Key
from modulus.sym.hydra import to_yaml, instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.models.afno.distributed import DistributedAFNONet
from modulus.sym.distributed.manager import DistributedManager
import os
import torch
# Set model parallel size to 2
os.environ["MODEL_PARALLEL_SIZE"] = "2"
@modulus.sym.main(config_path="conf", config_name="config_AFNO")
def run(cfg: ModulusConfig) -> None:
manager = DistributedManager()
model_rank = manager.group_rank(name="model_parallel")
model_size = manager.group_size(name="model_parallel")
# Check that GPUs are available
if not manager.cuda:
print("WARNING: No GPUs available. Exiting...")
return
# Check that world_size is a multiple of model parallel size
if manager.world_size % 2 != 0:
print(
"WARNING: Total world size not a multiple of model parallel size (2). Exiting..."
)
return
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
img_shape = (720, 1440)
# make list of nodes to unroll graph on
model = instantiate_arch(
input_keys=input_keys,
output_keys=output_keys,
cfg=cfg.arch.distributed_afno,
img_shape=img_shape,
)
nodes = [model.make_node(name="Distributed AFNO", jit=cfg.jit)]
model = model.to(manager.device)
sample = {
str(k): torch.randn(1, k.size, *img_shape).to(manager.device)
for k in input_keys
}
# Run model in a loop
for i in range(4):
# Forward pass
result = model(sample)
# Compute loss
loss = torch.square(result["sol"]).sum()
# Backward pass
loss.backward()
expected_result_shape = [1, output_keys[0].size, *img_shape]
result_shape = list(result["sol"].shape)
    assert (
        result_shape == expected_result_shape
    ), f"Incorrect result size. Expected {expected_result_shape}, got {result_shape}"
if __name__ == "__main__":
run()
| modulus-sym-main | test/test_distributed/test_afno_distributed_arch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus
from modulus.sym.hydra import to_yaml, instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.models.afno.distributed import DistributedAFNONet
from modulus.sym.distributed.manager import DistributedManager
import os
import torch
# Set model parallel size to 2
os.environ["MODEL_PARALLEL_SIZE"] = "2"
@modulus.sym.main(config_path="conf", config_name="config_AFNO")
def run(cfg: ModulusConfig) -> None:
input_is_matmul_parallel = False
output_is_matmul_parallel = False
in_chans = 3
out_chans = 10
embed_dim = 768
manager = DistributedManager()
# Check that GPUs are available
if not manager.cuda:
print("WARNING: No GPUs available. Exiting...")
return
# Check that world_size is a multiple of model parallel size
if manager.world_size % 2 != 0:
print(
"WARNING: Total world size not a multiple of model parallel size (2). Exiting..."
)
return
model = DistributedAFNONet(
img_size=(720, 1440),
patch_size=(4, 4),
in_chans=in_chans,
out_chans=out_chans,
embed_dim=embed_dim,
input_is_matmul_parallel=input_is_matmul_parallel,
output_is_matmul_parallel=output_is_matmul_parallel,
).to(manager.device)
model_rank = manager.group_rank(name="model_parallel")
model_size = manager.group_size(name="model_parallel")
# Check that model is using the correct local embedding size
expected_embed_dim_local = embed_dim // model_size
assert (
model.embed_dim_local == expected_embed_dim_local
), f"Incorrect local embedding size. Expected {expected_embed_dim_local}, got {model.embed_dim_local}"
sample = torch.randn(1, in_chans, 720, 1440)
local_in_chans_start = 0
local_in_chans_end = in_chans
if input_is_matmul_parallel:
chunk = (in_chans + model_size - 1) // model_size
local_in_chans_start = model_rank * chunk
local_in_chans_end = min(in_chans, local_in_chans_start + chunk)
# Get sample and run through the model
local_sample = (sample[:, local_in_chans_start:local_in_chans_end, :, :]).to(
manager.device
)
# Run model in a loop
for i in range(4):
# Forward pass
local_result = model(local_sample)
# Compute loss
loss = torch.square(local_result).sum()
# Backward pass
loss.backward()
local_out_chans = out_chans
if output_is_matmul_parallel:
chunk = (out_chans + model_size - 1) // model_size
local_out_chans_start = model_rank * chunk
local_out_chans_end = min(out_chans, local_out_chans_start + chunk)
local_out_chans = local_out_chans_end - local_out_chans_start
expected_result_shape = [1, local_out_chans, 720, 1440]
local_result_shape = list(local_result.shape)
assert (
local_result_shape == expected_result_shape
), f"Incorrect result size. Expected {expected_result_shape}, got {local_result_shape}"
if __name__ == "__main__":
run()
| modulus-sym-main | test/test_distributed/test_afno_distributed.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from sympy import Symbol, Eq, cos, sin, pi
from modulus.sym.node import Node
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.geometry.primitives_3d import Plane
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
VariationalDomainConstraint,
)
from modulus.sym.loss import Loss
from modulus.sym.geometry.parameterization import Parameterization, Bounds
# TODO: Add some more complex geometry that is the union of multiple shapes to check boundary sampling
def test_PointwiseBoundaryConstraint():
"define a sinusodial node, create pointwise boundary constraints over it and check their losses are zero"
ntests = 10
for fixed_dataset in [True, False]:
        # define sinusoidal node
x, y = Symbol("x"), Symbol("y")
node = Node.from_sympy(cos(x) + sin(y), "u")
# make geometry
height = pi
width = pi
rec = Rectangle((0, 0), (width, height))
# top wall
top_wall = PointwiseBoundaryConstraint(
nodes=[node],
geometry=rec,
outvar={"u": cos(x) + sin(height)},
batch_size=1000,
criteria=Eq(y, height),
fixed_dataset=fixed_dataset,
batch_per_epoch=2 * ntests,
)
# right wall
right_wall = PointwiseBoundaryConstraint(
nodes=[node],
geometry=rec,
outvar={"u": cos(width) + sin(y)},
batch_size=1000,
criteria=Eq(x, width),
fixed_dataset=fixed_dataset,
batch_per_epoch=2 * ntests,
)
# bottom wall
bottom_wall = PointwiseBoundaryConstraint(
nodes=[node],
geometry=rec,
outvar={"u": cos(x) + sin(0)},
batch_size=1000,
criteria=Eq(y, 0),
fixed_dataset=fixed_dataset,
batch_per_epoch=2 * ntests,
)
# left wall
left_wall = PointwiseBoundaryConstraint(
nodes=[node],
geometry=rec,
outvar={"u": cos(0) + sin(y)},
batch_size=1000,
criteria=Eq(x, 0),
fixed_dataset=fixed_dataset,
batch_per_epoch=2 * ntests,
)
height = float(height)
width = float(width)
for _ in range(ntests):
# check losses are zero
top_wall.load_data()
top_wall.forward()
loss = top_wall.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
right_wall.load_data()
right_wall.forward()
loss = right_wall.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
bottom_wall.load_data()
bottom_wall.forward()
loss = bottom_wall.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
left_wall.load_data()
left_wall.forward()
loss = left_wall.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
# check invars correct
invar, _, _ = next(top_wall.dataloader)
assert torch.allclose(
invar["y"], height * torch.ones_like(invar["y"]), rtol=1e-5, atol=1e-5
)
assert torch.all(torch.logical_and(invar["x"] <= width, invar["x"] >= 0))
invar, _, _ = next(right_wall.dataloader)
assert torch.allclose(
invar["x"], width * torch.ones_like(invar["x"]), rtol=1e-5, atol=1e-5
)
assert torch.all(torch.logical_and(invar["y"] <= height, invar["y"] >= 0))
invar, _, _ = next(bottom_wall.dataloader)
assert torch.allclose(
invar["y"], torch.zeros_like(invar["y"]), rtol=1e-5, atol=1e-5
)
assert torch.all(torch.logical_and(invar["x"] <= width, invar["x"] >= 0))
invar, _, _ = next(left_wall.dataloader)
assert torch.allclose(
invar["x"], torch.zeros_like(invar["x"]), rtol=1e-5, atol=1e-5
)
assert torch.all(torch.logical_and(invar["y"] <= height, invar["y"] >= 0))
def test_PointwiseInteriorConstraint():
"define a sinusodial node, create pointwise interior constraint over it and check its loss is zero"
ntests = 10
for fixed_dataset in [True, False]:
        # define sinusoidal node
x, y = Symbol("x"), Symbol("y")
node = Node.from_sympy(cos(x) + sin(y), "u")
# make geometry
height = 3.14159
width = 3.14159
rec = Rectangle((0, 0), (width, height))
constraint = PointwiseInteriorConstraint(
nodes=[node],
geometry=rec,
outvar={"u": cos(x) + sin(y)},
bounds=Bounds({x: (0, width), y: (0, height)}),
batch_size=1000,
fixed_dataset=fixed_dataset,
batch_per_epoch=2 * ntests,
)
height = float(height)
width = float(width)
for _ in range(ntests):
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
# check invar correct
invar, _, _ = next(constraint.dataloader)
assert torch.all(torch.logical_and(invar["x"] <= width, invar["x"] >= 0))
assert torch.all(torch.logical_and(invar["y"] <= height, invar["y"] >= 0))
def test_IntegralBoundaryConstraint():
"define a parabola node, create integral boundary constraint over it and check its loss is zero"
ntests = 10
for fixed_dataset in [True, False]:
# define parabola node
node = Node.from_sympy(Symbol("z") ** 2, "u")
# make geometry
plane = Plane((0, 0, 0), (0, 2, 1), 1)
# make constraint
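        # the criteria y > 1 below selects the unit strip y in (1, 2), z in (0, 1),
        # over which the mean of u = z**2 is the integral of z**2 on [0, 1] = 1/3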
constraint = IntegralBoundaryConstraint(
nodes=[node],
geometry=plane,
outvar={"u": 1.0 / 3.0},
batch_size=1,
integral_batch_size=100000,
batch_per_epoch=ntests,
fixed_dataset=fixed_dataset,
criteria=Symbol("y") > 1,
)
for _ in range(ntests):
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-3, atol=1e-3)
        # define cubic node
node = Node.from_sympy(Symbol("z") ** 3 + Symbol("y") ** 3, "u")
# make geometry
z_len = Symbol("z_len")
y_len = Symbol("y_len")
plane = Plane((0, -y_len, -z_len), (0, y_len, z_len), 1)
# make constraint
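        # u = z**3 + y**3 is odd in both y and z, so its integral over the
        # symmetric plane (-y_len, -z_len) x (y_len, z_len) vanishes for every
        # sampled parameterization, hence the target value of 0 below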
constraint = IntegralBoundaryConstraint(
nodes=[node],
geometry=plane,
outvar={"u": 0},
batch_size=1,
integral_batch_size=100000,
batch_per_epoch=ntests,
fixed_dataset=fixed_dataset,
parameterization=Parameterization({y_len: (0.1, 1.0), z_len: (0.1, 1.0)}),
)
for _ in range(ntests):
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-3, atol=1e-3)
def test_VariationalDomainConstraint():
"define a parabola node, create variational domain constraint over it and check its loss is zero"
ntests = 10
# define parabola node
x, y = Symbol("x"), Symbol("y")
node = Node.from_sympy(x**2 + y**2, "u")
# make geometry
rec = Rectangle((-0.5, -0.5), (0.5, 0.5))
# define variational loss
class VariationalLoss(Loss):
"fake loss for testing only"
def forward(self, list_invar, list_outvar, step):
losses = []
for invar, outvar in zip(list_invar, list_outvar):
expected = invar["x"] ** 2 + invar["y"] ** 2
losses.append(torch.sum(outvar["u"] - expected))
return {"u": sum(losses)}
# make constraint
constraint = VariationalDomainConstraint(
nodes=[node],
geometry=rec,
outvar_names=["u"],
boundary_batch_size=1000,
interior_batch_size=2000,
batch_per_epoch=ntests,
interior_bounds=Bounds({x: (-0.5, 0.5), y: (-0.5, 0.5)}),
loss=VariationalLoss(),
)
for _ in range(ntests):
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
test_PointwiseBoundaryConstraint()
test_PointwiseInteriorConstraint()
test_IntegralBoundaryConstraint()
test_VariationalDomainConstraint()
| modulus-sym-main | test/test_constraints/test_continuous_constraints.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol
from modulus.sym.node import Node
from modulus.sym.domain.constraint.discrete import (
SupervisedGridConstraint,
DeepONetConstraint_Data,
DeepONetConstraint_Physics,
)
from modulus.sym.dataset import DictGridDataset
def test_SupervisedGridConstraint():
"define a parabola node, create grid constraint over it and check its loss is zero"
# define parabola node
node = Node.from_sympy(Symbol("x") ** 2 + Symbol("y") ** 2, "u")
# define 2D grid inputs
x, y = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
# define targets
u = x**2 + y**2
# make dataset
dataset = DictGridDataset(
invar={"x": x[np.newaxis, :], "y": y[np.newaxis, :]},
outvar={"u": u[np.newaxis, :]},
)
# make constraint
constraint = SupervisedGridConstraint(
nodes=[node],
dataset=dataset,
batch_size=1,
)
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
def test_DeepONetConstraints():
"define a parabola node, create deeponet constraints over it and check their losses are zero"
# define parabola node
node = Node.from_sympy(Symbol("x") ** 2 + Symbol("y") ** 2, "u")
# define 2D grid inputs
x, y = np.meshgrid(np.linspace(0, 1, 10), np.linspace(0, 1, 10))
# define targets
u = x**2 + y**2
# make dataset
invar_branch = {"x": x[np.newaxis, :]}
invar_trunk = {"y": y[np.newaxis, :]}
outvar = {"u": u[np.newaxis, :]}
# make constraint
constraint = DeepONetConstraint_Data(
nodes=[node],
invar_branch=invar_branch,
invar_trunk=invar_trunk,
outvar=outvar,
batch_size=1,
)
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
# define parabola node
class Parabola(torch.nn.Module):
def forward(self, invar):
x, y = invar["x"], invar["y"]
u = x**2 + y**2
u = u.reshape((-1, 1)) # reshape output
return {"u": u}
node = Node(inputs=["x", "y"], outputs="u", evaluate=Parabola())
# make constraint
constraint = DeepONetConstraint_Physics(
nodes=[node],
invar_branch=invar_branch,
invar_trunk=invar_trunk,
outvar=outvar,
batch_size=1,
)
# check loss is zero
constraint.load_data()
constraint.forward()
loss = constraint.loss(step=0)
assert torch.isclose(loss["u"], torch.tensor(0.0), rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
test_SupervisedGridConstraint()
test_DeepONetConstraints()
| modulus-sym-main | test/test_constraints/test_discrete_constraints.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import Relobralo
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
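# A minimal sketch of the aggregator contract exercised below (assuming the
# Relobralo API used in this test): it maps a dict of named scalar losses plus
# the global step to a single scalar suitable for backward().
def _sketch_aggregate(aggregator, losses, step):
    total = aggregator(losses, step)
    total.backward()
    return total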
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__), "test_aggregator_data/Relobralo_data.npz"
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = Relobralo(loss_function.parameters(), 3)
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_relobralo.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import LRAnnealing
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__), "test_aggregator_data/LRAnnealing_data.npz"
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = LRAnnealing(loss_function.parameters(), 3)
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_lrannealing.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import Sum
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__), "test_aggregator_data/Sum_data.npz"
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = Sum(loss_function.parameters(), 3)
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_sum.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import HomoscedasticUncertainty
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__),
"test_aggregator_data/HomoscedasticUncertainty_data.npz",
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = HomoscedasticUncertainty(loss_function.parameters(), 3)
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_uncertainty.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import GradNorm
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__), "test_aggregator_data/GradNorm_data.npz"
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = GradNorm(loss_function.parameters(), 3)
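    # GradNorm adapts the loss weights online so that the per-loss gradient
    # magnitudes stay balanced during training.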
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_gradnorm.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from torch import nn
from modulus.sym.loss.aggregator import SoftAdapt
class FitToPoly(nn.Module):
def __init__(self):
super().__init__()
self.w = nn.Parameter(torch.ones((512, 512)))
self.b = nn.Parameter(torch.ones(512, 1))
def forward(self, x):
x1, x2, x3 = x[:, 0:1], x[:, 1:2], x[:, 2:3]
losses = {
"loss_x": (torch.relu(torch.mm(self.w, x1) + self.b - x1**2))
.abs()
.mean(),
"loss_y": (torch.relu(torch.mm(self.w, x2) + self.b - x2**2.0))
.abs()
.mean(),
"loss_z": (torch.relu(torch.mm(self.w, x3) + self.b + x3**2.0))
.abs()
.mean(),
}
return losses
def test_loss_aggregator():
# set device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load data
filename = os.path.join(
os.path.dirname(__file__), "test_aggregator_data/SoftAdapt_data.npz"
)
configs = np.load(filename, allow_pickle=True)
x_np = torch.tensor(configs["x_np"][()]).to(device)
w_np, b_np, loss_np = (
configs["w_np"][()],
configs["b_np"][()],
configs["loss_np"][()],
)
total_steps, learning_rate = (
configs["total_steps"][()],
configs["learning_rate"][()],
)
    # Instantiate the optimizer, scheduler, aggregator, and loss function
loss_function = torch.jit.script(FitToPoly()).to(device)
aggregator = SoftAdapt(loss_function.parameters(), 3)
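    # SoftAdapt derives the loss weights from the recent rate of change of each
    # loss component, emphasizing the ones improving most slowly.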
optimizer = torch.optim.SGD(loss_function.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.ConstantLR(optimizer)
# Training loop
for step in range(total_steps):
optimizer.zero_grad()
train_losses = loss_function(x_np)
train_loss = aggregator(train_losses, step)
train_loss.backward()
optimizer.step()
scheduler.step()
# check outputs
w_out = list(loss_function.parameters())[0].cpu().detach().numpy()
b_out = list(loss_function.parameters())[1].cpu().detach().numpy()
loss_out = train_loss.cpu().detach().numpy()
assert np.allclose(loss_np, loss_out, rtol=1e-4, atol=1e-4)
assert np.allclose(w_np, w_out, rtol=1e-4, atol=1e-4)
assert np.allclose(b_np, b_out, rtol=1e-4, atol=1e-4)
if __name__ == "__main__":
test_loss_aggregator()
| modulus-sym-main | test/test_aggregator/test_softadapt.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that copyright headers exists"""
import argparse
import itertools
import re
import sys
import json
from datetime import datetime
from pathlib import Path
def get_top_comments(_data):
"""
Get all lines where comments should exist
"""
lines_to_extract = []
for i, line in enumerate(_data):
# If empty line, skip
        if line in ["", "\n", "\r", "\r\n"]:
continue
# If it is a comment line, we should get it
if line.startswith("#"):
lines_to_extract.append(i)
# Assume all copyright headers occur before any import or from statements
        # and are not enclosed in a comment block
elif "import" in line:
break
elif "from" in line:
break
comments = []
for line in lines_to_extract:
comments.append(_data[line])
return comments
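def _demo_get_top_comments():
    # A minimal, hedged illustration (not part of the original checker): header
    # comments are collected until the first line containing an import/from.
    sample = [
        "# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES.\n",
        "\n",
        "# Licensed under the Apache License, Version 2.0\n",
        "import os\n",
        "# a trailing comment that is never reached\n",
    ]
    assert get_top_comments(sample) == [sample[0], sample[2]]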
def main():
with open(Path(__file__).parent.resolve() / Path("config.json")) as f:
config = json.loads(f.read())
print(f"License check config:")
print(json.dumps(config, sort_keys=True, indent=4))
current_year = int(datetime.today().year)
starting_year = 2023
python_header_path = Path(__file__).parent.resolve() / Path(
config["copyright_file"]
)
working_path = Path(__file__).parent.resolve() / Path(config["dir"])
exts = config["include-ext"]
with open(python_header_path, "r", encoding="utf-8") as original:
pyheader = original.read().split("\n")
pyheader_lines = len(pyheader)
# Build list of files to check
exclude_paths = [
(Path(__file__).parent / Path(path)).resolve().rglob("*")
for path in config["exclude-dir"]
]
all_exclude_paths = itertools.chain.from_iterable(exclude_paths)
exclude_filenames = [p for p in all_exclude_paths if p.suffix in exts]
filenames = [p for p in working_path.resolve().rglob("*") if p.suffix in exts]
filenames = [
filename for filename in filenames if filename not in exclude_filenames
]
problematic_files = []
gpl_files = []
for filename in filenames:
with open(str(filename), "r", encoding="utf-8") as original:
data = original.readlines()
data = get_top_comments(data)
if data and "# ignore_header_test" in data[0]:
continue
if len(data) < pyheader_lines - 1:
print(f"{filename} has less header lines than the copyright template")
problematic_files.append(filename)
continue
found = False
for i, line in enumerate(data):
if re.search(re.compile("Copyright.*NVIDIA.*", re.IGNORECASE), line):
found = True
# Check 1st line manually
year_good = False
for year in range(starting_year, current_year + 1):
year_line = pyheader[0].format(CURRENT_YEAR=year)
if year_line in data[i]:
year_good = True
break
year_line_aff = year_line.split(".")
year_line_aff = (
year_line_aff[0] + " & AFFILIATES." + year_line_aff[1]
)
if year_line_aff in data[i]:
year_good = True
break
if not year_good:
problematic_files.append(filename)
print(f"{filename} had an error with the year")
break
# while "opyright" in data[i]:
# i += 1
# for j in range(1, pyheader_lines):
# if pyheader[j] not in data[i + j - 1]:
# problematic_files.append(filename)
# print(f"{filename} missed the line: {pyheader[j]}")
# break
if found:
break
if not found:
print(f"{filename} did not match the regex: `Copyright.*NVIDIA.*`")
problematic_files.append(filename)
# test if GPL license exists
        for line in data:
            if "gpl" in line.lower():
gpl_files.append(filename)
break
if len(problematic_files) > 0:
print(
"test_header.py found the following files that might not have a copyright header:"
)
for _file in problematic_files:
print(_file)
if len(gpl_files) > 0:
print("test_header.py found the following files that might have GPL copyright:")
for _file in gpl_files:
print(_file)
assert len(problematic_files) == 0, "header test failed!"
assert len(gpl_files) == 0, "found gpl license, header test failed!"
print("Success: File headers look good!")
if __name__ == "__main__":
main()
| modulus-sym-main | test/ci_tests/header_check.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, sin
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.dataset import (
DictImportanceSampledPointwiseIterableDataset,
)
from modulus.sym.domain.constraint.utils import _compute_outvar
from modulus.sym.geometry.parameterization import Bounds
def test_DictImportanceSampledPointwiseIterableDataset():
"sample sin function on a rectangle with importance measure sqrt(x**2 + y**2) and check its integral is zero"
torch.manual_seed(123)
np.random.seed(123)
# make rectangle
rec = Rectangle((-0.5, -0.5), (0.5, 0.5))
# sample interior
invar = rec.sample_interior(
100000,
bounds=Bounds({Symbol("x"): (-0.5, 0.5), Symbol("y"): (-0.5, 0.5)}),
)
# compute outvar
outvar = _compute_outvar(invar, {"u": sin(2 * np.pi * Symbol("x") / 0.5)})
# create importance measure
def importance_measure(invar):
return ((invar["x"] ** 2 + invar["y"] ** 2) ** (0.5)) + 0.01
# make importance dataset
dataset = DictImportanceSampledPointwiseIterableDataset(
invar=invar,
outvar=outvar,
batch_size=10000,
importance_measure=importance_measure,
)
# sample importance dataset
invar, outvar, lambda_weighting = next(iter(dataset))
# check integral calculation
assert np.isclose(torch.sum(outvar["u"] * invar["area"]), 0.0, rtol=1e-2, atol=1e-2)
if __name__ == "__main__":
test_DictImportanceSampledPointwiseIterableDataset()
| modulus-sym-main | test/test_datasets/test_continuous_datasets.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# -- Project information -----------------------------------------------------
import os
import sphinx_rtd_theme
from modulus.sym import __version__ as version
project = 'NVIDIA Modulus Symbolic'
copyright = '2023, NVIDIA Modulus Team'
author = 'NVIDIA Modulus Team'
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'_build',
'external',
'README.md',
'CONTRIBUTING.md',
'LICENSE.txt',
'tests',
'**.ipynb_checkpoints'
]
# Fake imports
autodoc_mock_imports = [
'pysdf',
'quadpy',
'functorch'
]
extensions=[
'recommonmark',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.autosectionlabel',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'nbsphinx'
]
#source_parsers = { '.md': 'recommonmark.parser.CommonMarkParser',}
source_suffix = {'.rst':'restructuredtext', '.md':'markdown'}
pdf_documents = [('index', u'rst2pdf', u'Sample rst2pdf doc', u'Your Name'),]
napoleon_custom_sections = ['Variable Shape']
# -- Options for HTML output -------------------------------------------------
# HTML theme options
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_rtd_theme"
html_theme_options = {
'logo_only': True,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
'style_nav_header_background': '#000000',
# Toc options
'collapse_navigation': False,
'sticky_navigation': False,
# 'navigation_depth': 10,
'sidebarwidth': 12,
'includehidden': True,
'titles_only': False
}
# Additional html options
html_static_path = ['_static']
html_css_files = [
'css/nvidia_styles.css',
]
html_js_files = [
'js/pk_scripts.js'
]
# html_last_updated_fmt = ''
# Additional sphinx switches
math_number_all = True
todo_include_todos = True
numfig = True
_PREAMBLE = r"""
\usepackage{amsmath}
\usepackage{esint}
\usepackage{mathtools}
\usepackage{stmaryrd}
"""
latex_elements = {
'preamble': _PREAMBLE,
# other settings go here
}
latex_preamble =[
('\\usepackage{amssymb}',
'\\usepackage{amsmath}',
'\\usepackage{amsxtra}',
'\\usepackage{bm}',
'\\usepackage{esint}',
'\\usepackage{mathtools}',
'\\usepackage{stmaryrd}'),
]
autosectionlabel_maxdepth = 1
| modulus-sym-main | docs/conf.py |
from git import Repo
from pathlib import Path
import datetime
from pytz import timezone
import re
import gitlab
import argparse
def get_commit_files(
repo_path: Path, branch_name: str, day_range: int = 1, max_count: int = 100
):
"""Gets a list for files changed in commits for past number of days
Parameters
----------
repo_path : Path
Path to git repo
branch_name : str
Branch to use for commits
day_range : int, optional
Number of past days to look for commits, by default 1
max_count : int, optional
        Max number of commits to look at, by default 100
Returns
-------
Dict[str, Tuple[int, str]]
        Dictionary of changed files with the latest commit time and hash
"""
assert repo_path.is_dir(), "Invalid repo folder path"
repo = Repo(repo_path)
assert not repo.bare, "Repo is bare"
    assert branch_name in repo.heads, f"Branch name {branch_name} not found in available heads"
branch = repo.heads[branch_name]
files = {}
# Iterates from newest to oldest commit
for commit in list(
repo.iter_commits(
rev=branch, since=f"{day_range}.days.ago", max_count=max_count
)
):
for file in commit.stats.files.keys():
if file not in files:
files[file] = tuple([commit.committed_date, commit.hexsha])
return files
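# Hedged usage sketch (hypothetical paths, not part of the original script):
#
#   changed = get_commit_files(Path("./external/examples"), "develop", day_range=7)
#   for fname, (timestamp, sha) in changed.items():
#       print(fname, sha[:8])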
def get_doc_codeblock_files(userguide_path: Path, file_pattern: str = "*.rst"):
"""Looks through RST files for any references to example python files
Parameters
----------
userguide_path : Path
Path to user guide RST files
file_pattern : str, optional
Pattern for file types to parse, by default "*.rst"
Returns
-------
Dict[str, Dict[str, List[int]]]
Dictionary of python files that are contained in doc files with line numbers.
Returned dictionary maps between python file to a dictionary containing each
documentation file and line numbers it is referenced.
"""
assert userguide_path.is_dir(), "Invalid repo folder path"
    regex_pattern = re.compile(r"/modulus/examples/(.+?)\.py")
files = {}
for doc_file in userguide_path.rglob(file_pattern):
for i, line in enumerate(open(doc_file)):
for match in re.finditer(regex_pattern, line):
python_file = str(Path(*Path(match.group()).parts[3:]))
doc_file_local = str(Path(*Path(doc_file).parts[1:]))
                if python_file not in files:
files[python_file] = {str(doc_file_local): [i + 1]}
else:
if doc_file_local in files[python_file]:
files[python_file][doc_file_local].append(i + 1)
else:
files[python_file][doc_file_local] = [i + 1]
return files
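# A hedged illustration (with hypothetical names) of the mapping returned above:
#
#   {"cfd/ldc_2d.py": {"basics/ldc.rst": [12, 87]}}
#
# i.e. each referenced python file maps to the doc files, and the 1-based line
# numbers within them, that mention it.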
def create_gitlab_issue(commit_files, doc_files, gl_token: str):
"""Creates a Gitlab issue if changed files are present
Parameters
----------
commit_files : Dict[str, Tuple[int, str]]
        Dictionary of changed files with the latest commit time and hash
doc_files : Dict[str, Dict[str, List[int]]]
Dictionary of python files that are contained in doc files with line numbers.
gl_token : str
        Gitlab API access token, should be passed in via program argument
(do not hard code!)
"""
# .git urls
examples_repo_url = (
"https://gitlab-master.nvidia.com/simnet/examples/-/blob/develop/"
)
docs_repo_url = "https://gitlab-master.nvidia.com/simnet/docs/-/blob/develop/"
ug_folder = "user_guide/"
def file_desc(file_name, commit_time_stamp, commit_hash, doc_files):
# Create description string for one updated file
# Convert time-stamp to string in pacific time
commit_time = datetime.datetime.fromtimestamp(commit_time_stamp)
commit_time = commit_time.astimezone(timezone("US/Pacific"))
desc_str = f"---\n\n"
desc_str += f"[{file_name}]({examples_repo_url}{file_name})\n\n"
desc_str += f":date: Editted: {commit_time.strftime('%Y-%m-%d %H:%M PST')}\n\n"
desc_str += f":fox: Commit: simnet/examples@{commit_hash[:8]}\n\n"
desc_str += ":mag: Files to check:\n"
for doc_file in doc_files.keys():
doc_file = Path(doc_file)
desc_str += f"- {doc_file.name} : "
for line in doc_files[str(doc_file)]:
desc_str += f"[L{line}]({docs_repo_url}{ug_folder}{str(doc_file)}#L{line}), "
desc_str = desc_str[:-2]
desc_str += "\n\n"
return desc_str
todays_date = datetime.date.today()
issue_title = f"[Overwatch] Example files updated {todays_date.month}/{todays_date.day}/{todays_date.year}"
issue_desc = "### :robot: Overwatch Detected Files:\n\n"
issue_desc += "This is an automated issue created by CI Example Overwatch bot.\n\n"
changed_files = False
# Loop over changed files in detected commits
for commit_file_name in commit_files.keys():
if commit_file_name in doc_files:
issue_desc += file_desc(
commit_file_name,
commit_files[commit_file_name][0],
commit_files[commit_file_name][1],
doc_files[commit_file_name],
)
changed_files = True
# If no updated files just return
if not changed_files:
print("No updated files detected.")
return
else:
print("File changes detected, creating issue.")
# Log into gitlab and create issue
gl = gitlab.Gitlab("https://gitlab-master.nvidia.com", private_token=gl_token)
p = gl.projects.get("simnet/docs")
p.issues.create(
{
"title": issue_title,
"description": issue_desc,
"labels": ["user guide", "update"],
}
)
if __name__ == "__main__":
# This should be ran outside of the doc repo directory (1 level up)
p = argparse.ArgumentParser()
p.add_argument("--gitlab-token", type=str, default=None, help="Gitlab API token")
p.add_argument(
"--day-range", type=int, default=1, help="Day range to check commits"
)
args = vars(p.parse_args())
# Paths inside CI docker container
user_guide_path = Path("./user_guide")
example_repo_path = Path("./external/examples")
print("Parsing .rst files for python references")
doc_files = get_doc_codeblock_files(user_guide_path)
print("Checking examples repo for recent commits")
commit_files = get_commit_files(
example_repo_path, "develop", day_range=args["day_range"]
)
print("Checking for relevant file changes")
create_gitlab_issue(commit_files, doc_files, args["gitlab_token"])
| modulus-sym-main | docs/test/overwatch.py |
import re
import json
from pathlib import Path
from spellchecker import SpellChecker
from string import punctuation
from typing import List, Set
from termcolor import colored
class RSTSpellChecker:
def __init__(self, spell_checker: SpellChecker):
self.spell_checker = spell_checker
self.sphinx_block = False
# Word regex, these are ran for every word so compile once up here.
# Numeric regex
# https://stackoverflow.com/questions/1323364/in-python-how-to-check-if-a-string-only-contains-certain-characters
# [Prefix syms][nums][intermediate syms][trailing nums][end syms]
self.re_numeric = re.compile(r"^[+\-(vx]*[0-9]+[+\-xe \.]*[0-9]*[xDk%\.]*$")
# Left over sphinx keys
self.re_sphinx_keys = re.compile(r"\s*(:alt:)\s*")
# Stuff from python code blocks and url stuff
self.re_code_words = re.compile(r"(.*\.py|.*\.html|.*\.org|.*\.com|.*\.vti|.*\.vtu|.*\.vtp)")
# All caps for abbrv (can have trailing s)
self.re_caps = re.compile(r"^[^a-z]*[s]?$")
def check_sphinx_block(self, line:str) -> bool:
"""Determins if line is in a code, math or table block based on indent whitespace
Parameters
----------
line : str
line of text
Returns
-------
bool
If line is in code block
"""
# code block
re_sphinx_code_block = re.compile(r"^\s*\.\.\s+(code::|code-block::)")
# math or table block
re_sphinx_math_block = re.compile(r"^\s*\.\.\s+(math::|table::)")
# Leading white space check
re_white_space = re.compile(
r"^(\s{2,}|\t+)"
) # Assuming tab spacing has at least 2 spaces
# Check for start of code or math block
if bool(re_sphinx_code_block.search(line)):
self.sphinx_block = True
return self.sphinx_block
elif bool(re_sphinx_math_block.search(line)):
self.sphinx_block = True
return self.sphinx_block
# Else check to see if exempt line or non-indendent line
if self.sphinx_block:
# End of code block is a line with no white space at the start (no-indent)
if (
not bool(re_white_space.search(line))
                and len(re.sub(r"\s+", "", line)) > 0
):
self.sphinx_block = False
return self.sphinx_block
def exempt_lines(self, line: str) -> bool:
"""Checks if line should be exempt from checking, this applys for various
sphinx sections such as code blocks, figures, tables, etc.
Parameters
----------
line : str
line of text
Returns
-------
bool
If line should be skipped
"""
re_sphinx_code_ref = re.compile(
r"code::|role::|literalinclude:|:language:|:lines:|:format:|:start-after:|:end-before:"
)
re_sphinx_fig_ref = re.compile(
r"(^..\s*figure::|^\s*:width:|^\s*:align:|^\s*:name:|^\s*:header-rows:)"
)
        re_title_borders = re.compile(r"^=+\s+$|^~+\s+$|^\^+\s+$")
re_sphinx_citation = re.compile(r"^\s*\.\. \[#.*\]")
re_sphinx_ref_target = re.compile(r"^\s*\.\.\s+\_.*:\s*$")
re_sphinx_math = re.compile(r"^\s*\.\.\s+math::")
if bool(re_sphinx_code_ref.search(line)):
return True
elif bool(re_sphinx_fig_ref.search(line)):
return True
        elif bool(re_title_borders.search(line)):
return True
elif bool(re_sphinx_citation.search(line)):
return True
elif bool(re_sphinx_ref_target.search(line)):
return True
elif bool(re_sphinx_math.search(line)):
return True
return False
def exempt_word(self, word: str) -> bool:
"""Checks for words that should be exempt from spell checking
Parameters
----------
word : str
Word string
Returns
-------
bool
If work should be exempt
"""
# Numericals (numbers, #-#, #x#)
if bool(self.re_numeric.search(word)):
return True
if bool(self.re_sphinx_keys.search(word)):
return True
if bool(self.re_code_words.search(word)):
return True
        # All-caps abbreviations (optionally with a trailing s)
if bool(self.re_caps.search(word)):
return True
        # Words with backslashes (escape characters, i.e. weird stuff)
if "\\" in word:
return True
return False
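    # Hedged examples (not in the original): exempt_word returns True for
    # "3x3" (numeric), "FNO" and "GPUs" (all-caps abbreviations), and
    # "conf.py" (code-like names), but False for a plain word like "solver".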
def prepare_line(self, line: str) -> List[str]:
"""Prepares test line for parsing, will check if line should be skipped,
remove any sphinx keywords, then split into words based on white space.
Parameters
----------
line : str
Line of text
Returns
-------
List[str]
List of keywords
"""
# Check if line is in sphinx block or is an exempt line
if self.check_sphinx_block(line):
return []
if self.exempt_lines(line):
return []
# Remove specifc parts of the line that are sphinx items
re_sphinx_inline = re.compile(r"(:ref:|:math:|:numref:|:eq:|:code:)`.*?`")
re_sphinx_code = re.compile(r"(``.*?``|`.*?`)")
re_sphinx_cite = re.compile(r"\[#.*?\]\_")
re_sphinx_link = re.compile(r"<.*?>`\_")
re_sphinx_block_titles = re.compile(
r"(\.\.\s+table::|\.\.\s+list-table::|\.\.\s+note::)"
)
line = line.strip("\n")
if bool(re_sphinx_inline.search(line)):
line = re_sphinx_inline.sub(r"", line)
if bool(re_sphinx_code.search(line)):
line = re_sphinx_code.sub(r"", line)
if bool(re_sphinx_cite.search(line)):
line = re_sphinx_cite.sub(r"", line)
if bool(re_sphinx_link.search(line)):
line = re_sphinx_link.sub(r"", line)
if bool(re_sphinx_block_titles.search(line)):
line = re_sphinx_block_titles.sub(r"", line)
# Split up sentence into words
words = re.split(r"(\s+|/)", line)
# Filter empty strings
words = list(filter(None, words))
return words
def get_unknown_words(self, line: str) -> List[str]:
"""Gets unknown words not present in spelling dictionary
Parameters
----------
line : str
Line of text to parse
Returns
-------
List[str]
List of unknown words (if any)
"""
# Clean line and split into list of words
words = self.prepare_line(line)
        # Primitive plural / possessive word checking
re_plural = re.compile(r"(\’s|\'s|s\'|s\’|s|\(s\))$")
unknown_words = []
for word0 in words:
            # Check for misspelling of the word as-is and without a trailing s
if word0 in self.spell_checker or self.exempt_word(word0):
continue
# Strip punctuation and check again
word = word0.strip(punctuation)
if word in self.spell_checker or self.exempt_word(word):
continue
            # Add a dot after stripping punctuation, for abbreviations
word = word0.strip(punctuation) + "."
if word in self.spell_checker or self.exempt_word(word):
continue
            # Strip plural / possessive
word = re_plural.sub(r"", word0)
if word in self.spell_checker or self.exempt_word(word):
continue
# Strip plural after punctuation
word = re_plural.sub(r"", word0.strip(punctuation))
if word in self.spell_checker or self.exempt_word(word):
continue
# If none of these combos worked mark as unknown
unknown_words.append(word0.strip(punctuation))
return unknown_words
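def _demo_rst_spell_checker():
    # Hedged usage sketch (not part of the original module): drive the checker
    # over an in-memory RST fragment using the bundled English dictionary
    # rather than the project dictionaries loaded in test_rst_spelling.
    spell = SpellChecker(language="en", distance=2)
    checker = RSTSpellChecker(spell)
    fragment = [
        ".. code-block:: python\n",
        "    print('inside a code block, skipped')\n",
        "this sentennce has a typo.\n",
    ]
    unknown = []
    for line in fragment:
        unknown.extend(checker.get_unknown_words(line))
    return unknown  # expected: ["sentennce"]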
def test_rst_spelling(
userguide_path: Path,
en_dictionary_path: Path = Path("./test/en_dictionary.json.gz"),
extra_dictionary_path: Path = Path("./test/modulus_dictionary.json"),
file_pattern: str = "*.rst",
):
"""Looks through RST files for any references to example python files
Parameters
----------
userguide_path : Path
Path to user guide RST files
en_dictionary_path: Path, optional
Path to english dictionary
extra_dictionary_path: Path, optional
Path to additional Modulus dictionary
file_pattern : str, optional
Pattern for file types to parse, by default "*.rst"
Raises
-------
ValueError: If spelling errors have been found
"""
assert userguide_path.is_dir(), "Invalid user guide folder path"
assert en_dictionary_path.is_file(), "Invalid english dictionary path"
assert extra_dictionary_path.is_file(), "Invalid additional dictionary path"
spell = SpellChecker(language=None, distance=2)
    spell.word_frequency.load_dictionary(str(en_dictionary_path), encoding="utf-8")
# Can be used to export current dictionary for merging dicts
# spell.export('en_dictionary.json.gz', gzipped=True)
# Load extra words
data = json.load(open(extra_dictionary_path))
spell.word_frequency.load_words(data["dictionary"])
rst_checker = RSTSpellChecker(spell)
spelling_errors = []
spelling_warnings = []
for doc_file in userguide_path.rglob(file_pattern):
for i, line in enumerate(open(doc_file)):
# Clean line and split into list of words
words = rst_checker.get_unknown_words(line)
for word in words:
# Get the most likely correction
corr_word = spell.correction(word)
                # If a likely correction exists in the dictionary, flag as an error
                if corr_word != word:
err_msg = f'Found potential spelling error: "{word.lower()}", did you mean "{corr_word}"?' + "\n"
err_msg += f"Located in File: {doc_file}, Line: {i}, Word: {word}" + "\n"
spelling_errors.append(colored(err_msg, "red"))
                # Otherwise flag a warning for an unrecognized word
else:
err_msg = f"Unknown word: {word}, consider adding to dictionary." + "\n"
err_msg += f"Located in File: {doc_file}, Line: {i}, Word: {word}" + "\n"
spelling_warnings.append(colored(err_msg, "yellow"))
# Print warnings
if len(spelling_warnings) > 0:
print(colored("Spelling WARNINGS:", "yellow"))
for msg in spelling_warnings:
print(msg)
# Print likely spelling errors
if len(spelling_errors) > 0:
print(colored("Spelling ERRORS:", "red"))
for msg in spelling_errors:
print(msg)
if len(spelling_errors) > 0:
raise ValueError("Spelling errors found, either correct or add new words to dictionary.")
if __name__ == "__main__":
# Paths inside CI docker container
user_guide_path = Path("./user_guide")
test_rst_spelling(user_guide_path)
| modulus-sym-main | docs/test/test_spelling.py |
from pathlib import Path
import shutil
from sphinx.application import Sphinx
import logging
logger = logging.getLogger(__name__)
def build_html(src_dir: Path, warning_file: Path = Path("./warnings.txt")):
"""Builds sphinx HTML files
Parameters
----------
src_dir : Path
Path to base directory of documentation with conf.py
warning_file : Path, optional
File name/ path for logging warnings/errors, by default Path("./warnings.txt")
"""
# Main arguments
conf_dir = src_dir
build_dir = src_dir / Path("_build")
doctree_dir = build_dir / Path("doctrees")
html_dir = build_dir / Path("html")
builder = "html"
# Write warning messages to a file (instead of stderr)
try:
warning_file.unlink()
except OSError as e:
print("Couldn't delete old warning file")
warning = open(warning_file, "w")
# clean build path if it exists
try:
        shutil.rmtree(build_dir)  # Path.unlink cannot remove a directory
except OSError as e:
print("Couldn't delete build directory")
# Create the Sphinx application object
app = Sphinx(src_dir, conf_dir, html_dir, doctree_dir, builder, warning=warning)
# Run the build
app.build()
if __name__ == "__main__":
build_html(Path("."))
| modulus-sym-main | docs/test/build_html.py |
from pathlib import Path
from sphinx.application import Sphinx
import logging
def check_error_log(
warning_file: Path = Path("./warnings.txt"),
ignore_docstrings: bool = True,
ignore_warnings: bool = True,
):
"""Checks the error log of Sphinx app for errors/warnings
Parameters
----------
warning_file : Path, optional
Path to error log files from sphinx, by default Path("./warnings.txt")
ignore_docstrings : bool, optional
Ignore docstring related errors, by default True
ignore_warnings : bool, optional
Ignore sphinx warnings, by default True
Raises
------
SystemError
If error/warning is found which is not ignored
"""
assert warning_file.is_file(), "Error log not found"
errors = []
with open(warning_file) as file:
lines = file.read().splitlines()
# Loop over lines
i = 0
print(f"Number of lines in warnings log file: {len(lines)}")
while i < len(lines):
line_str = lines[i]
# Make sure multi-line errors are included
# Sphinx logs end with this escape character we can check for (for color)
while not "[39;49;00m" in line_str and i < len(lines):
i += 1
line_str = line_str + lines[i]
i += 1
# Ignore warnings
if ignore_warnings and "WARNING" in line_str:
print(f"Ignoring docstring warning: {line_str}")
continue
# Ignore docstring issues
elif ignore_docstrings and ".py:docstring" in line_str:
print(f"Ignoring docstring issue: {line_str}")
continue
else:
errors.append(str(line_str))
if len(errors) > 0:
print("Errors found when building docs:")
for error in errors:
print(error)
raise SystemError("Sphinx build failed")
if __name__ == "__main__":
check_error_log(ignore_warnings=False)
| modulus-sym-main | docs/test/test_html.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
import os
import modulus
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from ops import dx, ddx
from modulus.sym.models.fno import *
import shutil
import cupy as cp
import scipy.io as sio
import requests
from modulus.sym.utils.io.plotter import ValidatorPlotter
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
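# Hedged usage sketch (hypothetical file id and destination, not part of the
# original example):
#
#   download_file_from_google_drive("1AbCdEfGhIjK", "PACKETS/data.mat")
#
# The response is streamed in 32 KiB chunks, so large archives never have to
# fit in memory at once.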
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
wells,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
        N_pr,
        nz=3,  # assumed layer count; not in the original signature, but self.nz is used below
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.wells = wells
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
        self.N_pr = N_pr
        self.nz = nz
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
        Accuracy_pressure = np.zeros((self.steppi, 2))
        for itt in range(self.steppi):
            Time_vector[itt] = (itt + 1) * self.dt * self.MAXZ
look = (pressure_pred[0, itt, :, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look[0, :, :], cmap="jet")
plt.title("Layer 1 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[0, :, :], (-1,))),
np.max(np.reshape(lookf[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf[0, :, :], cmap="jet")
plt.title(" Layer 1 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, abs(look[0, :, :] - lookf[0, :, :]), cmap="jet")
plt.title(" Layer 1 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look[1, :, :], cmap="jet")
plt.title("Layer 2 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[1, :, :], (-1,))),
np.max(np.reshape(lookf[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf[1, :, :], cmap="jet")
plt.title(" Layer 2 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, abs(look[1, :, :] - lookf[1, :, :]), cmap="jet")
plt.title(" Layer 2 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look[2, :, :], cmap="jet")
plt.title("Layer 3 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[2, :, :], (-1,))),
np.max(np.reshape(lookf[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf[2, :, :], cmap="jet")
plt.title(" Layer 3 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, abs(look[2, :, :] - lookf[2, :, :]), cmap="jet")
plt.title(" Layer 3 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
# name = namet + str(int(itt)) + '.png'
# plt.savefig(name)
# #plt.show()
# plt.clf()
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
Reinvent(look),
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
Reinvent(lookf),
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
Reinvent(diff1),
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=16)
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
            R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
            Accuracy_pressure[itt, 0] = R2p
            Accuracy_pressure[itt, 1] = L2p
        # squeeze=False keeps axs two-dimensional so the axs[0, i] indexing below works
        fig4, axs = plt.subplots(2, 1, figsize=(10, 10), squeeze=False)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
        for i, data in enumerate([Accuracy_pressure]):
axs[0, i].plot(
Time_vector,
data[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[0, i].set_title(["Pressure"][i], fontproperties=font)
axs[0, i].set_xlabel("Time (days)", fontproperties=font)
axs[0, i].set_ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
        for i, data in enumerate([Accuracy_pressure]):
axs[1, i].plot(
Time_vector,
data[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[1, i].set_title(["Pressure"][i], fontproperties=font)
axs[1, i].set_xlabel("Time (days)", fontproperties=font)
axs[1, i].set_ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
wells,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
        N_pr,
        nz=3,  # assumed layer count; not in the original signature, but self.nz is used below
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.wells = wells
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
        self.N_pr = N_pr
        self.nz = nz
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
        for itt in range(self.steppi):
            Time_vector[itt] = (itt + 1) * self.dt * self.MAXZ
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(20, 20), dpi=100)
look_sat = water_pred[0, itt, :, :, :]
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :, :]
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(6, 3, 1)
plt.pcolormesh(XX.T, YY.T, look_sat[0, :, :], cmap="jet")
plt.title(" Layer 1 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[0, :, :], (-1,))),
np.max(np.reshape(lookf_sat[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf_sat[0, :, :], cmap="jet")
plt.title(" Layer 1 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 3)
plt.pcolormesh(XX.T, YY.T, diff1_wat[0, :, :], cmap="jet")
plt.title(" Layer 1- water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 4)
plt.pcolormesh(XX.T, YY.T, look_sat[1, :, :], cmap="jet")
plt.title(" Layer 2 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[1, :, :], (-1,))),
np.max(np.reshape(lookf_sat[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_sat[1, :, :], cmap="jet")
plt.title(" Layer 2 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 6)
plt.pcolormesh(XX.T, YY.T, diff1_wat[1, :, :], cmap="jet")
plt.title(" Layer 2- water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 7)
plt.pcolormesh(XX.T, YY.T, look_sat[2, :, :], cmap="jet")
plt.title(" Layer 3 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[2, :, :], (-1,))),
np.max(np.reshape(lookf_sat[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_sat[2, :, :], cmap="jet")
plt.title(" Layer 3 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 9)
plt.pcolormesh(XX.T, YY.T, diff1_wat[2, :, :], cmap="jet")
plt.title(" Layer 3- water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 10)
plt.pcolormesh(XX.T, YY.T, look_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[0, :, :], (-1,))),
np.max(np.reshape(lookf_oil[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 11)
plt.pcolormesh(XX.T, YY.T, lookf_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 12)
plt.pcolormesh(XX.T, YY.T, diff1_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 13)
plt.pcolormesh(XX.T, YY.T, look_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[1, :, :], (-1,))),
np.max(np.reshape(lookf_oil[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 14)
plt.pcolormesh(XX.T, YY.T, lookf_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 15)
plt.pcolormesh(XX.T, YY.T, diff1_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 16)
plt.pcolormesh(XX.T, YY.T, look_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[2, :, :], (-1,))),
np.max(np.reshape(lookf_oil[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 17)
plt.pcolormesh(XX.T, YY.T, lookf_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 18)
plt.pcolormesh(XX.T, YY.T, diff1_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
# name = namet + str(int(itt)) + '.png'
# plt.savefig(name)
# #plt.show()
# plt.clf()
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
            R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
            Accuracy_water[itt, 0] = R2w
            Accuracy_water[itt, 1] = L2w
            R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
            Accuracy_oil[itt, 0] = R2o
            Accuracy_oil[itt, 1] = L2o
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
Reinvent(look_sat),
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
Reinvent(lookf_sat),
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
                Reinvent(diff1_wat),
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
Reinvent(look_oil),
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
Reinvent(lookf_oil),
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
Reinvent(diff1_oil),
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=16)
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
fig4, axs = plt.subplots(2, 2, figsize=(20, 10))
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
for i, data in enumerate([Accuracy_water, Accuracy_oil]):
axs[0, i].plot(
Time_vector,
data[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[0, i].set_title(
["Water_saturation", "Oil_saturation"][i], fontproperties=font
)
axs[0, i].set_xlabel("Time (days)", fontproperties=font)
axs[0, i].set_ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
for i, data in enumerate([Accuracy_water, Accuracy_oil]):
axs[1, i].plot(
Time_vector,
data[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[1, i].set_title(
["Water_saturation", "Oil_saturation"][i], fontproperties=font
)
axs[1, i].set_xlabel("Time (days)", fontproperties=font)
axs[1, i].set_ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
return f_big
# [pde-loss]
# define custom class for black oil model
class Black_oil(torch.nn.Module):
"Custom Black oil PDE definition for PINO"
def __init__(
self,
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
nz,
):
super().__init__()
self.UIR = UIR
self.UWR = UIR
self.pini_alt = pini_alt
self.LUB = LUB
self.HUB = HUB
self.aay = aay
self.bby = bby
self.SWI = SWI
self.SWR = SWR
self.UW = UW
self.BW = BW
self.UO = UO
self.BO = BO
self.MAXZ = MAXZ
self.nx = nx
self.ny = ny
self.nz = nz
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# get inputs
u = input_var["pressure"]
perm = input_var["perm"]
fin = input_var["Q"]
finwater = input_var["Qw"]
dt = input_var["Time"]
pini = input_var["Pini"]
poro = input_var["Phi"]
sini = input_var["Swini"]
sat = input_var["water_sat"]
siniuse = sini[0, 0, 0, 0, 0]
dtin = dt * self.MAXZ
dxf = 1.0 / u.shape[3]
u = u * self.pini_alt
pini = pini * self.pini_alt
        # Rescale sources to physical units
fin = fin * self.UIR
finwater = finwater * self.UIR
cuda = 0
device = torch.device(f"cuda:{cuda}" if torch.cuda.is_available() else "cpu")
# print(pressurey.shape)
p_loss = torch.zeros_like(u).to(device, torch.float32)
s_loss = torch.zeros_like(u).to(device, torch.float32)
a = perm # absolute permeability
v_min, v_max = self.LUB, self.HUB
new_min, new_max = self.aay, self.bby
m = (new_max - new_min) / (v_max - v_min)
b = new_min - m * v_min
a = m * a + b
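        # Linear min-max rescaling: permeability values in [LUB, HUB] are
        # mapped onto the physical range [aay, bby] before entering the
        # PDE residuals.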
finusew = finwater
dta = dtin
pressure = u
# water_sat = sat
prior_pressure = torch.zeros(
sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny
).to(device, torch.float32)
prior_pressure[:, 0, :, :, :] = self.pini_alt * (
torch.ones(sat.shape[0], self.nz, self.nx, self.ny).to(
device, torch.float32
)
)
prior_pressure[:, 1:, :, :, :] = u[:, :-1, :, :, :]
# dsp = u - prior_pressure #dp
prior_sat = torch.zeros(
sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny
).to(device, torch.float32)
prior_sat[:, 0, :, :, :] = siniuse * (
torch.ones(sat.shape[0], self.nz, self.nx, self.ny).to(
device, torch.float32
)
)
prior_sat[:, 1:, :, :, :] = sat[:, :-1, :, :, :]
dsw = sat - prior_sat # ds
dsw = torch.clip(dsw, 0.001, None)
S = torch.div(
torch.sub(prior_sat, self.SWI, alpha=1), (1 - self.SWI - self.SWR)
)
# Pressure equation Loss
Mw = torch.divide(torch.square(S), (self.UW * self.BW)) # Water mobility
Mo = torch.div(
torch.square(torch.sub(torch.ones(S.shape, device=u.device), S)),
(self.UO * self.BO),
)
Mt = Mw + Mo
a1 = torch.mul(Mt, a) # overall Effective permeability
a1water = torch.mul(Mw, a) # water Effective permeability
        # compute first differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :, :]
gulp = []
gulp2 = []
for i in range(self.nz):
now = inn_now[:, i, :, :][:, None, :, :]
dudx_fdma = dx(
now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = dx(
now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulp.append(dudx_fdma)
gulp2.append(dudy_fdma)
check = torch.stack(gulp, 2)[:, 0, :, :, :]
check2 = torch.stack(gulp2, 2)[:, 0, :, :]
gulpa.append(check)
gulp2a.append(check2)
dudx_fdm = torch.stack(gulpa, 0)
dudy_fdm = torch.stack(gulp2a, 0)
        # Compute second differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :, :]
gulp = []
gulp2 = []
for i in range(self.nz):
now = inn_now[:, i, :, :][:, None, :, :]
dudx_fdma = ddx(
now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = ddx(
now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulp.append(dudx_fdma)
gulp2.append(dudy_fdma)
check = torch.stack(gulp, 2)[:, 0, :, :, :]
check2 = torch.stack(gulp2, 2)[:, 0, :, :]
gulpa.append(check)
gulp2a.append(check2)
dduddx_fdm = torch.stack(gulpa, 0)
dduddy_fdm = torch.stack(gulp2a, 0)
gulp = []
gulp2 = []
for i in range(self.nz):
inn_now2 = a1[:, :, i, :, :]
dudx_fdma = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulp.append(dudx_fdma)
gulp2.append(dudy_fdma)
dcdx = torch.stack(gulp, 2)
dcdy = torch.stack(gulp2, 2)
# Expand dcdx
# dss = dcdx
dsout = torch.zeros((sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny)).to(
device, torch.float32
)
for k in range(dcdx.shape[0]):
see = dcdx[k, :, :, :, :]
gulp = []
for i in range(sat.shape[1]):
gulp.append(see)
checkken = torch.vstack(gulp)
dsout[k, :, :, :, :] = checkken
dcdx = dsout
dsout = torch.zeros((sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny)).to(
device, torch.float32
)
        for k in range(dcdy.shape[0]):
see = dcdy[k, :, :, :, :]
gulp = []
for i in range(sat.shape[1]):
gulp.append(see)
checkken = torch.vstack(gulp)
dsout[k, :, :, :, :] = checkken
dcdy = dsout
darcy_pressure = (
fin
+ (dcdx * dudx_fdm)
+ (a1 * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (a1 * dduddy_fdm)
)
# Zero outer boundary
# darcy_pressure = F.pad(darcy_pressure[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_pressure = dxf * darcy_pressure * 1e-5
p_loss = darcy_pressure
        # Saturation equation loss
dudx = dudx_fdm
dudy = dudy_fdm
dduddx = dduddx_fdm
dduddy = dduddy_fdm
gulp = []
gulp2 = []
for i in range(self.nz):
inn_now2 = a1water[:, :, i, :, :]
dudx_fdma = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulp.append(dudx_fdma)
gulp2.append(dudy_fdma)
dadx = torch.stack(gulp, 2)
dady = torch.stack(gulp2, 2)
dsout = torch.zeros((sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny)).to(
device, torch.float32
)
for k in range(dadx.shape[0]):
see = dadx[k, :, :, :, :]
gulp = []
for i in range(sat.shape[1]):
gulp.append(see)
checkken = torch.vstack(gulp)
dsout[k, :, :, :, :] = checkken
dadx = dsout
dsout = torch.zeros((sat.shape[0], sat.shape[1], self.nz, self.nx, self.ny)).to(
device, torch.float32
)
for k in range(dady.shape[0]):
see = dady[k, :, :, :, :]
gulp = []
for i in range(sat.shape[1]):
gulp.append(see)
checkken = torch.vstack(gulp)
dsout[k, :, :, :, :] = checkken
dady = dsout
flux = (dadx * dudx) + (a1water * dduddx) + (dady * dudy) + (a1water * dduddy)
fifth = poro * (dsw / dta)
toge = flux + finusew
darcy_saturation = fifth - toge
# Zero outer boundary
# darcy_saturation = F.pad(darcy_saturation[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_saturation = dxf * darcy_saturation * 1e-5
s_loss = darcy_saturation
# output_var["darcy_saturation"] = torch.mean(s_loss,dim = 0)[None,:,:,:]
output_var = {"pressured": p_loss, "saturationd": s_loss}
return output_var
# [pde-loss]
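# A minimal, hypothetical instantiation sketch for the Black_oil residual
# module above; every value here is an illustrative assumption, not the
# configuration used in this example.
def _demo_black_oil_node():
    pde = Black_oil(
        UIR=500.0, pini_alt=1000.0, LUB=1e-1, HUB=1.0, aay=50.0, bby=500.0,
        SWI=0.1, SWR=0.1, UW=1.0, BW=1.0, UO=2.5, BO=1.1, MAXZ=6000.0,
        nx=33, ny=33, nz=4,
    )
    # Evaluating pde(...) needs a dict of 5D tensors keyed by "pressure",
    # "perm", "Q", "Qw", "Time", "Pini", "Phi", "Swini" and "water_sat"
    # (e.g. (batch, steppi, nz, nx, ny) for the dynamic fields), plus the
    # dx/ddx finite-difference helpers imported at the top of this script.
    return pde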
@modulus.sym.main(config_path="conf", config_name="config_PINO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 3D PINO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
wells = np.array(
[
1,
24,
1,
1,
1,
1,
31,
1,
1,
31,
31,
1,
7,
9,
2,
14,
12,
2,
28,
19,
2,
14,
27,
2,
]
)
wells = np.reshape(wells, (-1, 3), "C")
oldfolder = os.getcwd()
os.chdir(oldfolder)
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY  # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ  # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
pini_alt = cfg.custom.NVRS.pini_alt
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
# Low_K, High_K = aay,bby
# batch_size = cfg.custom.NVRS.batch_size #'size of simulated labelled data to run'
timmee = (
cfg.custom.NVRS.timmee
) # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = (
cfg.custom.NVRS.max_t
) # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
factorr = cfg.custom.NVRS.factorr # from [0 1] excluding the limits for PermZ
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR  # upper injection rate
input_channel = (
cfg.custom.NVRS.input_channel
) # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
    if not bb:
        print("....Downloading, please hold.........")
download_file_from_google_drive(
"1wYyREUcpp0qLhbRItG5RMPeRMxVtntDi",
to_absolute_path("../PACKETS/Training4.mat"),
)
        print("...Download completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
    if not bb:
        print("....Downloading, please hold.........")
download_file_from_google_drive(
"1PX2XFG1-elzQItvkUERJqeOerTO2kevq",
to_absolute_path("../PACKETS/Test4.mat"),
)
        print("...Download completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nz, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nz, nx, ny)) # Water saturation
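    # The raw MAT arrays store each field as (nx, ny, nz); the loops below
    # permute them into the (nz, nx, ny) layout used by the training tensors
    # allocated above.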
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPerm[kk, :, :, :, :] = permin
perm = X_data1[kk, 1, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQ[kk, :, :, :, :] = permin
perm = X_data1[kk, 2, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQw[kk, :, :, :, :] = permin
perm = X_data1[kk, 3, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPhi[kk, :, :, :, :] = permin
perm = X_data1[kk, 4, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cTime[kk, :, :, :, :] = permin
perm = X_data1[kk, 5, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPini[kk, :, :, :, :] = permin
perm = X_data1[kk, 6, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cSini[kk, :, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cPress[kk, :, :, :, :] = np.clip(perm_big, 1 / pini_alt, 2.0)
perm = data_use1[kk, steppi:, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cSat[kk, :, :, :, :] = perm_big
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nz, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nz, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPerm[kk, :, :, :, :] = permin
perm = X_data2[kk, 1, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQ[kk, :, :, :, :] = permin
perm = X_data2[kk, 2, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQw[kk, :, :, :, :] = permin
perm = X_data2[kk, 3, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPhi[kk, :, :, :, :] = permin
perm = X_data2[kk, 4, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cTime[kk, :, :, :, :] = permin
perm = X_data2[kk, 5, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPini[kk, :, :, :, :] = permin
perm = X_data2[kk, 6, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cSini[kk, :, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cPress[kk, :, :, :, :] = np.clip(perm_big, 1 / pini_alt, 2.0)
perm = data_use2[kk, steppi:, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cSat[kk, :, :, :, :] = perm_big
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
# load training/ test data
input_keys = [
Key("perm", scale=(5.38467e-01, 2.29917e-01)),
Key("Q", scale=(1.33266e-03, 3.08151e-02)),
Key("Qw", scale=(1.39516e-03, 3.07869e-02)),
Key("Phi", scale=(2.69233e-01, 1.14958e-01)),
Key("Time", scale=(1.66666e-02, 1.08033e-07)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(1.99998e-01, 2.07125e-06)),
]
output_keys_pressure = [Key("pressure", scale=(1.16260e00, 5.75724e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.61902e-01, 1.97300e-01))]
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
# add additional constraining values for darcy variable
outvar_train_pressure["pressured"] = np.zeros_like(
outvar_train_pressure["pressure"]
)
outvar_train_saturation["saturationd"] = np.zeros_like(
outvar_train_saturation["water_sat"]
)
# outvar_train1_pressure["pressured"] = np.zeros_like(outvar_train1_pressure["pressure"])
# outvar_train1_saturation["saturationd"] = np.zeros_like(outvar_train1_saturation["water_sat"])
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# [init-node]
# Define FNO model for forward model (pressure)
decoder1 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("pressure", size=steppi)]
)
fno_pressure = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder1,
)
# Define FNO model for forward model (saturation)
decoder2 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("water_sat", size=steppi)]
)
fno_saturation = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder2,
)
inputs = [
"perm",
"Q",
"Qw",
"Phi",
"Time",
"Pini",
"Swini",
"pressure",
"water_sat",
]
darcyy = Node(
inputs=inputs,
outputs=[
"pressured",
"saturationd",
],
evaluate=Black_oil(
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
nz,
),
name="Darcy node",
)
nodes = (
[darcyy]
+ [fno_pressure.make_node("pino_forward_model_pressure", jit=cfg.jit)]
+ [fno_saturation.make_node("pino_forward_model_saturation", jit=cfg.jit)]
)
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
# [constraint]
# add validator
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=cfg.batch_size.test,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,\
# wells,steppi,tc2,dt,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=cfg.batch_size.test,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=cfg.batch_size.test,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,\
# wells,steppi,tc2,dt,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=cfg.batch_size.test,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/reservoir_simulation/3D/src/Forward_problem_PINO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import math
from typing import Callable
from typing import Optional
from typing import Union
from typing import List
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class Activation(enum.Enum):
ELU = enum.auto()
LEAKY_RELU = enum.auto()
MISH = enum.auto()
POLY = enum.auto()
RELU = enum.auto()
GELU = enum.auto()
SELU = enum.auto()
PRELU = enum.auto()
SIGMOID = enum.auto()
SILU = enum.auto()
SIN = enum.auto()
SQUAREPLUS = enum.auto()
SOFTPLUS = enum.auto()
TANH = enum.auto()
IDENTITY = enum.auto()
def identity(x: Tensor) -> Tensor:
return x
def squareplus(x: Tensor) -> Tensor:
b = 4
return 0.5 * (x + torch.sqrt(x * x + b))
def gelu(x: Tensor) -> Tensor:
# Applies GELU approximation, slower than sigmoid but more accurate. See: https://github.com/hendrycks/GELUs
# Standard GELU that is present in PyTorch does not JIT compile!
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
# return 0.5 * x * (1 + torch.tanh(torch.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3))))
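# Hypothetical sanity check (not part of the original file): the tanh
# approximation above should stay close to PyTorch's exact GELU.
def _demo_gelu_approx():
    x = torch.linspace(-4.0, 4.0, steps=101)
    max_err = (gelu(x) - F.gelu(x)).abs().max().item()
    assert max_err < 1e-2  # loose bound; the approximation is not exact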
class WeightNormLinear(nn.Module):
def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.empty((out_features, in_features)))
self.weight_g = nn.Parameter(torch.empty((out_features, 1)))
if bias:
self.bias = nn.Parameter(torch.empty(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.weight)
nn.init.constant_(self.weight_g, 1.0)
if self.bias is not None:
nn.init.constant_(self.bias, 0.0)
def forward(self, input: Tensor) -> Tensor:
norm = self.weight.norm(dim=1, p=2, keepdim=True)
weight = self.weight_g * self.weight / norm
return F.linear(input, weight, self.bias)
def extra_repr(self) -> str:
return "in_features={}, out_features={}, bias={}".format(
self.in_features, self.out_features, self.bias is not None
)
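# Illustrative usage (my own sketch, not from the original file): the layer
# reparameterizes the weight as weight_g * weight / ||weight|| row-wise.
def _demo_weight_norm_linear():
    lin = WeightNormLinear(in_features=8, out_features=4)
    x = torch.randn(2, 8)
    assert lin(x).shape == (2, 4)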
def get_activation_fn(
activation: Union[Activation, Callable[[Tensor], Tensor]],
module: bool = False,
**kwargs # Optional parameters
) -> Callable[[Tensor], Tensor]:
activation_mapping = {
Activation.ELU: F.elu,
Activation.LEAKY_RELU: F.leaky_relu,
Activation.MISH: F.mish,
Activation.RELU: F.relu,
Activation.GELU: F.gelu,
Activation.SELU: F.selu,
Activation.SIGMOID: torch.sigmoid,
Activation.SILU: F.silu,
Activation.SIN: torch.sin,
Activation.SQUAREPLUS: squareplus,
Activation.SOFTPLUS: F.softplus,
Activation.TANH: torch.tanh,
Activation.IDENTITY: identity,
}
# Some activations have parameters in them thus must
# be in a Module before forward call
module_activation_mapping = {
Activation.ELU: nn.ELU,
Activation.LEAKY_RELU: nn.LeakyReLU,
Activation.MISH: nn.Mish,
Activation.RELU: nn.ReLU,
        Activation.GELU: nn.GELU,
Activation.SELU: nn.SELU,
Activation.PRELU: nn.PReLU,
Activation.SIGMOID: nn.Sigmoid,
Activation.SILU: nn.SiLU,
Activation.TANH: nn.Tanh,
}
if activation in activation_mapping and not module:
activation_fn = activation_mapping[activation]
elif activation in module_activation_mapping:
activation_fn = module_activation_mapping[activation](**kwargs)
else:
activation_fn = activation
return activation_fn
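# Minimal illustrative usage (an assumption about typical call patterns,
# not code from the original file).
def _demo_get_activation_fn():
    act = get_activation_fn(Activation.SILU)  # plain callable
    prelu = get_activation_fn(Activation.PRELU, module=True)  # nn.Module with learnable parameter
    x = torch.randn(4, 8)
    return act(x), prelu(x)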
class FCLayer(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
weight_norm: bool = False,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(activation_fn)
self.weight_norm = weight_norm
self.activation_par = activation_par
if weight_norm:
self.linear = WeightNormLinear(in_features, out_features, bias=True)
else:
self.linear = nn.Linear(in_features, out_features, bias=True)
self.reset_parameters()
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def reset_parameters(self) -> None:
nn.init.constant_(self.linear.bias, 0)
nn.init.xavier_uniform_(self.linear.weight)
if self.weight_norm:
nn.init.constant_(self.linear.weight_g, 1.0)
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
# FC like layer for image channels
class ConvFCLayer(nn.Module):
def __init__(
self,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(activation_fn)
self.activation_par = activation_par
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def apply_activation(self, x: Tensor) -> Tensor:
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
class Conv1dFCLayer(ConvFCLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1, bias=True)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
class Conv2dFCLayer(ConvFCLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
self.conv.bias.requires_grad = False
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
class Conv3dFCLayer(ConvFCLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__(activation_fn, activation_par)
self.in_channels = in_channels
self.out_channels = out_channels
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=1, bias=True)
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.constant_(self.conv.bias, 0)
nn.init.xavier_uniform_(self.conv.weight)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.apply_activation(x)
return x
class SirenLayerType(enum.Enum):
FIRST = enum.auto()
HIDDEN = enum.auto()
LAST = enum.auto()
class SirenLayer(nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
layer_type: SirenLayerType = SirenLayerType.HIDDEN,
omega_0: float = 30.0,
) -> None:
super().__init__()
self.in_features = in_features
self.layer_type = layer_type
self.omega_0 = omega_0
self.linear = nn.Linear(in_features, out_features, bias=True)
self.apply_activation = layer_type in {
SirenLayerType.FIRST,
SirenLayerType.HIDDEN,
}
self.reset_parameters()
def reset_parameters(self) -> None:
weight_ranges = {
SirenLayerType.FIRST: 1.0 / self.in_features,
SirenLayerType.HIDDEN: math.sqrt(6.0 / self.in_features) / self.omega_0,
SirenLayerType.LAST: math.sqrt(6.0 / self.in_features),
}
weight_range = weight_ranges[self.layer_type]
nn.init.uniform_(self.linear.weight, -weight_range, weight_range)
k_sqrt = math.sqrt(1.0 / self.in_features)
nn.init.uniform_(self.linear.bias, -k_sqrt, k_sqrt)
def forward(self, x: Tensor) -> Tensor:
x = self.linear(x)
if self.apply_activation:
x = torch.sin(self.omega_0 * x)
return x
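# A minimal SIREN stack assembled from SirenLayer (layer sizes and the default
# omega_0 are illustrative assumptions, not values used elsewhere in this repo).
def _demo_siren_stack():
    net = nn.Sequential(
        SirenLayer(2, 64, SirenLayerType.FIRST),
        SirenLayer(64, 64, SirenLayerType.HIDDEN),
        SirenLayer(64, 1, SirenLayerType.LAST),
    )
    x = torch.rand(128, 2)
    return net(x)  # the sin activation is skipped on the LAST layer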
class FourierLayer(nn.Module):
def __init__(
self,
in_features: int,
frequencies,
) -> None:
super().__init__()
        # TODO: need a more robust way to specify these params
if isinstance(frequencies[0], str):
if "gaussian" in frequencies[0]:
nr_freq = frequencies[2]
np_f = (
np.random.normal(0, 1, size=(nr_freq, in_features)) * frequencies[1]
)
else:
nr_freq = len(frequencies[1])
np_f = []
if "full" in frequencies[0]:
np_f_i = np.meshgrid(
*[np.array(frequencies[1]) for _ in range(in_features)],
indexing="ij",
)
np_f.append(
np.reshape(
np.stack(np_f_i, axis=-1),
(nr_freq**in_features, in_features),
)
)
if "axis" in frequencies[0]:
np_f_i = np.zeros((nr_freq, in_features, in_features))
for i in range(in_features):
np_f_i[:, i, i] = np.reshape(
np.array(frequencies[1]), (nr_freq)
)
np_f.append(
np.reshape(np_f_i, (nr_freq * in_features, in_features))
)
if "diagonal" in frequencies[0]:
np_f_i = np.reshape(np.array(frequencies[1]), (nr_freq, 1, 1))
np_f_i = np.tile(np_f_i, (1, in_features, in_features))
np_f_i = np.reshape(np_f_i, (nr_freq * in_features, in_features))
np_f.append(np_f_i)
np_f = np.concatenate(np_f, axis=-2)
else:
np_f = frequencies # [nr_freq, in_features]
frequencies = torch.tensor(np_f, dtype=torch.get_default_dtype())
frequencies = frequencies.t().contiguous()
self.register_buffer("frequencies", frequencies)
def out_features(self) -> int:
return int(self.frequencies.size(1) * 2)
def forward(self, x: Tensor) -> Tensor:
x_hat = torch.matmul(x, self.frequencies)
x_sin = torch.sin(2.0 * math.pi * x_hat)
x_cos = torch.cos(2.0 * math.pi * x_hat)
x_i = torch.cat([x_sin, x_cos], dim=-1)
return x_i
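# Illustrative Fourier-feature encoding (my own sketch): 16 Gaussian-sampled
# frequencies map 2D inputs to 32 features (one sin and one cos per frequency).
def _demo_fourier_layer():
    layer = FourierLayer(in_features=2, frequencies=("gaussian", 1.0, 16))
    x = torch.rand(8, 2)
    feats = layer(x)
    assert feats.shape == (8, layer.out_features())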
class FourierFilter(nn.Module):
def __init__(
self,
in_features: int,
layer_size: int,
nr_layers: int,
input_scale: float,
) -> None:
super().__init__()
self.weight_scale = input_scale / math.sqrt(nr_layers + 1)
self.frequency = nn.Parameter(torch.empty(in_features, layer_size))
self.phase = nn.Parameter(torch.empty(1, layer_size))
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.frequency)
nn.init.uniform_(self.phase, -math.pi, math.pi)
def forward(self, x: Tensor) -> Tensor:
frequency = self.weight_scale * self.frequency
x_i = torch.sin(torch.matmul(x, 2.0 * math.pi * frequency) + self.phase)
return x_i
class GaborFilter(nn.Module):
def __init__(
self,
in_features: int,
layer_size: int,
nr_layers: int,
input_scale: float,
alpha: float,
beta: float,
) -> None:
super().__init__()
self.layer_size = layer_size
self.alpha = alpha
self.beta = beta
self.weight_scale = input_scale / math.sqrt(nr_layers + 1)
self.frequency = nn.Parameter(torch.empty(in_features, layer_size))
self.phase = nn.Parameter(torch.empty(1, layer_size))
self.mu = nn.Parameter(torch.empty(in_features, layer_size))
self.gamma = nn.Parameter(torch.empty(1, layer_size))
self.reset_parameters()
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.frequency)
nn.init.uniform_(self.phase, -math.pi, math.pi)
nn.init.uniform_(self.mu, -1.0, 1.0)
with torch.no_grad():
self.gamma.copy_(
torch.from_numpy(
np.random.gamma(self.alpha, 1.0 / self.beta, (1, self.layer_size)),
)
)
def forward(self, x: Tensor) -> Tensor:
frequency = self.weight_scale * (self.frequency * self.gamma.sqrt())
x_c = x.unsqueeze(-1)
x_c = x_c - self.mu
x_c = torch.square(x_c.norm(p=2, dim=1))
x_c = torch.exp(-0.5 * x_c * self.gamma)
x_i = x_c * torch.sin(torch.matmul(x, 2.0 * math.pi * frequency) + self.phase)
return x_i
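# Hypothetical usage sketch for GaborFilter (argument values are assumptions):
# each unit multiplies a Gaussian window centred at mu with a sinusoid.
def _demo_gabor_filter():
    filt = GaborFilter(
        in_features=2, layer_size=32, nr_layers=3,
        input_scale=10.0, alpha=6.0, beta=1.0,
    )
    x = torch.rand(5, 2)
    assert filt(x).shape == (5, 32)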
class DGMLayer(nn.Module):
def __init__(
self,
in_features_1: int,
in_features_2: int,
out_features: int,
activation_fn: Union[
Activation, Callable[[Tensor], Tensor]
] = Activation.IDENTITY,
weight_norm: bool = False,
activation_par: Optional[nn.Parameter] = None,
) -> None:
super().__init__()
self.activation_fn = activation_fn
self.callable_activation_fn = get_activation_fn(activation_fn)
self.weight_norm = weight_norm
self.activation_par = activation_par
if weight_norm:
self.linear_1 = WeightNormLinear(in_features_1, out_features, bias=False)
self.linear_2 = WeightNormLinear(in_features_2, out_features, bias=False)
else:
self.linear_1 = nn.Linear(in_features_1, out_features, bias=False)
self.linear_2 = nn.Linear(in_features_2, out_features, bias=False)
self.bias = nn.Parameter(torch.empty(out_features))
self.reset_parameters()
def exec_activation_fn(self, x: Tensor) -> Tensor:
return self.callable_activation_fn(x)
def reset_parameters(self) -> None:
nn.init.xavier_uniform_(self.linear_1.weight)
nn.init.xavier_uniform_(self.linear_2.weight)
nn.init.constant_(self.bias, 0)
if self.weight_norm:
nn.init.constant_(self.linear_1.weight_g, 1.0)
nn.init.constant_(self.linear_2.weight_g, 1.0)
def forward(self, input_1: Tensor, input_2: Tensor) -> Tensor:
x = self.linear_1(input_1) + self.linear_2(input_2) + self.bias
if self.activation_fn is not Activation.IDENTITY:
if self.activation_par is None:
x = self.exec_activation_fn(x)
else:
x = self.exec_activation_fn(self.activation_par * x)
return x
class SpectralConv1d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, modes1: int):
super().__init__()
"""
1D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, 2)
)
self.reset_parameters()
# Complex multiplication
def compl_mul1d(
self,
input: Tensor,
weights: Tensor,
) -> Tensor:
# (batch, in_channel, x ), (in_channel, out_channel, x) -> (batch, out_channel, x)
cweights = torch.view_as_complex(weights)
return torch.einsum("bix,iox->box", input, cweights)
def forward(self, x: Tensor) -> Tensor:
bsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
bsize,
self.out_channels,
x.size(-1) // 2 + 1,
device=x.device,
dtype=torch.cfloat,
)
out_ft[:, :, : self.modes1] = self.compl_mul1d(
x_ft[:, :, : self.modes1],
self.weights1,
)
# Return to physical space
x = torch.fft.irfft(out_ft, n=x.size(-1))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
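# Illustrative shape check (not from the original file): the spectral
# convolution preserves the spatial grid while remapping channels.
def _demo_spectral_conv1d():
    conv = SpectralConv1d(in_channels=2, out_channels=4, modes1=8)
    x = torch.randn(3, 2, 64)  # (batch, channels, grid)
    assert conv(x).shape == (3, 4, 64)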
class SpectralConv2d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2):
super().__init__()
"""
2D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
)
self.weights2 = nn.Parameter(
torch.empty(in_channels, out_channels, self.modes1, self.modes2, 2)
)
self.reset_parameters()
# Complex multiplication
def compl_mul2d(self, input: Tensor, weights: Tensor) -> Tensor:
# (batch, in_channel, x, y), (in_channel, out_channel, x, y) -> (batch, out_channel, x, y)
cweights = torch.view_as_complex(weights)
return torch.einsum("bixy,ioxy->boxy", input, cweights)
def forward(self, x: Tensor) -> Tensor:
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfft2(x)
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2] = self.compl_mul2d(
x_ft[:, :, : self.modes1, : self.modes2],
self.weights1,
)
out_ft[:, :, -self.modes1 :, : self.modes2] = self.compl_mul2d(
x_ft[:, :, -self.modes1 :, : self.modes2],
self.weights2,
)
# Return to physical space
x = torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1)))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
self.weights2.data = self.scale * torch.rand(self.weights2.data.shape)
class SpectralConv3d(nn.Module):
def __init__(self, in_channels, out_channels, modes1, modes2, modes3):
super().__init__()
"""
3D Fourier layer. It does FFT, linear transform, and Inverse FFT.
"""
self.in_channels = in_channels
self.out_channels = out_channels
self.modes1 = (
modes1 # Number of Fourier modes to multiply, at most floor(N/2) + 1
)
self.modes2 = modes2
self.modes3 = modes3
self.scale = 1 / (in_channels * out_channels)
self.weights1 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights2 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights3 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.weights4 = nn.Parameter(
torch.empty(
in_channels, out_channels, self.modes1, self.modes2, self.modes3, 2
)
)
self.reset_parameters()
# Complex multiplication
def compl_mul3d(
self,
input: Tensor,
weights: Tensor,
) -> Tensor:
# (batch, in_channel, x, y, z), (in_channel, out_channel, x, y, z) -> (batch, out_channel, x, y, z)
cweights = torch.view_as_complex(weights)
return torch.einsum("bixyz,ioxyz->boxyz", input, cweights)
def forward(self, x: Tensor) -> Tensor:
batchsize = x.shape[0]
        # Compute Fourier coefficients up to factor of e^(- something constant)
x_ft = torch.fft.rfftn(x, dim=[-3, -2, -1])
# Multiply relevant Fourier modes
out_ft = torch.zeros(
batchsize,
self.out_channels,
x.size(-3),
x.size(-2),
x.size(-1) // 2 + 1,
dtype=torch.cfloat,
device=x.device,
)
out_ft[:, :, : self.modes1, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, : self.modes2, : self.modes3], self.weights1
)
out_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, : self.modes2, : self.modes3], self.weights2
)
out_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, : self.modes1, -self.modes2 :, : self.modes3], self.weights3
)
out_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3] = self.compl_mul3d(
x_ft[:, :, -self.modes1 :, -self.modes2 :, : self.modes3], self.weights4
)
# Return to physical space
x = torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1)))
return x
def reset_parameters(self):
self.weights1.data = self.scale * torch.rand(self.weights1.data.shape)
self.weights2.data = self.scale * torch.rand(self.weights2.data.shape)
self.weights3.data = self.scale * torch.rand(self.weights3.data.shape)
self.weights4.data = self.scale * torch.rand(self.weights4.data.shape)
def fourier_derivatives(x: Tensor, l: List[float]) -> Tuple[Tensor, Tensor]:
    # check that input shape matches domain length
assert len(x.shape) - 2 == len(l), "input shape doesn't match domain dims"
# set pi from numpy
pi = float(np.pi)
# get needed dims
batchsize = x.size(0)
n = x.shape[2:]
dim = len(l)
# get device
device = x.device
# compute fourier transform
x_h = torch.fft.fftn(x, dim=list(range(2, dim + 2)))
# make wavenumbers
k_x = []
for i, nx in enumerate(n):
k_x.append(
torch.cat(
(
torch.arange(start=0, end=nx // 2, step=1, device=device),
torch.arange(start=-nx // 2, end=0, step=1, device=device),
),
0,
).reshape((i + 2) * [1] + [nx] + (dim - i - 1) * [1])
)
# compute laplacian in fourier space
j = torch.complex(
torch.tensor([0.0], device=device), torch.tensor([1.0], device=device)
) # Cuda graphs does not work here
wx_h = [j * k_x_i * x_h * (2 * pi / l[i]) for i, k_x_i in enumerate(k_x)]
wxx_h = [
j * k_x_i * wx_h_i * (2 * pi / l[i])
for i, (wx_h_i, k_x_i) in enumerate(zip(wx_h, k_x))
]
# inverse fourier transform out
wx = torch.cat(
[torch.fft.ifftn(wx_h_i, dim=list(range(2, dim + 2))).real for wx_h_i in wx_h],
dim=1,
)
wxx = torch.cat(
[
torch.fft.ifftn(wxx_h_i, dim=list(range(2, dim + 2))).real
for wxx_h_i in wxx_h
],
dim=1,
)
return (wx, wxx)
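# Illustrative spectral-derivative check (my own sketch, not from the original
# file): for u = sin(2*pi*x) on a periodic unit domain, the first derivative
# should be 2*pi*cos(2*pi*x) and is recovered to float precision.
def _demo_fourier_derivatives():
    n = 64
    xg = torch.arange(n, dtype=torch.get_default_dtype()) / n
    u = torch.sin(2 * np.pi * xg).reshape(1, 1, n)  # (batch, channel, grid)
    wx, _wxx = fourier_derivatives(u, l=[1.0])
    expected = 2 * np.pi * torch.cos(2 * np.pi * xg).reshape(1, 1, n)
    assert torch.allclose(wx, expected, atol=1e-3)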
def hessian_tanh_fc_layer(
u: Tensor,
ux: Tensor,
uxx: Tensor,
weights_1: Tensor,
weights_2: Tensor,
bias_1: Tensor,
) -> Tuple[Tensor]:
# dim for einsum
dim = len(u.shape) - 2
dim_str = "xyz"[:dim]
# compute first order derivatives of input
# compute first layer
if dim == 1:
u_hidden = F.conv1d(u, weights_1, bias_1)
elif dim == 2:
u_hidden = F.conv2d(u, weights_1, bias_1)
elif dim == 3:
u_hidden = F.conv3d(u, weights_1, bias_1)
# compute derivative hidden layer
diff_tanh = 1 / torch.cosh(u_hidden) ** 2
# compute diff(f(g))
diff_fg = torch.einsum(
"mi" + dim_str + ",bm" + dim_str + ",km" + dim_str + "->bi" + dim_str,
weights_1,
diff_tanh,
weights_2,
)
# compute diff(f(g)) * diff(g)
vx = [
torch.einsum("bi" + dim_str + ",bi" + dim_str + "->b" + dim_str, diff_fg, w)
for w in ux
]
vx = [torch.unsqueeze(w, dim=1) for w in vx]
# compute diagonal of hessian
# double derivative of hidden layer
diff_diff_tanh = -2 * diff_tanh * torch.tanh(u_hidden)
# compute diff(g) * hessian(f) * diff(g)
vxx1 = [
torch.einsum(
"bi"
+ dim_str
+ ",mi"
+ dim_str
+ ",bm"
+ dim_str
+ ",mj"
+ dim_str
+ ",bj"
+ dim_str
+ "->b"
+ dim_str,
w,
weights_1,
weights_2 * diff_diff_tanh,
weights_1,
w,
)
for w in ux
] # (b,x,y,t)
# compute diff(f) * hessian(g)
vxx2 = [
torch.einsum("bi" + dim_str + ",bi" + dim_str + "->b" + dim_str, diff_fg, w)
for w in uxx
]
vxx = [torch.unsqueeze(a + b, dim=1) for a, b in zip(vxx1, vxx2)]
return (vx, vxx)
| modulus-sym-main | examples/reservoir_simulation/3D/src/layers_torch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
from math import ceil, floor
def deriveSizeFromScale(img_shape, scale):
output_shape = []
for k in range(2):
output_shape.append(int(ceil(scale[k] * img_shape[k])))
return output_shape
def deriveScaleFromSize(img_shape_in, img_shape_out):
scale = []
for k in range(2):
scale.append(1.0 * img_shape_out[k] / img_shape_in[k])
return scale
def triangle(x):
x = np.array(x).astype(np.float64)
lessthanzero = np.logical_and((x >= -1), x < 0)
greaterthanzero = np.logical_and((x <= 1), x >= 0)
f = np.multiply((x + 1), lessthanzero) + np.multiply((1 - x), greaterthanzero)
return f
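# The function below is Keys' bicubic convolution kernel with a = -0.5 (the
# same kernel MATLAB's imresize uses); its support is |x| <= 2.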
def cubic(x):
x = np.array(x).astype(np.float64)
absx = np.absolute(x)
absx2 = np.multiply(absx, absx)
absx3 = np.multiply(absx2, absx)
f = np.multiply(1.5 * absx3 - 2.5 * absx2 + 1, absx <= 1) + np.multiply(
-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2, (1 < absx) & (absx <= 2)
)
return f
def contributions(in_length, out_length, scale, kernel, k_width):
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length + 1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate(
(np.arange(in_length), np.arange(in_length - 1, -1, step=-1))
).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
def imresizemex(inimg, weights, indices, dim):
in_shape = inimg.shape
w_shape = weights.shape
out_shape = list(in_shape)
out_shape[dim] = w_shape[0]
outimg = np.zeros(out_shape)
if dim == 0:
for i_img in range(in_shape[1]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[ind, i_img].astype(np.float64)
outimg[i_w, i_img] = np.sum(
np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0
)
elif dim == 1:
for i_img in range(in_shape[0]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[i_img, ind].astype(np.float64)
outimg[i_img, i_w] = np.sum(
np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0
)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def imresizevec(inimg, weights, indices, dim):
wshape = weights.shape
if dim == 0:
weights = weights.reshape((wshape[0], wshape[2], 1, 1))
outimg = np.sum(
weights * ((inimg[indices].squeeze(axis=1)).astype(np.float64)), axis=1
)
elif dim == 1:
weights = weights.reshape((1, wshape[0], wshape[2], 1))
outimg = np.sum(
weights * ((inimg[:, indices].squeeze(axis=2)).astype(np.float64)), axis=2
)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def resizeAlongDim(A, dim, weights, indices, mode="vec"):
if mode == "org":
out = imresizemex(A, weights, indices, dim)
else:
out = imresizevec(A, weights, indices, dim)
return out
def imresize(I, scalar_scale=None, method="bicubic", output_shape=None, mode="vec"):
if method == "bicubic":
kernel = cubic
elif method == "bilinear":
kernel = triangle
    else:
        raise ValueError("Unidentified method supplied")
kernel_width = 4.0
# Fill scale and output_size
if scalar_scale is not None:
scalar_scale = float(scalar_scale)
scale = [scalar_scale, scalar_scale]
output_size = deriveSizeFromScale(I.shape, scale)
elif output_shape is not None:
scale = deriveScaleFromSize(I.shape, output_shape)
output_size = list(output_shape)
    else:
        raise ValueError("scalar_scale OR output_shape should be defined!")
scale_np = np.array(scale)
order = np.argsort(scale_np)
weights = []
indices = []
for k in range(2):
w, ind = contributions(
I.shape[k], output_size[k], scale[k], kernel, kernel_width
)
weights.append(w)
indices.append(ind)
B = np.copy(I)
flag2D = False
if B.ndim == 2:
B = np.expand_dims(B, axis=2)
flag2D = True
for k in range(2):
dim = order[k]
B = resizeAlongDim(B, dim, weights[dim], indices[dim], mode)
if flag2D:
B = np.squeeze(B, axis=2)
return B
def convertDouble2Byte(I):
B = np.clip(I, 0.0, 1.0)
B = 255 * B
return np.around(B).astype(np.uint8)
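# Hypothetical usage sketch (not part of the original file): resize a random
# image either by a scalar scale or by an explicit output shape.
def _demo_imresize():
    img = np.random.rand(32, 32)
    small = imresize(img, scalar_scale=0.5)  # bicubic by default -> (16, 16)
    big = imresize(img, output_shape=(64, 64))
    return small.shape, big.shape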
| modulus-sym-main | examples/reservoir_simulation/3D/src/imresize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Nvidia Finite volume reservoir simulator with flexible solver
AMG to solve the pressure and saturation well-posed inverse problem
Geostatistics packages are also provided
@Author: Clement Etienam
"""
print(".........................IMPORT SOME LIBRARIES.....................")
import os
import numpy as np
def is_available():
"""
Check NVIDIA with nvidia-smi command
Returning code 0 if no error, it means NVIDIA is installed
Other codes mean not installed
"""
code = os.system("nvidia-smi")
return code
Yet = is_available()
if Yet == 0:
print("GPU Available with CUDA")
try:
import pyamgx
    except ImportError:
pyamgx = None
import cupy as cp
from numba import cuda
print(cuda.detect()) # Print the GPU information
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.InteractiveSession(config=config)
# import pyamgx
from cupyx.scipy.sparse import csr_matrix, spmatrix
clementtt = 0
else:
print("No GPU Available")
import numpy as cp
from scipy.sparse import csr_matrix
clementtt = 1
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import MiniBatchKMeans
import os.path
import torch
# import yaml
from scipy import interpolate
import multiprocessing
import mpslib as mps
import numpy.matlib
from scipy.spatial.distance import cdist
from pyDOE import lhs
import matplotlib.colors
from matplotlib import cm
from shutil import rmtree
from kneed import KneeLocator
import numpy
# from PIL import Image
from scipy.fftpack import dct
import numpy.matlib
import matplotlib.lines as mlines
# os.environ['KERAS_BACKEND'] = 'tensorflow'
import os.path
import time
import random
import os.path
from datetime import timedelta
# import dolfin as df
import sys
from numpy import *
import scipy.optimize.lbfgsb as lbfgsb
import numpy.linalg
from numpy.linalg import norm
from scipy.fftpack.realtransforms import idct
import numpy.ma as ma
from matplotlib.font_manager import FontProperties
import logging
import os
import matplotlib as mpl
from FyeldGenerator import generate_field
from imresize import *
import warnings
warnings.filterwarnings("ignore")
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # I have just 1 GPU
from cpuinfo import get_cpu_info
# Prints a json string describing the cpu
s = get_cpu_info()
print("Cpu info")
for k, v in s.items():
print(f"\t{k}: {v}")
cores = multiprocessing.cpu_count()
import math
logger = logging.getLogger(__name__)
# numpy.random.seed(99)
print(" ")
print(" This computer has %d cores, which will all be utilised in parallel " % cores)
print(" ")
print("......................DEFINE SOME FUNCTIONS.....................")
def Reinvent(matt):
nx, ny, nz = matt.shape[1], matt.shape[2], matt.shape[0]
dess = np.zeros((nx, ny, nz))
for i in range(nz):
dess[:, :, i] = matt[i, :, :]
return dess
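# Quick illustrative check (my own sketch): Reinvent reorders a (nz, nx, ny)
# array into (nx, ny, nz).
def _demo_reinvent():
    m = np.zeros((3, 4, 5))  # (nz, nx, ny)
    assert Reinvent(m).shape == (4, 5, 3)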
def Add_marker(plt, XX, YY, locc):
"""
Function to add marker to given coordinates on a matplotlib plot
Parameters:
plt: a matplotlib.pyplot object to add the markers to
XX: a numpy array of X coordinates
YY: a numpy array of Y coordinates
locc: a numpy array of locations where markers need to be added
Return:
None
"""
# iterate through each location
for i in range(locc.shape[0]):
a = locc[i, :]
xloc = int(a[0])
yloc = int(a[1])
# if the location type is 2, add an upward pointing marker
if a[2] == 2:
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=100,
marker="^",
color="white",
)
# otherwise, add a downward pointing marker
else:
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=100,
marker="v",
color="white",
)
def check_cupy_sparse_matrix(A):
"""
Function to check if a matrix is a Cupy sparse matrix and convert it to a CSR matrix if necessary
Parameters:
A: a sparse matrix
Return:
A: a CSR matrix
"""
if not isinstance(A, spmatrix):
# Convert the matrix to a csr matrix if it is not already a cupy sparse matrix
A = csr_matrix(A)
return A
def Plot_RSM_percentile(pertoutt, True_mat, Namesz):
timezz = True_mat[:, 0].reshape(-1, 1)
P10 = pertoutt
plt.figure(figsize=(40, 40))
plt.subplot(4, 4, 1)
plt.plot(timezz, True_mat[:, 1], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 1], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 2)
plt.plot(timezz, True_mat[:, 2], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 2], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 3)
plt.plot(timezz, True_mat[:, 3], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 3], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 4)
plt.plot(timezz, True_mat[:, 4], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 4], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 5)
plt.plot(timezz, True_mat[:, 5], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 5], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 6)
plt.plot(timezz, True_mat[:, 6], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 6], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 7)
plt.plot(timezz, True_mat[:, 7], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 7], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 8)
plt.plot(timezz, True_mat[:, 8], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 8], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 9)
plt.plot(timezz, True_mat[:, 9], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 9], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 10)
plt.plot(timezz, True_mat[:, 10], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 10], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 11)
plt.plot(timezz, True_mat[:, 11], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 11], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 12)
plt.plot(timezz, True_mat[:, 12], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 12], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 13)
plt.plot(timezz, True_mat[:, 13], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 13], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 14)
plt.plot(timezz, True_mat[:, 14], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 14], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 15)
plt.plot(timezz, True_mat[:, 15], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 15], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 16)
plt.plot(timezz, True_mat[:, 16], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 16], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
# os.chdir('RESULTS')
    plt.savefig(Namesz)  # save as PNG without displaying the figure
# os.chdir(oldfolder)
plt.clf()
plt.close()
def Plot_performance(
PINN, PINN2, trueF, nx, ny, namet, UIR, itt, dt, MAXZ, pini_alt, steppi, wells
):
look = (PINN[itt, :, :, :]) * pini_alt
look_sat = PINN2[itt, :, :, :]
look_oil = 1 - look_sat
lookf = (trueF[itt, :, :, :]) * pini_alt
# print(lookf.shape)
lookf_sat = trueF[itt + steppi, :, :, :]
lookf_oil = 1 - lookf_sat
XX, YY = np.meshgrid(np.arange(nx), np.arange(ny))
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look[0, :, :], cmap="jet")
plt.title(" Layer 1 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[:, :, 0], (-1,))),
np.max(np.reshape(lookf[:, :, 0], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look[1, :, :], cmap="jet")
plt.title(" Layer 2 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[:, :, 1], (-1,))),
np.max(np.reshape(lookf[:, :, 1], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look[2, :, :], cmap="jet")
plt.title(" Layer 3 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[:, :, 2], (-1,))),
np.max(np.reshape(lookf[:, :, 2], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 0], cmap="jet")
plt.title(" Layer 1 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 1], cmap="jet")
plt.title(" Layer 2 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 2], cmap="jet")
plt.title(" Layer 3 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, abs(look[0, :, :] - lookf[:, :, 0]), cmap="jet")
plt.title(" Layer 1 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, abs(look[1, :, :] - lookf[:, :, 1]), cmap="jet")
plt.title(" Layer 2 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, abs(look[2, :, :] - lookf[:, :, 2]), cmap="jet")
plt.title(" Layer 3 - Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = "pressure_evolution" + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
XX, YY = np.meshgrid(np.arange(nx), np.arange(ny))
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look_sat[0, :, :], cmap="jet")
plt.title("Layer 1 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[:, :, 0], (-1,))),
np.max(np.reshape(lookf_sat[:, :, 0], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look_sat[1, :, :], cmap="jet")
plt.title("Layer 2 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[:, :, 1], (-1,))),
np.max(np.reshape(lookf_sat[:, :, 1], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look_sat[2, :, :], cmap="jet")
plt.title("Layer 3 - water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[:, :, 2], (-1,))),
np.max(np.reshape(lookf_sat[:, :, 2], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 0], cmap="jet")
plt.title("Layer 1 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 1], cmap="jet")
plt.title("Layer 2 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 2], cmap="jet")
plt.title("Layer 3 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, abs(look_sat[0, :, :] - lookf_sat[:, :, 0]), cmap="jet")
plt.title("Layer 1 - water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, abs(look_sat[1, :, :] - lookf_sat[:, :, 1]), cmap="jet")
plt.title("Layer 2 - water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, abs(look_sat[2, :, :] - lookf_sat[:, :, 2]), cmap="jet")
plt.title("Layer 3 - water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = "water_evolution" + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
XX, YY = np.meshgrid(np.arange(nx), np.arange(ny))
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look_oil[0, :, :], cmap="jet")
plt.title("Layer 1- oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[:, :, 0], (-1,))),
np.max(np.reshape(lookf_oil[:, :, 0], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look_oil[1, :, :], cmap="jet")
plt.title("Layer 2 - oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[:, :, 1], (-1,))),
np.max(np.reshape(lookf_oil[:, :, 1], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look_oil[2, :, :], cmap="jet")
plt.title("Layer 3 - oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[:, :, 2], (-1,))),
np.max(np.reshape(lookf_oil[:, :, 2], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 0], cmap="jet")
plt.title(" Layer 1 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 1], cmap="jet")
plt.title(" Layer 2 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 2], cmap="jet")
plt.title(" Layer 3 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, abs(look_oil[0, :, :] - lookf_oil[:, :, 0]), cmap="jet")
plt.title(" Layer 1 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, abs(look_oil[1, :, :] - lookf_oil[:, :, 1]), cmap="jet")
plt.title(" Layer 2 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, abs(look_oil[2, :, :] - lookf_oil[:, :, 2]), cmap="jet")
plt.title(" Layer 3 - oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = "oil_evolution" + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
def Plot_performance2(
trueF, nx, ny, namet, UIR, itt, dt, MAXZ, pini_alt, steppi, wells
):
lookf = (trueF[itt, :, :, :]) * pini_alt
lookf_sat = trueF[itt + steppi, :, :, :]
lookf_oil = 1 - lookf_sat
XX, YY = np.meshgrid(np.arange(nx), np.arange(ny))
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 0], cmap="jet")
plt.title(" Layer 1 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 1], cmap="jet")
plt.title(" Layer 2 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, lookf[:, :, 2], cmap="jet")
plt.title(" Layer 3 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 0], cmap="jet")
plt.title("Layer 1 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 1], cmap="jet")
plt.title("Layer 2 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_sat[:, :, 2], cmap="jet")
plt.title("Layer 3 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
# plt.clim(np.min(np.reshape(lookf_sat,(-1,))),np.max(np.reshape(lookf_sat,(-1,))))
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 0], cmap="jet")
plt.title(" Layer 1 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 1], cmap="jet")
plt.title(" Layer 2 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, lookf_oil[:, :, 2], cmap="jet")
plt.title(" Layer 3 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = "evolution" + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
# Geostatistics module
def intial_ensemble(Nx, Ny, Nz, N, permx):
"""
    Generate an initial ensemble of permeability fields using Multiple-Point Statistics (MPS)
Parameters:
Nx: an integer representing the number of grid cells in the x-direction
Ny: an integer representing the number of grid cells in the y-direction
Nz: an integer representing the number of grid cells in the z-direction
N: an integer representing the number of realizations in the ensemble
    permx: a numpy array representing the permeability training image (TI)
Return:
ensemble: a numpy array representing the ensemble of permeability fields
"""
    # instantiate MPSlib with the 'mps_snesim_tree' method
    O = mps.mpslib(method="mps_snesim_tree")
# set the number of realizations to N
O.par["n_real"] = N
# set the permeability field TI
k = permx
kjenn = k
O.ti = kjenn
# set the simulation grid size
O.par["simulation_grid_size"] = (Ny, Nx, Nz)
# run MPS simulation in parallel
O.run_parallel()
# get the ensemble of realizations
ensemble = O.sim
# reformat the ensemble
ens = []
for kk in range(N):
temp = np.reshape(ensemble[kk], (-1, 1), "F")
ens.append(temp)
ensemble = np.hstack(ens)
# remove temporary files generated during MPS simulation
from glob import glob
for f3 in glob("thread*"):
rmtree(f3)
for f4 in glob("*mps_snesim_tree_*"):
os.remove(f4)
for f4 in glob("*ti_thread_*"):
os.remove(f4)
return ensemble
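# --- Illustrative usage sketch (hedged): shows the intended shape contract of
# intial_ensemble. It assumes the `mps` object used above (the mpslib Python
# bindings, imported earlier in this script) is installed, and uses a random
# stand-in training image, so the realizations are not geologically
# meaningful. This helper is hypothetical and never invoked by the script.
def _demo_intial_ensemble():
    Nx, Ny, Nz, N = 33, 33, 1, 5
    permx = np.random.randn(Ny, Nx)  # stand-in training image
    ensemble = intial_ensemble(Nx, Ny, Nz, N, permx)
    print(ensemble.shape)  # expected: (Nx * Ny * Nz, N)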
def initial_ensemble_gaussian(Nx, Ny, Nz, N, minn, maxx):
"""
Function to generate an initial ensemble of permeability fields using Gaussian distribution
Parameters:
Nx: an integer representing the number of grid cells in the x-direction
Ny: an integer representing the number of grid cells in the y-direction
Nz: an integer representing the number of grid cells in the z-direction
N: an integer representing the number of realizations in the ensemble
minn: a float representing the minimum value of the permeability field
maxx: a float representing the maximum value of the permeability field
Return:
fensemble: a numpy array representing the ensemble of permeability fields
"""
shape = (Nx, Ny)
distrib = "gaussian"
fensemble = np.zeros((Nx * Ny * Nz, N))
for k in range(N):
fout = []
# generate a 3D field
for j in range(Nz):
field = generate_field(distrib, Pkgen(3), shape)
field = imresize(field, output_shape=shape)
foo = np.reshape(field, (-1, 1), "F")
fout.append(foo)
fout = np.vstack(fout)
# scale the field to the desired range
clfy = MinMaxScaler(feature_range=(minn, maxx))
        clfy.fit(fout)
fout = clfy.transform(fout)
fensemble[:, k] = np.ravel(fout)
return fensemble
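# --- Illustrative usage sketch (hedged): assumes the helpers referenced by
# initial_ensemble_gaussian (generate_field, Pkgen, imresize, MinMaxScaler)
# were imported earlier in this script. Not invoked anywhere.
def _demo_initial_ensemble_gaussian():
    fens = initial_ensemble_gaussian(Nx=32, Ny=32, Nz=3, N=4, minn=1.0, maxx=500.0)
    print(fens.shape)  # expected: (32 * 32 * 3, 4), values within [1, 500]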
def Pkgen(n):
def Pk(k):
return np.power(k, -n)
return Pk
# Draw samples from a normal distribution
def distrib(shape):
a = np.random.normal(loc=0, scale=1, size=shape)
b = np.random.normal(loc=0, scale=1, size=shape)
return a + 1j * b
def Peaceman_well(
inn,
ooutp,
oouts,
MAXZ,
mazw,
s1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
CFD,
nz,
):
"""
Calculates the pressure and flow rates for an injection and production well using the Peaceman model.
Args:
- inn (dictionary): dictionary containing the input parameters (including permeability and injection/production rates)
- ooutp (numpy array): 4D numpy array containing pressure values for each time step and grid cell
- oouts (numpy array): 4D numpy array containing saturation values for each time step and grid cell
    - MAXZ (float): maximum simulation time in days (rescales the dimensionless time step)
    - mazw (float): the injection/production well location in the z-direction
    - s1 (float): the length of the computational domain in the z-direction
    - LUB (float): the lower bound of the rescaled permeability
    - HUB (float): the upper bound of the rescaled permeability
    - aay (float): the lower bound of the original permeability
    - bby (float): the upper bound of the original permeability
- DX (float): the cell size in the x-direction
- steppi (int): number of time steps
- pini_alt (float): the initial pressure
- SWI (float): the initial water saturation
- SWR (float): the residual water saturation
- UW (float): the viscosity of water
- BW (float): the formation volume factor of water
- DZ (float): the cell thickness in the z-direction
- rwell (float): the well radius
- skin (float): the skin factor
- UO (float): the viscosity of oil
- BO (float): the formation volume factor of oil
- pwf_producer (float): the desired pressure at the producer well
- dt (float): the time step
- N_inj (int): the number of injection wells
    - N_pr (int): the number of production wells
    - CFD (int): source flag; when 0 the pressure/saturation fields are re-oriented with Reinvent before use
    - nz (int): the number of cells in the z-direction
Returns:
    - overr (numpy array): time stamps stacked column-wise with BHP, oil rate, water rate and water cut for each time step
"""
matI = Reinvent(inn["Qw"][0, 0, :, :, :].detach().cpu().numpy())
matP = Reinvent(inn["Q"][0, 0, :, :, :].detach().cpu().numpy())
Injector_location = np.where(matI.ravel() > 0)[0]
producer_location = np.where(matP.ravel() < 0)[0]
PERM = Reinvent(inn["perm"][0, 0, :, :, :].detach().cpu().numpy())
PERM = rescale_linear_pytorch_numpy(
np.reshape(PERM, (-1,), "F"), LUB, HUB, aay, bby
)
kuse_inj = PERM[Injector_location]
kuse_prod = PERM[producer_location]
RE = 0.2 * DX
Baa = []
Timz = []
# print(ooutp.shape)
for kk in range(steppi):
Ptito = ooutp[0, kk, :, :, :]
Stito = oouts[0, kk, :, :, :]
if CFD == 0:
Ptito = Reinvent(Ptito)
Stito = Reinvent(Stito)
else:
pass
average_pressure = (
Ptito.ravel()[producer_location]
) * pini_alt # np.mean(Ptito.ravel()) * pini_alt #* (1/aug)
p_inj = (Ptito.ravel()[Injector_location]) * pini_alt # *(1/aug)
# p_prod = (Ptito.ravel()[producer_location] ) * pini_alt
S = Stito.ravel().reshape(-1, 1)
Sout = (S - SWI) / (1 - SWI - SWR)
Krw = Sout**2 # Water mobility
Kro = (1 - Sout) ** 2 # Oil mobility
krwuse = Krw.ravel()[Injector_location]
krwusep = Krw.ravel()[producer_location]
krouse = Kro.ravel()[producer_location]
up = UW * BW
down = 2 * np.pi * kuse_inj * krwuse * DZ
right = np.log(RE / rwell) + skin
temp = (up / down) * right
# temp[temp ==-inf] = 0
Pwf = p_inj + temp
Pwf = np.abs(Pwf)
BHP = np.sum(np.reshape(Pwf, (-1, N_inj), "C"), axis=0) / nz
up = UO * BO
down = 2 * np.pi * kuse_prod * krouse * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qoil = np.abs(-(drawdown * J))
qoil = np.sum(np.reshape(qoil, (-1, N_pr), "C"), axis=0) / nz
up = UW * BW
down = 2 * np.pi * kuse_prod * krwusep * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qwater = np.abs(-(drawdown * J))
qwater = np.sum(np.reshape(qwater, (-1, N_pr), "C"), axis=0) / nz
# qwater[qwater==0] = 0
# water cut
wct = (qwater / (qwater + qoil)) * np.float32(100)
timz = ((kk + 1) * dt) * MAXZ
# timz = timz.reshape(1,1)
qs = [BHP, qoil, qwater, wct]
# print(qs.shape)
qs = np.asarray(qs)
qs = qs.reshape(1, -1)
Baa.append(qs)
Timz.append(timz)
Baa = np.vstack(Baa)
Timz = np.vstack(Timz)
overr = np.hstack([Timz, Baa])
return overr # np.vstack(B)
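# --- Scalar sketch of the Peaceman well-index relation used in Peaceman_well
# above: J = 2*pi*k*kr*h / (mu*B*(ln(re/rw) + skin)) and q = J * drawdown,
# with re = 0.2*DX for a square cell. All numbers are illustrative
# placeholders, not field data; this helper is never invoked by the script.
def _demo_peaceman_rate():
    DX, DZ = 50.0, 20.0            # cell size and thickness
    k, kro = 100.0, 0.8            # permeability and oil relative permeability
    UO, BO = 2.5, 1.1              # oil viscosity and formation volume factor
    rwell, skin = 0.3, 0.0
    RE = 0.2 * DX                  # Peaceman equivalent radius
    J = (2 * np.pi * k * kro * DZ) / (UO * BO * (np.log(RE / rwell) + skin))
    drawdown = 1000.0 - 100.0      # cell pressure minus bottom-hole pressure
    return np.abs(-(drawdown * J))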
# Points generation
def test_points_gen(n_test, nder, interval=(-1.0, 1.0), distrib="random", **kwargs):
return {
"random": lambda n_test, nder: (interval[1] - interval[0])
* np.random.rand(n_test, nder)
+ interval[0],
"lhs": lambda n_test, nder: (interval[1] - interval[0])
* lhs(nder, samples=n_test, **kwargs)
+ interval[0],
}[distrib.lower()](n_test, nder)
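# --- Usage sketch for test_points_gen (hedged): the "lhs" branch assumes the
# `lhs` sampler (e.g. from pyDOE) imported earlier in this script; the
# "random" branch needs only numpy. Not invoked anywhere.
def _demo_test_points_gen():
    pts_rand = test_points_gen(5, 2, interval=(-1.0, 1.0), distrib="random")
    pts_lhs = test_points_gen(5, 2, interval=(-1.0, 1.0), distrib="lhs")
    print(pts_rand.shape, pts_lhs.shape)  # both (5, 2)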
def getoptimumk(X):
distortions = []
Kss = range(1, 10)
for k in Kss:
kmeanModel = MiniBatchKMeans(n_clusters=k).fit(X)
kmeanModel.fit(X)
distortions.append(
sum(np.min(cdist(X, kmeanModel.cluster_centers_, "euclidean"), axis=1))
/ X.shape[0]
)
myarray = np.array(distortions)
knn = KneeLocator(
Kss, myarray, curve="convex", direction="decreasing", interp_method="interp1d"
)
kuse = knn.knee
# Plot the elbow
plt.figure(figsize=(10, 10))
plt.plot(Kss, distortions, "bx-")
plt.xlabel("cluster size")
plt.ylabel("Distortion")
plt.title("optimal n_clusters for machine")
plt.savefig("machine_elbow.png")
plt.clf()
return kuse
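# --- Usage sketch for getoptimumk (hedged): assumes MiniBatchKMeans, cdist
# and KneeLocator imported earlier in this script. Note the function also
# writes 'machine_elbow.png' as a side effect. Not invoked anywhere.
def _demo_getoptimumk():
    X = np.vstack([np.random.randn(100, 2) + off for off in (0.0, 5.0, 10.0)])
    print(getoptimumk(X))  # expected near 3 for three well-separated blobs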
class LpLoss(object):
"""
loss function with rel/abs Lp loss
"""
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
# Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h ** (self.d / self.p)) * torch.norm(
x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1
)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(
x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1
)
y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms / y_norms)
else:
return torch.sum(diff_norms / y_norms)
return diff_norms / y_norms
def __call__(self, x, y):
return self.rel(x, y)
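# --- Usage sketch for LpLoss: the default __call__ computes the per-sample
# relative L2 error and averages over the batch. Shapes are illustrative.
def _demo_lploss():
    loss_fn = LpLoss(d=2, p=2)
    x = torch.rand(4, 32, 32)  # prediction
    y = torch.rand(4, 32, 32)  # target
    print(loss_fn(x, y))       # mean over samples of ||x - y||_2 / ||y||_2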
def H(y, t0=0):
"""
Step fn with step at t0
"""
    h = np.zeros_like(y)
    h[y > t0] = 1.0
    return h
def smoothn(
y,
nS0=10,
axis=None,
smoothOrder=2.0,
sd=None,
verbose=False,
s0=None,
z0=None,
isrobust=False,
W=None,
s=None,
MaxIter=100,
TolZ=1e-3,
weightstr="bisquare",
):
    if isinstance(y, ma.MaskedArray):  # masked array
# is_masked = True
mask = y.mask
y = np.array(y)
y[mask] = 0.0
        if W is not None:
W = np.array(W)
W[mask] = 0.0
        if sd is not None:
W = np.array(1.0 / sd**2)
W[mask] = 0.0
sd = None
y[mask] = np.nan
    if sd is not None:
sd_ = np.array(sd)
mask = sd > 0.0
W = np.zeros_like(sd_)
W[mask] = 1.0 / sd_[mask] ** 2
sd = None
    if W is not None:
W = W / W.max()
sizy = y.shape
# sort axis
    if axis is None:
axis = tuple(np.arange(y.ndim))
noe = y.size # number of elements
if noe < 2:
z = y
exitflag = 0
Wtot = 0
return z, s, exitflag, Wtot
# ---
# Smoothness parameter and weights
# if s != None:
# s = []
    if W is None:
W = np.ones(sizy)
# if z0 == None:
# z0 = y.copy()
# ---
# "Weighting function" criterion
weightstr = weightstr.lower()
# ---
# Weights. Zero weights are assigned to not finite values (Inf or NaN),
# (Inf/NaN values = missing data).
IsFinite = np.array(np.isfinite(y)).astype(bool)
nof = IsFinite.sum() # number of finite elements
W = W * IsFinite
    if np.any(W < 0):  # builtin any() is ambiguous for N-D arrays
raise RuntimeError("smoothn:NegativeWeights", "Weights must all be >=0")
else:
# W = W/np.max(W)
pass
# ---
# Weighted or missing data?
    isweighted = np.any(W != 1)
# ---
# Robust smoothing?
# isrobust
# ---
# Automatic smoothing?
isauto = not s
# ---
# DCTN and IDCTN are required
    try:
        from scipy.fftpack import dct, idct
    except ImportError:
        z = y
        exitflag = -1
        Wtot = 0
        return z, s, exitflag, Wtot
## Creation of the Lambda tensor
# ---
# Lambda contains the eingenvalues of the difference matrix used in this
# penalized least squares process.
axis = tuple(np.array(axis).flatten())
d = y.ndim
Lambda = np.zeros(sizy)
for i in axis:
# create a 1 x d array (so e.g. [1,1] for a 2D case
siz0 = np.ones((1, y.ndim))[0].astype(int)
siz0[i] = sizy[i]
# cos(pi*(reshape(1:sizy(i),siz0)-1)/sizy(i)))
# (arange(1,sizy[i]+1).reshape(siz0) - 1.)/sizy[i]
Lambda = Lambda + (
np.cos(np.pi * (np.arange(1, sizy[i] + 1) - 1.0) / sizy[i]).reshape(siz0)
)
# else:
# Lambda = Lambda + siz0
Lambda = -2.0 * (len(axis) - Lambda)
if not isauto:
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
## Upper and lower bound for the smoothness parameter
# The average leverage (h) is by definition in [0 1]. Weak smoothing occurs
# if h is close to 1, while over-smoothing appears when h is near 0. Upper
# and lower bounds for h are given to avoid under- or over-smoothing. See
# equation relating h to the smoothness parameter (Equation #12 in the
# referenced CSDA paper).
N = sum(np.array(sizy) != 1)
# tensor rank of the y-array
hMin = 1e-6
hMax = 0.99
# (h/n)**2 = (1 + a)/( 2 a)
# a = 1/(2 (h/n)**2 -1)
# where a = sqrt(1 + 16 s)
# (a**2 -1)/16
try:
sMinBnd = np.sqrt(
(
((1 + np.sqrt(1 + 8 * hMax ** (2.0 / N))) / 4.0 / hMax ** (2.0 / N))
** 2
- 1
)
/ 16.0
)
sMaxBnd = np.sqrt(
(
((1 + np.sqrt(1 + 8 * hMin ** (2.0 / N))) / 4.0 / hMin ** (2.0 / N))
** 2
- 1
)
/ 16.0
)
    except Exception:
sMinBnd = None
sMaxBnd = None
## Initialize before iterating
# ---
Wtot = W
# --- Initial conditions for z
if isweighted:
# --- With weighted/missing data
# An initial guess is provided to ensure faster convergence. For that
# purpose, a nearest neighbor interpolation followed by a coarse
# smoothing are performed.
# ---
        if z0 is not None:  # an initial guess (z0) has been provided
z = z0
else:
z = y # InitialGuess(y,IsFinite);
z[~IsFinite] = 0.0
else:
z = np.zeros(sizy)
# ---
z0 = z
y[~IsFinite] = 0
# arbitrary values for missing y-data
# ---
tol = 1.0
RobustIterativeProcess = True
RobustStep = 1
nit = 0
# --- Error on p. Smoothness parameter s = 10^p
errp = 0.1
# opt = optimset('TolX',errp);
# --- Relaxation factor RF: to speedup convergence
RF = 1 + 0.75 * isweighted
# ??
## Main iterative process
# ---
if isauto:
try:
xpost = np.array([(0.9 * np.log10(sMinBnd) + np.log10(sMaxBnd) * 0.1)])
        except Exception:
            xpost = np.array([100.0])
else:
xpost = np.array([np.log10(s)])
while RobustIterativeProcess:
# --- "amount" of weights (see the function GCVscore)
        aow = Wtot.sum() / noe  # sum over all elements (Wtot may be N-D)
# 0 < aow <= 1
# ---
while tol > TolZ and nit < MaxIter:
if verbose:
print("tol", tol, "nit", nit)
nit = nit + 1
DCTy = dctND(Wtot * (y - z) + z, f=dct)
if isauto and not np.remainder(np.log2(nit), 1):
# ---
# The generalized cross-validation (GCV) method is used.
# We seek the smoothing parameter s that minimizes the GCV
# score i.e. s = Argmin(GCVscore).
# Because this process is time-consuming, it is performed from
# time to time (when nit is a power of 2)
# ---
# errp in here somewhere
# xpost,f,d = lbfgsb.fmin_l_bfgs_b(gcv,xpost,fprime=None,factr=10.,\
# approx_grad=True,bounds=[(log10(sMinBnd),log10(sMaxBnd))],\
# args=(Lambda,aow,DCTy,IsFinite,Wtot,y,nof,noe))
# if we have no clue what value of s to use, better span the
# possible range to get a reasonable starting point ...
                    # only need to do it once though. nS0 is the number of samples used
if not s0:
ss = np.arange(nS0) * (1.0 / (nS0 - 1.0)) * (
np.log10(sMaxBnd) - np.log10(sMinBnd)
) + np.log10(sMinBnd)
g = np.zeros_like(ss)
for i, p in enumerate(ss):
g[i] = gcv(
p,
Lambda,
aow,
DCTy,
IsFinite,
Wtot,
y,
nof,
noe,
smoothOrder,
)
# print 10**p,g[i]
xpost = [ss[g == g.min()]]
# print '==============='
# print nit,tol,g.min(),xpost[0],s
# print '==============='
else:
xpost = [s0]
xpost, f, d = lbfgsb.fmin_l_bfgs_b(
gcv,
xpost,
fprime=None,
factr=1e7,
approx_grad=True,
bounds=[(np.log10(sMinBnd), np.log10(sMaxBnd))],
args=(Lambda, aow, DCTy, IsFinite, Wtot, y, nof, noe, smoothOrder),
)
s = 10 ** xpost[0]
# update the value we use for the initial s estimate
s0 = xpost[0]
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
z = RF * dctND(Gamma * DCTy, f=idct) + (1 - RF) * z
# if no weighted/missing data => tol=0 (no iteration)
tol = isweighted * norm(z0 - z) / norm(z)
z0 = z
# re-initialization
exitflag = nit < MaxIter
if isrobust: # -- Robust Smoothing: iteratively re-weighted process
# --- average leverage
h = np.sqrt(1 + 16.0 * s)
h = np.sqrt(1 + h) / np.sqrt(2) / h
h = h**N
# --- take robust weights into account
Wtot = W * RobustWeights(y - z, IsFinite, h, weightstr)
# --- re-initialize for another iterative weighted process
isweighted = True
tol = 1
nit = 0
# ---
RobustStep = RobustStep + 1
RobustIterativeProcess = RobustStep < 3
# 3 robust steps are enough.
else:
RobustIterativeProcess = False
# stop the whole process
## Warning messages
# ---
if isauto:
if abs(np.log10(s) - np.log10(sMinBnd)) < errp:
warning(
"MATLAB:smoothn:SLowerBound",
[
"s = %.3f " % (s)
+ ": the lower bound for s "
+ "has been reached. Put s as an input variable if required."
],
)
elif abs(np.log10(s) - np.log10(sMaxBnd)) < errp:
warning(
"MATLAB:smoothn:SUpperBound",
[
"s = %.3f " % (s)
+ ": the upper bound for s "
+ "has been reached. Put s as an input variable if required."
],
)
return z, s, exitflag, Wtot
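# --- Usage sketch for smoothn (hedged): denoise a 1D signal with the
# smoothness parameter chosen automatically by GCV. Assumes the scipy/lbfgsb
# and norm aliases referenced inside smoothn were imported earlier in this
# script. Not invoked anywhere.
def _demo_smoothn():
    x = np.linspace(0, 100, 256)
    y = np.cos(x / 10.0) + (x / 50.0) ** 2 + np.random.randn(x.size) / 10.0
    z, s, exitflag, Wtot = smoothn(y)
    print(s, exitflag)  # chosen smoothness parameter and convergence flag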
def warning(s1, s2):
print(s1)
print(s2[0])
## GCV score
# ---
# function GCVscore = gcv(p)
def gcv(p, Lambda, aow, DCTy, IsFinite, Wtot, y, nof, noe, smoothOrder):
# Search the smoothing parameter s that minimizes the GCV score
# ---
s = 10**p
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
# --- RSS = Residual sum-of-squares
if aow > 0.9: # aow = 1 means that all of the data are equally weighted
# very much faster: does not require any inverse DCT
RSS = norm(DCTy * (Gamma - 1.0)) ** 2
else:
# take account of the weights to calculate RSS:
yhat = dctND(Gamma * DCTy, f=idct)
RSS = norm(np.sqrt(Wtot[IsFinite]) * (y[IsFinite] - yhat[IsFinite])) ** 2
# ---
    TrH = Gamma.sum()  # trace term: sum over all elements of Gamma
GCVscore = RSS / float(nof) / (1.0 - TrH / float(noe)) ** 2
return GCVscore
## Robust weights
# function W = RobustWeights(r,I,h,wstr)
def RobustWeights(r, I, h, wstr):
# weights for robust smoothing.
MAD = np.median(abs(r[I] - np.median(r[I])))
# median absolute deviation
u = abs(r / (1.4826 * MAD) / np.sqrt(1 - h))
# studentized residuals
if wstr == "cauchy":
c = 2.385
W = 1.0 / (1 + (u / c) ** 2)
# Cauchy weights
elif wstr == "talworth":
c = 2.795
W = u < c
# Talworth weights
else:
c = 4.685
W = (1 - (u / c) ** 2) ** 2.0 * ((u / c) < 1)
# bisquare weights
W[np.isnan(W)] = 0
return W
## Initial Guess with weighted/missing data
# function z = InitialGuess(y,I)
def InitialGuess(y, I):
# -- nearest neighbor interpolation (in case of missing values)
    if np.any(~I):
        try:
            from scipy.ndimage import distance_transform_edt
# if license('test','image_toolbox')
# [z,L] = bwdist(I);
L = distance_transform_edt(1 - I)
z = y
z[~I] = y[L[~I]]
        except ImportError:
# If BWDIST does not exist, NaN values are all replaced with the
# same scalar. The initial guess is not optimal and a warning
# message thus appears.
z = y
z[~I] = np.mean(y[I])
else:
z = y
# coarse fast smoothing
z = dctND(z, f=dct)
k = np.array(z.shape)
m = np.ceil(k / 10) + 1
d = []
    for i in range(len(k)):
d.append(np.arange(m[i], k[i]))
d = np.array(d).astype(int)
z[d] = 0.0
z = dctND(z, f=idct)
return z
def dctND(data, f=dct):
nd = len(data.shape)
if nd == 1:
return f(data, norm="ortho", type=2)
elif nd == 2:
return f(f(data, norm="ortho", type=2).T, norm="ortho", type=2).T
elif nd == 3:
return f(
f(f(data, norm="ortho", type=2, axis=0), norm="ortho", type=2, axis=1),
norm="ortho",
type=2,
axis=2,
)
elif nd == 4:
return f(
f(
f(f(data, norm="ortho", type=2, axis=0), norm="ortho", type=2, axis=1),
norm="ortho",
type=2,
axis=2,
),
norm="ortho",
type=2,
axis=3,
)
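# --- Sanity-check sketch for dctND: an orthonormal forward DCT followed by
# the inverse DCT should reproduce the input up to floating-point error.
# Assumes dct/idct (scipy) are available at module scope, as the default
# argument of dctND already requires. Not invoked anywhere.
def _demo_dctND_roundtrip():
    data = np.random.rand(8, 8)
    back = dctND(dctND(data, f=dct), f=idct)
    print(np.allclose(data, back))  # expected: True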
def peaks(n):
"""
Mimic basic of matlab peaks fn
"""
xp = np.arange(n)
[x, y] = np.meshgrid(xp, xp)
z = np.zeros_like(x).astype(float)
    for i in range(int(n / 5)):
x0 = random() * n
y0 = random() * n
sdx = random() * n / 4.0
sdy = sdx
c = random() * 2 - 1.0
f = np.exp(
-(((x - x0) / sdx) ** 2)
- ((y - y0) / sdy) ** 2
- (((x - x0) / sdx)) * ((y - y0) / sdy) * c
)
# f /= f.sum()
f *= random()
z += f
return z
def RelPerm2(Sa, UW, UO, BW, BO, SWI, SWR, nx, ny, nz):
"""
Computes the relative permeability and its derivative w.r.t saturation S,
based on Brooks and Corey model.
Parameters
----------
Sa : array_like
Saturation value.
UW : float
Water viscosity.
UO : float
Oil viscosity.
BW : float
Water formation volume factor.
BO : float
Oil formation volume factor.
SWI : float
Initial water saturation.
SWR : float
Residual water saturation.
nx, ny, nz : int
The number of grid cells in x, y, and z directions.
Returns
-------
Mw : array_like
Water relative permeability.
Mo : array_like
Oil relative permeability.
dMw : array_like
Water relative permeability derivative w.r.t saturation.
dMo : array_like
Oil relative permeability derivative w.r.t saturation.
"""
S = (Sa - SWI) / (1 - SWI - SWR)
Mw = (S**2) / (UW * BW) # Water mobility
Mo = (1 - S) ** 2 / (UO * BO) # Oil mobility
dMw = 2 * S / (UW * BW) / (1 - SWI - SWR)
dMo = -2 * (1 - S) / (UO * BO) / (1 - SWI - SWR)
return (
cp.reshape(Mw, (-1, 1), "F"),
cp.reshape(Mo, (-1, 1), "F"),
cp.reshape(dMw, (-1, 1), "F"),
cp.reshape(dMo, (-1, 1), "F"),
)
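# --- Usage sketch for RelPerm2 (Brooks-Corey mobilities, hedged): the
# function reshapes with `cp` (cupy, imported earlier in this script); numpy
# exposes the same linspace/reshape API, so it can stand in for a CPU check.
# Values are illustrative. Not invoked anywhere.
def _demo_relperm2():
    Sa = cp.linspace(0.2, 0.8, 10)
    Mw, Mo, dMw, dMo = RelPerm2(
        Sa, UW=1.0, UO=2.5, BW=1.0, BO=1.1, SWI=0.2, SWR=0.2, nx=10, ny=1, nz=1
    )
    print(Mw.shape, Mo.shape)  # each (10, 1)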
def calc_mu_g(p):
    # Average reservoir pressure
mu_g = 3e-10 * p**2 + 1e-6 * p + 0.0133
return mu_g
def calc_rs(p_bub, p):
# p=average reservoir pressure
if p < p_bub:
rs_factor = 1
else:
rs_factor = 0
rs = 178.11**2 / 5.615 * ((p / p_bub) ** 1.3 * rs_factor + (1 - rs_factor))
return rs
def calc_dp(p_bub, p_atm, p):
if p < p_bub:
dp = p_atm - p
else:
dp = p_atm - p_bub
return dp
def calc_bg(p_bub, p_atm, p):
    # p is average reservoir pressure
b_g = 1 / (cp.exp(1.7e-3 * calc_dp(p_bub, p_atm, p)))
return b_g
def calc_bo(p_bub, p_atm, CFO, p):
# p is average reservoir pressure
if p < p_bub:
b_o = 1 / cp.exp(-8e-5 * (p_atm - p))
else:
b_o = 1 / (cp.exp(-8e-5 * (p_atm - p_bub)) * cp.exp(-CFO * (p - p_bub)))
return b_o
def ProgressBar(Total, Progress, BarLength=20, ProgressIcon="#", BarIcon="-"):
try:
# You can't have a progress bar with zero or negative length.
if BarLength < 1:
BarLength = 20
# Use status variable for going to the next line after progress completion.
Status = ""
        # Calculating progress between 0 and 1 for percentage.
Progress = float(Progress) / float(Total)
        # Clamp progress at completion and prepare to move to the next line.
if Progress >= 1.0:
Progress = 1
Status = "\r\n" # Going to the next line
# Calculating how many places should be filled
Block = int(round(BarLength * Progress))
# Show this
Bar = "[{}] {:.0f}% {}".format(
ProgressIcon * Block + BarIcon * (BarLength - Block),
round(Progress * 100, 0),
Status,
)
return Bar
    except Exception:
return "ERROR"
def ShowBar(Bar):
sys.stdout.write(Bar)
sys.stdout.flush()
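# --- Usage sketch for ProgressBar/ShowBar: prefixing the bar with a carriage
# return makes it overwrite itself on a single terminal line.
def _demo_progress_bar():
    Total = 50
    for i in range(Total + 1):
        ShowBar("\r" + ProgressBar(Total, i, BarLength=20))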
def Equivalent_time(tim1, max_t1, tim2, max_t2):
tk2 = tim1 / max_t1
tc2 = np.arange(0.0, 1 + tk2, tk2)
tc2[tc2 >= 1] = 1
tc2 = tc2.reshape(-1, 1) # reference scaled to 1
tc2r = np.arange(0.0, max_t1 + tim1, tim1)
tc2r[tc2r >= max_t1] = max_t1
tc2r = tc2r.reshape(-1, 1) # reference original
func = interpolate.interp1d(tc2r.ravel(), tc2.ravel())
tc2rr = np.arange(0.0, max_t2 + tim2, tim2)
tc2rr[tc2rr >= max_t2] = max_t2
tc2rr = tc2rr.reshape(-1, 1) # reference original
ynew = func(tc2rr.ravel())
return ynew
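# --- Usage sketch for Equivalent_time: map the time grid of a second run
# (step 50 days, horizon 1500 days) onto the [0, 1] normalized clock of a
# reference run (step 100 days, horizon 3000 days). Numbers are illustrative.
def _demo_equivalent_time():
    ynew = Equivalent_time(100.0, 3000.0, 50.0, 1500.0)
    print(ynew[0], ynew[-1])  # 0.0 and 0.5 (1500 of 3000 days)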
def simulator_to_python(a):
kk = a.shape[2]
anew = []
for i in range(kk):
afirst = a[:, :, i]
afirst = afirst.T
afirst = cp.reshape(afirst, (-1, 1), "F")
anew.append(afirst)
return cp.vstack(anew)
def python_to_simulator(a, ny, nx, nz):
a = cp.reshape(a, (-1, 1), "F")
a = cp.reshape(a, (ny, nx, nz), "F")
anew = []
for i in range(nz):
afirst = a[:, :, i]
afirst = afirst.T
anew.append(afirst)
return cp.vstack(anew)
def compute_f(
pressure, kuse, krouse, krwuse, rwell1, skin, pwf_producer1, UO, BO, DX, UW, BW, DZ
):
RE = 0.2 * cp.asarray(DX)
up = UO * BO
# facc = tf.constant(10,dtype = tf.float64)
DZ = cp.asarray(DZ)
down = 2.0 * cp.pi * kuse * krouse * DZ
# down = piit * pii * krouse * DZ1
right = cp.log(RE / cp.asarray(rwell1)) + cp.asarray(skin)
J = down / (up * right)
drawdown = pressure - cp.asarray(pwf_producer1)
qoil = -((drawdown) * J)
aa = qoil * 1e-5
# aa[aa<=0] = 0
# print(aa)
# water production
up2 = UW * BW
down = 2.0 * cp.pi * kuse * krwuse * DZ
J = down / (up2 * right)
drawdown = pressure - cp.asarray(pwf_producer1)
qwater = -((drawdown) * J)
aaw = qwater * 1e-5
# aaw = (qwater)
# aaw[aaw<=0] = 0
# print(qwater)
ouut = aa + aaw
return -(ouut) # outnew
def rescale_linear(array, new_min, new_max):
"""Rescale an arrary linearly."""
minimum, maximum = np.min(array), np.max(array)
m = (new_max - new_min) / (maximum - minimum)
b = new_min - m * minimum
return m * array + b
def rescale_linear_numpy_pytorch(array, new_min, new_max, minimum, maximum):
"""Rescale an arrary linearly."""
m = (new_max - new_min) / (maximum - minimum)
b = new_min - m * minimum
return m * array + b
def rescale_linear_pytorch_numpy(array, new_min, new_max, minimum, maximum):
"""Rescale an arrary linearly."""
m = (maximum - minimum) / (new_max - new_min)
b = minimum - m * new_min
return m * array + b
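# --- Round-trip sketch: rescale_linear_numpy_pytorch maps [minimum, maximum]
# onto [new_min, new_max]; rescale_linear_pytorch_numpy applies the inverse
# mapping, so composing the two recovers the original array.
def _demo_rescale_roundtrip():
    arr = np.array([1.0, 50.0, 500.0])
    fwd = rescale_linear_numpy_pytorch(arr, 0.0, 1.0, arr.min(), arr.max())
    back = rescale_linear_pytorch_numpy(fwd, 0.0, 1.0, arr.min(), arr.max())
    print(np.allclose(arr, back))  # expected: True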
def compute_metrics(y_true, y_pred):
y_true_mean = np.mean(y_true)
TSS = np.sum((y_true - y_true_mean) ** 2)
RSS = np.sum((y_true - y_pred) ** 2)
R2 = 1 - (RSS / TSS)
L2_accuracy = 1 - np.sqrt(RSS) / np.sqrt(TSS)
return R2, L2_accuracy
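# --- Usage sketch for compute_metrics: R^2 and the L2-based accuracy both
# equal 1 for a perfect prediction and decrease with the residual.
def _demo_compute_metrics():
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred = np.array([1.1, 1.9, 3.2, 3.8])
    R2, L2_accuracy = compute_metrics(y_true, y_pred)
    print(R2, L2_accuracy)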
def Add_marker2(plt, XX, YY, injectors, producers):
"""
    Add injector and producer markers with labels at given well coordinates on a matplotlib plot.
    Parameters:
    plt: a matplotlib.pyplot object to add the markers to
    XX: a numpy array of X coordinates
    YY: a numpy array of Y coordinates
    injectors: iterable of injector well records; entries 0 and 1 are the x/y grid locations and entry 8 is the label
    producers: iterable of producer well records; entries 0 and 1 are the x/y grid locations and entry 8 is the label
    Return:
    None
"""
n_inj = len(injectors) # Number of injectors
n_prod = len(producers) # Number of producers
for mm in range(n_inj):
usethis = injectors[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[8])
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=200,
marker="v",
color="white",
)
plt.text(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
discrip,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
for mm in range(n_prod):
usethis = producers[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[8])
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=200,
marker="^",
color="white",
)
plt.text(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
discrip,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
def Plot_2D(XX, YY, plt, nx, ny, nz, Truee, N_injw, N_pr, varii, injectors, producers):
Pressz = np.reshape(Truee, (nx, ny, nz), "F")
maxii = max(Pressz.ravel())
minii = min(Pressz.ravel())
avg_2d = np.mean(Pressz, axis=2)
avg_2d[avg_2d == 0] = np.nan # Convert zeros to NaNs
# XX, YY = np.meshgrid(np.arange(nx),np.arange(ny))
# plt.subplot(224)
plt.pcolormesh(XX.T, YY.T, avg_2d, cmap="jet")
cbar = plt.colorbar()
if varii == "perm":
cbar.set_label("Log K(mD)", fontsize=11)
plt.title("Permeability Field with well locations", fontsize=11, weight="bold")
elif varii == "water Modulus":
cbar.set_label("water saturation", fontsize=11)
plt.title("water saturation -Modulus", fontsize=11, weight="bold")
elif varii == "water Numerical":
cbar.set_label("water saturation", fontsize=11)
plt.title("water saturation - Numerical", fontsize=11, weight="bold")
elif varii == "water diff":
cbar.set_label("unit", fontsize=11)
plt.title(
"water saturation - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold"
)
elif varii == "oil Modulus":
cbar.set_label("Oil saturation", fontsize=11)
plt.title("Oil saturation -Modulus", fontsize=11, weight="bold")
elif varii == "oil Numerical":
cbar.set_label("Oil saturation", fontsize=11)
plt.title("Oil saturation - Numerical", fontsize=11, weight="bold")
elif varii == "oil diff":
cbar.set_label("unit", fontsize=11)
plt.title(
"oil saturation - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold"
)
elif varii == "pressure Modulus":
cbar.set_label("pressure(psia)", fontsize=11)
plt.title("Pressure -Modulus", fontsize=11, weight="bold")
elif varii == "pressure Numerical":
cbar.set_label("pressure(psia)", fontsize=11)
plt.title("Pressure -Numerical", fontsize=11, weight="bold")
elif varii == "pressure diff":
cbar.set_label("unit", fontsize=11)
plt.title("Pressure - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold")
elif varii == "porosity":
cbar.set_label("porosity", fontsize=11)
plt.title("Porosity Field", fontsize=11, weight="bold")
cbar.mappable.set_clim(minii, maxii)
plt.ylabel("Y", fontsize=11)
plt.xlabel("X", fontsize=11)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
Add_marker2(plt, XX, YY, injectors, producers)
def Plot_Modulus(ax, nx, ny, nz, Truee, N_injw, N_pr, varii, injectors, producers):
# matplotlib.use('Agg')
Pressz = np.reshape(Truee, (nx, ny, nz), "F")
avg_2d = np.mean(Pressz, axis=2)
avg_2d[avg_2d == 0] = np.nan # Convert zeros to NaNs
maxii = max(Pressz.ravel())
minii = min(Pressz.ravel())
Pressz = Pressz / maxii
masked_Pressz = Pressz
colors = plt.cm.jet(masked_Pressz)
# colors[np.isnan(Pressz), :3] = 1 # set color to white for NaN values
# alpha = np.where(np.isnan(Pressz), 0.0, 0.8) # set alpha to 0 for NaN values
norm = mpl.colors.Normalize(vmin=minii, vmax=maxii)
arr_3d = Pressz
# fig = plt.figure(figsize=(20, 20), dpi = 200)
# ax = fig.add_subplot(221, projection='3d')
# Shift the coordinates to center the points at the voxel locations
    x, y, z = np.indices(arr_3d.shape)
x = x + 0.5
y = y + 0.5
z = z + 0.5
# Set the colors of each voxel using a jet colormap
# colors = plt.cm.jet(arr_3d)
# norm = matplotlib.colors.Normalize(vmin=minii, vmax=maxii)
# Plot each voxel and save the mappable object
ax.voxels(arr_3d, facecolors=colors, alpha=0.5, edgecolor="none", shade=True)
m = cm.ScalarMappable(cmap=plt.cm.jet, norm=norm)
m.set_array([])
# Add a colorbar for the mappable object
# plt.colorbar(mappable)
# Set the axis labels and title
ax.set_xlabel("X axis")
ax.set_ylabel("Y axis")
ax.set_zlabel("Z axis")
# ax.set_title(titti,fontsize= 14)
# Set axis limits to reflect the extent of each axis of the matrix
ax.set_xlim(0, arr_3d.shape[0])
ax.set_ylim(0, arr_3d.shape[1])
ax.set_zlim(0, arr_3d.shape[2])
# ax.set_zlim(0, 60)
# Remove the grid
ax.grid(False)
# Set lighting to bright
# ax.set_facecolor('white')
# Set the aspect ratio of the plot
ax.set_box_aspect([nx, ny, nz])
# Set the projection type to orthogonal
ax.set_proj_type("ortho")
# Remove the tick labels on each axis
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# Remove the tick lines on each axis
ax.xaxis._axinfo["tick"]["inward_factor"] = 0
ax.xaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.yaxis._axinfo["tick"]["inward_factor"] = 0
ax.yaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.zaxis._axinfo["tick"]["inward_factor"] = 0
ax.zaxis._axinfo["tick"]["outward_factor"] = 0.4
# Set the azimuth and elevation to make the plot brighter
ax.view_init(elev=30, azim=45)
n_inj = N_injw # Number of injectors
n_prod = N_pr # Number of producers
for mm in range(n_inj):
usethis = injectors[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
# Define the direction of the line
line_dir = (0, 0, (nz * 2) + 7)
# Define the coordinates of the line end
x_line_end = xloc + line_dir[0]
y_line_end = yloc + line_dir[1]
z_line_end = 0 + line_dir[2]
ax.plot([xloc, xloc], [yloc, yloc], [0, (nz * 2) + 7], "blue", linewidth=1)
ax.text(
x_line_end,
y_line_end,
z_line_end,
discrip,
color="blue",
weight="bold",
fontsize=5,
)
for mm in range(n_prod):
usethis = producers[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
# Define the direction of the line
line_dir = (0, 0, (nz * 2) + 5)
# Define the coordinates of the line end
x_line_end = xloc + line_dir[0]
y_line_end = yloc + line_dir[1]
z_line_end = 0 + line_dir[2]
ax.plot([xloc, xloc], [yloc, yloc], [0, (nz * 2) + 5], "r", linewidth=1)
ax.text(
x_line_end,
y_line_end,
z_line_end,
discrip,
color="g",
weight="bold",
fontsize=5,
)
    blue_line = mlines.Line2D([], [], color="blue", linewidth=2, label="water injector")
    red_line = mlines.Line2D([], [], color="red", linewidth=2, label="oil producer")
    # Add the legend to the plot
    ax.legend(handles=[blue_line, red_line], loc="lower left", fontsize=9)
# Add a horizontal colorbar to the plot
cbar = plt.colorbar(m, orientation="horizontal", shrink=0.5)
if varii == "perm":
cbar.set_label("Log K(mD)", fontsize=12)
ax.set_title(
"Permeability Field with well locations", fontsize=12, weight="bold"
)
elif varii == "water Modulus":
cbar.set_label("water saturation", fontsize=12)
ax.set_title("water saturation -Modulus", fontsize=12, weight="bold")
elif varii == "water Numerical":
cbar.set_label("water saturation", fontsize=12)
ax.set_title("water saturation - Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "water diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"water saturation - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "oil Modulus":
cbar.set_label("Oil saturation", fontsize=12)
ax.set_title("Oil saturation -Modulus", fontsize=12, weight="bold")
elif varii == "oil Numerical":
cbar.set_label("Oil saturation", fontsize=12)
ax.set_title("Oil saturation - Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "oil diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"oil saturation - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "pressure Modulus":
cbar.set_label("pressure", fontsize=12)
ax.set_title("Pressure -Modulus", fontsize=12, weight="bold")
elif varii == "pressure Numerical":
cbar.set_label("pressure", fontsize=12)
ax.set_title("Pressure -Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "pressure diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"Pressure - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "porosity":
cbar.set_label("porosity", fontsize=12)
ax.set_title("Porosity Field", fontsize=12, weight="bold")
cbar.mappable.set_clim(minii, maxii)
def Plot_Models(True_mat):
colors = ["r", "b", "k"]
linestyles = ["-", "--", "--"]
markers = ["o", "v", "*"]
timezz = True_mat[0][:, 0].reshape(-1, 1)
plt.figure(figsize=(40, 40))
plt.subplot(4, 4, 1)
plt.plot(
timezz,
True_mat[0][:, 1],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 1],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 1],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 2)
plt.plot(
timezz,
True_mat[0][:, 2],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 2],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 2],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 3)
plt.plot(
timezz,
True_mat[0][:, 3],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 3],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 3],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I3", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 4)
plt.plot(
timezz,
True_mat[0][:, 4],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 4],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 4],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I4", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 5)
plt.plot(
timezz,
True_mat[0][:, 5],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 5],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 5],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 6)
plt.plot(
timezz,
True_mat[0][:, 6],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 6],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 6],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P2", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 7)
plt.plot(
timezz,
True_mat[0][:, 7],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 7],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 7],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P3", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 8)
plt.plot(
timezz,
True_mat[0][:, 8],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 8],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 8],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P4", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 9)
plt.plot(
timezz,
True_mat[0][:, 9],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 9],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 9],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{water}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 10)
plt.plot(
timezz,
True_mat[0][:, 10],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 10],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 10],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{water}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P2", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 11)
plt.plot(
timezz,
True_mat[0][:, 11],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 11],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 11],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{water}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P3", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 12)
plt.plot(
timezz,
True_mat[0][:, 12],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 12],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 12],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{water}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P4", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 13)
plt.plot(
timezz,
True_mat[0][:, 13],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 13],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 13],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$WWCT{%}$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 14)
plt.plot(
timezz,
True_mat[0][:, 14],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 14],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 14],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$WWCT{%}$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P2", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 15)
plt.plot(
timezz,
True_mat[0][:, 15],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 15],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 15],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$WWCT{%}$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 16)
plt.plot(
timezz,
True_mat[0][:, 16],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 16],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 16],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$WWCT{%}$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
# plt.legend()
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
# os.chdir('RESULTS')
plt.savefig(
"Compare_models.png"
) # save as png # preventing the figures from showing
# os.chdir(oldfolder)
plt.clf()
plt.close()
def Plot_bar(True_mat):
a1 = rmsee(True_mat[1][:, 1:].ravel(), True_mat[0][:, 1:].ravel())
a2 = rmsee(True_mat[2][:, 1:].ravel(), True_mat[0][:, 1:].ravel())
models = ["FNO", "PINO"]
rmse_values = [a1, a2]
colors = ["red", "blue"]
# Create a bar chart
plt.figure(figsize=(10, 10))
plt.bar(models, rmse_values, color=colors)
# Add a title and labels
plt.title("RMSE accuracy", weight="bold", fontsize=16)
# Add x and y labels with bold and bigger font
plt.xlabel("Surrogate Models", weight="bold", fontsize=14)
plt.ylabel("RMSE", weight="bold", fontsize=14)
    plt.savefig("Bar_chart.png")  # save as png; prevents the figure from showing
# os.chdir(oldfolder)
plt.clf()
plt.close()
def rmsee(predictions, targets):
    noww = predictions.reshape(-1, 1)
    measurement = targets.reshape(-1, 1)
    # the square root of the mean of the squared differences
    rmse_val = np.sqrt(np.mean((noww - measurement) ** 2))
    return rmse_val
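# Minimal usage sketch for rmsee (illustrative values only):
#   rmsee(np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, 4.0]))
#   -> sqrt(mean([0, 0, 1])) ~= 0.5774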
| modulus-sym-main | examples/reservoir_simulation/3D/src/NVRS.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
def dx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute first order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
    # get filter (order=1 -> 3-point stencil, order=3 -> 7-point stencil)
    if order == 1:
        ddx1D = torch.Tensor(
            [
                -0.5,
                0.0,
                0.5,
            ]
        ).to(inpt.device)
    elif order == 3:
        ddx1D = torch.Tensor(
            [
                -1.0 / 60.0,
                3.0 / 20.0,
                -3.0 / 4.0,
                0.0,
                3.0 / 4.0,
                -3.0 / 20.0,
                1.0 / 60.0,
            ]
        ).to(inpt.device)
    else:
        raise ValueError(f"Unsupported stencil order: {order}")
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
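# Note on `dim`: with input shaped [batch, channel, H, W], dim=0 takes the
# derivative along axis 2 (H) and dim=1 along axis 3 (W); the slicing above
# trims the padded axis so the output keeps the input's spatial size.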
def ddx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute second order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
    # get filter (order=1 -> 3-point stencil, order=3 -> 7-point stencil)
    if order == 1:
        ddx1D = torch.Tensor(
            [
                1.0,
                -2.0,
                1.0,
            ]
        ).to(inpt.device)
    elif order == 3:
        ddx1D = torch.Tensor(
            [
                1.0 / 90.0,
                -3.0 / 20.0,
                3.0 / 2.0,
                -49.0 / 18.0,
                3.0 / 2.0,
                -3.0 / 20.0,
                1.0 / 90.0,
            ]
        ).to(inpt.device)
    else:
        raise ValueError(f"Unsupported stencil order: {order}")
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx**2) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
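if __name__ == "__main__":
    # Hedged self-check (illustrative, not part of the original module):
    # on a field f(x) = sin(x) the stencils above should return roughly
    # cos(x) (first derivative) and -sin(x) (second derivative) away from
    # the boundaries, where replication padding degrades the accuracy.
    import math

    n = 64
    h = 2.0 * math.pi / n
    x = torch.arange(n, dtype=torch.float32) * h
    # field varies along the last axis, so dim=1 selects that direction
    field = torch.sin(x).reshape(1, 1, 1, n).repeat(1, 1, n, 1)
    d1 = dx(field, h, channel=0, dim=1, order=1, padding="replication")
    d2 = ddx(field, h, channel=0, dim=1, order=1, padding="replication")
    err1 = (d1[0, 0, 0, 1:-1] - torch.cos(x)[1:-1]).abs().max().item()
    err2 = (d2[0, 0, 0, 1:-1] + torch.sin(x)[1:-1]).abs().max().item()
    print(f"max interior error, first derivative: {err1:.2e}")
    print(f"max interior error, second derivative: {err2:.2e}")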
| modulus-sym-main | examples/reservoir_simulation/3D/src/ops.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from NVRS import *
from modulus.sym.models.fno import *
import shutil
import pandas as pd
import scipy.io as sio
import torch
import yaml
from multiprocessing import Lock, Value
from PIL import Image
import requests
import concurrent.futures
def read_yaml(fname):
"""Read Yaml file into a dict of parameters"""
print(f"Read simulation plan from {fname}...")
with open(fname, "r") as stream:
try:
data = yaml.safe_load(stream)
# print(data)
except yaml.YAMLError as exc:
print(exc)
return data
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
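# Usage sketch (the file id below is a placeholder, not a real asset):
#   download_file_from_google_drive("FILE_ID", "../PACKETS/archive.mat")
# Large files trigger Google's "download_warning" cookie, which
# get_confirm_token above extracts so the download can be confirmed.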
def process_chunk(chunk):
chunk_results = []
for kk in chunk:
result = process_step(kk)
chunk_results.append(result)
with lock:
Runs = len(chunks)
processed_chunks.value += 1
completion_percentage = (processed_chunks.value / len(chunks)) * 100
remaining_percentage = 100 - completion_percentage
# print(f"Processed chunk {processed_chunks.value}. {completion_percentage:.2f}% completed. {remaining_percentage:.2f}% remaining.")
progressBar = "\rPlotting Progress: " + ProgressBar(
Runs, processed_chunks.value, Runs
)
ShowBar(progressBar)
# time.sleep(1)
return chunk_results
def sort_key(s):
"""Extract the number from the filename for sorting."""
return int(re.search(r"\d+", s).group())
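# Example: sorted(["Dynamic10.png", "Dynamic2.png"], key=sort_key) puts
# "Dynamic2.png" first, avoiding the lexicographic "10 before 2" ordering.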
def process_step(kk):
    current_time = int((kk + 1) * dt * MAXZ)
    f_3 = plt.figure(figsize=(20, 20), dpi=200)
look = Reinvent(ouut_p[0, kk, :, :, :])
look = look * pini_alt
lookf = Reinvent(cPress[0, kk, :, :, :])
lookf = lookf * pini_alt
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(331, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "pressure Modulus", injectors, producers
)
ax2 = f_3.add_subplot(332, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "pressure Numerical", injectors, producers
)
ax3 = f_3.add_subplot(333, projection="3d")
Plot_Modulus(
ax3, nx, ny, nz, diff1, N_injw, N_pr, "pressure diff", injectors, producers
)
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
look = Reinvent(ouut_s[0, kk, :, :, :])
lookf = Reinvent(cSat[0, kk, :, :, :])
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(334, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "water Modulus", injectors, producers
)
ax2 = f_3.add_subplot(335, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "water Numerical", injectors, producers
)
ax3 = f_3.add_subplot(336, projection="3d")
Plot_Modulus(
ax3, nx, ny, nz, diff1, N_injw, N_pr, "water diff", injectors, producers
)
R2w, L2w = compute_metrics(look.ravel(), lookf.ravel())
look = 1 - (Reinvent(ouut_s[0, kk, :, :, :]))
lookf = 1 - (Reinvent(cSat[0, kk, :, :, :]))
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(337, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "oil Modulus", injectors, producers
)
ax2 = f_3.add_subplot(338, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "oil Numerical", injectors, producers
)
ax3 = f_3.add_subplot(339, projection="3d")
Plot_Modulus(ax3, nx, ny, nz, diff1, N_injw, N_pr, "oil diff", injectors, producers)
R2o, L2o = compute_metrics(look.ravel(), lookf.ravel())
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(current_time) + " days"
plt.suptitle(tita, fontsize=16)
plt.savefig("Dynamic" + str(int(kk)))
plt.clf()
plt.close()
return current_time, (R2p, L2p), (R2w, L2w), (R2o, L2o)
oldfolder = os.getcwd()
os.chdir(oldfolder)
surrogate = None
while True:
    surrogate = int(input("Select surrogate method type:\n1=FNO\n2=PINO\n"))
if (surrogate > 2) or (surrogate < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-2")
else:
break
if not os.path.exists("../COMPARE_RESULTS"):
os.makedirs("../COMPARE_RESULTS")
if surrogate == 1:
folderr = "../COMPARE_RESULTS/FNO"
if not os.path.exists("../COMPARE_RESULTS/FNO"):
os.makedirs("../COMPARE_RESULTS/FNO")
else:
shutil.rmtree("../COMPARE_RESULTS/FNO")
os.makedirs("../COMPARE_RESULTS/FNO")
elif surrogate == 2:
folderr = "../COMPARE_RESULTS/PINO"
if not os.path.exists("../COMPARE_RESULTS/PINO"):
os.makedirs("../COMPARE_RESULTS/PINO")
else:
shutil.rmtree("../COMPARE_RESULTS/PINO")
os.makedirs("../COMPARE_RESULTS/PINO")
if not os.path.exists("../PACKETS"):
os.makedirs("../PACKETS")
else:
pass
# Variables needed for NVRS
plan = read_yaml("conf/config_PINO.yaml")
injectors = plan["custom"]["WELLSPECS"]["water_injector_wells"]
producers = plan["custom"]["WELLSPECS"]["producer_wells"]
N_injw = len(
plan["custom"]["WELLSPECS"]["water_injector_wells"]
) # Number of water injectors
N_pr = len(plan["custom"]["WELLSPECS"]["producer_wells"]) # Number of producers
# Variables needed for NVRS
nx = plan["custom"]["NVRS"]["nx"]
ny = plan["custom"]["NVRS"]["ny"]
nz = plan["custom"]["NVRS"]["nz"]
BO = plan["custom"]["NVRS"]["BO"] # oil formation volume factor
BW = plan["custom"]["NVRS"]["BW"] # Water formation volume factor
UW = plan["custom"]["NVRS"]["UW"] # water viscosity in cP
UO = plan["custom"]["NVRS"]["UO"] # oil viscosity in cP
DX = plan["custom"]["NVRS"]["DX"] # size of pixel in x direction
DY = plan["custom"]["NVRS"]["DY"] # size of pixel in y direction
DZ = plan["custom"]["NVRS"]["DZ"] # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(plan["custom"]["NVRS"]["SWI"])
SWR = cp.float32(plan["custom"]["NVRS"]["SWR"])
CFO = cp.float32(plan["custom"]["NVRS"]["CFO"]) # oil compressibility in 1/psi
IWSw = plan["custom"]["NVRS"]["IWSw"] # initial water saturation
pini_alt = plan["custom"]["NVRS"]["pini_alt"]
# print(pini_alt)
P1 = cp.float32(pini_alt) # Bubble point pressure psia
PB = P1
mpor, hpor = (
plan["custom"]["NVRS"]["mpor"],
plan["custom"]["NVRS"]["hpor"],
) # minimum and maximum porosity
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
PATM = cp.float32(plan["custom"]["NVRS"]["PATM"]) # Atmospheric pressure in psi
# training
LUB, HUB = (
plan["custom"]["NVRS"]["LUB"],
plan["custom"]["NVRS"]["HUB"],
) # Permeability rescale
aay, bby = (
plan["custom"]["NVRS"]["aay"],
plan["custom"]["NVRS"]["bby"],
) # Permeability range mD
Low_K, High_K = aay, bby
batch_size = plan["custom"]["NVRS"][
    "batch_size"
] # size of simulated labelled data to run
timmee = plan["custom"]["NVRS"][
"timmee"
] # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = plan["custom"]["NVRS"][
"max_t"
] # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = plan["custom"]["NVRS"]["MAXZ"] # reference maximum time in days of simulation
steppi = int(max_t / timmee)
choice = 1 # 1= Non-Gaussian prior, 2 = Gaussian prior
factorr = 0.1 # from [0 1] excluding the limits for PermZ
LIR = plan["custom"]["NVRS"]["LIR"] # lower injection rate
UIR = plan["custom"]["NVRS"]["UIR"] # upper injection rate
RE = 0.2 * DX
rwell = 200 # well radius
skin = 0 # well skin factor (near-wellbore damage)
pwf_producer = 100
cuda = 0
input_channel = 7 # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
device = torch.device(f"cuda:{cuda}" if torch.cuda.is_available() else "cpu")
N_inj = 4
N_pr = 4
# dt = timmee/max_t
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
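# tc2 is assumed to be a normalized (pseudo-)time grid from Equivalent_time;
# its first spacing is taken as the uniform normalized step, so physical
# time in days is recovered downstream as (step index) * dt * MAXZ.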
# 4 injector and 4 producer wells
wells = np.array(
[1, 24, 1, 1, 1, 1, 31, 1, 1, 31, 31, 1, 7, 9, 2, 14, 12, 2, 28, 19, 2, 14, 27, 2]
)
wells = np.reshape(wells, (-1, 3), "C")
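# Each row of `wells` is one well's (x, y, z) grid index; the first four
# rows are assumed to be the water injectors and the last four the
# producers, consistent with N_inj = N_pr = 4 below.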
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1PX2XFG1-elzQItvkUERJqeOerTO2kevq", to_absolute_path("../PACKETS/Test4.mat")
)
print("...Downlaod completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data11 = matt["INPUT"]
data_use11 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data11 = matt["INPUT"]
data_use11 = matt["OUTPUT"]
# a fixed test sample is used; swap in the random draw below if desired
# index = np.random.choice(X_data11.shape[0], 1, replace=False)
index = 253
X_data1 = X_data11[index, :, :, :][None, :, :, :, :]
data_use1 = data_use11[index, :, :, :][None, :, :, :, :]
X_data2 = X_data1
Ne = 1
ini_ensemble1 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble2 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble3 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble4 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble5 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble6 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
ini_ensemble7 = np.zeros((Ne, 1, nz, nx, ny), dtype=np.float32)
cPress = np.zeros((Ne, steppi, nz, nx, ny)) # Pressure
cSat = np.zeros((Ne, steppi, nz, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble1[kk, :, :, :, :] = permin
perm = X_data2[kk, 1, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble2[kk, :, :, :, :] = permin
perm = X_data2[kk, 2, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble3[kk, :, :, :, :] = permin
perm = X_data2[kk, 3, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble4[kk, :, :, :, :] = permin
perm = X_data2[kk, 4, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble5[kk, :, :, :, :] = permin
perm = X_data2[kk, 5, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble6[kk, :, :, :, :] = permin
perm = X_data2[kk, 6, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
ini_ensemble7[kk, :, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cPress[kk, :, :, :, :] = perm_big
perm = data_use1[kk, steppi:, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cSat[kk, :, :, :, :] = perm_big
print("")
print("Finished constructing Pytorch inputs")
print("*******************Load the trained Forward models*******************")
decoder1 = ConvFullyConnectedArch([Key("z", size=32)], [Key("pressure", size=steppi)])
modelP = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder1,
)
decoder2 = ConvFullyConnectedArch([Key("z", size=32)], [Key("water_sat", size=steppi)])
modelS = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder2,
)
if surrogate == 1:
print("-----------------Surrogate Model learned with FNO----------------")
if not os.path.exists(("outputs/Forward_problem_FNO/ResSim/")):
os.makedirs(("outputs/Forward_problem_FNO/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"11cr3-7zvAZA5zI1SpfevZOQhsYuzAJjy",
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelP.load_state_dict(torch.load("fno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelP.load_state_dict(torch.load("fno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1EnYGi6MiJum-i-QzbRrpmvsqdR0KSa9a",
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelS.load_state_dict(torch.load("fno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelS.load_state_dict(torch.load("fno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
print("-----------------Surrogate Model learned with PINO----------------")
if not os.path.exists(("outputs/Forward_problem_PINO/ResSim/")):
os.makedirs(("outputs/Forward_problem_PINO/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1unX_CW5_9aTV97LqkRWkYElwsjGkLYdl",
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelP.load_state_dict(torch.load("pino_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelP.load_state_dict(torch.load("pino_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1d9Vk9UiVU0sUV2KSh_H4gyH5OVUl2rqS",
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelS.load_state_dict(torch.load("pino_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelS.load_state_dict(torch.load("pino_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
print("********************Model Loaded*************************************")
inn = {
"perm": torch.from_numpy(ini_ensemble1).to(device, torch.float32),
"Q": torch.from_numpy(ini_ensemble2).to(device, dtype=torch.float32),
"Qw": torch.from_numpy(ini_ensemble3).to(device, dtype=torch.float32),
"Phi": torch.from_numpy(ini_ensemble4).to(device, dtype=torch.float32),
"Time": torch.from_numpy(ini_ensemble5).to(device, dtype=torch.float32),
"Pini": torch.from_numpy(ini_ensemble6).to(device, dtype=torch.float32),
"Swini": torch.from_numpy(ini_ensemble7).to(device, dtype=torch.float32),
}
print("")
print("predicting with surrogate model")
start_time_plots1 = time.time()
ouut_p = modelP(inn)["pressure"].detach().cpu().numpy()
ouut_s = modelS(inn)["water_sat"].detach().cpu().numpy()
elapsed_time_secs = time.time() - start_time_plots1
msg = "Surrogate Reservoir simulation took: %s secs (Wall clock time)" % timedelta(
seconds=round(elapsed_time_secs)
)
print(msg)
print("")
ouut_oil = np.ones_like(ouut_s) - ouut_s
print("")
print("Plotting outputs")
os.chdir(folderr)
Runs = steppi
ty = np.arange(1, Runs + 1)
Time_vector = np.zeros((steppi))
Accuracy_presure = np.zeros((steppi, 2))
Accuracy_oil = np.zeros((steppi, 2))
Accuracy_water = np.zeros((steppi, 2))
lock = Lock()
processed_chunks = Value("i", 0)
NUM_CORES = 12  # number of worker processes to use
# Split the range of steps into chunks (guarded so steppi < NUM_CORES still works)
chunk_size = max(1, steppi // NUM_CORES)
chunks = [
    list(range(i, min(i + chunk_size, steppi)))
    for i in range(0, steppi, chunk_size)
]
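# e.g. with steppi = 24 and NUM_CORES = 12, chunk_size = 2 and chunks =
# [[0, 1], [2, 3], ..., [22, 23]]; each worker renders its timesteps and
# the per-chunk results are flattened back into time order below.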
with concurrent.futures.ProcessPoolExecutor(max_workers=NUM_CORES) as executor:
chunked_results = list(executor.map(process_chunk, chunks))
# Flatten the chunked results to get the ordered results
results = [result for sublist in chunked_results for result in sublist]
for kk, (current_time, acc_pressure, acc_water, acc_oil) in enumerate(results):
Time_vector[kk] = current_time
Accuracy_presure[kk] = acc_pressure
Accuracy_oil[kk] = acc_oil
Accuracy_water[kk] = acc_water
fig4 = plt.figure(figsize=(20, 20), dpi=100)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=11,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=11,
)
# Plot R2 accuracies
plt.subplot(2, 3, 1)
plt.plot(
Time_vector,
Accuracy_presure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 3, 2)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 3, 3)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 3, 4)
plt.plot(
Time_vector,
Accuracy_presure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 3, 5)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 3, 6)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2.png"
plt.savefig(namez)
plt.clf()
plt.close()
print("")
print("Now - Creating GIF")
import glob
import re
frames = []
imgs = sorted(glob.glob("*Dynamic*"), key=sort_key)
for i in imgs:
new_frame = Image.open(i)
frames.append(new_frame)
frames[0].save(
"Evolution.gif",
format="GIF",
append_images=frames[1:],
save_all=True,
duration=500,
loop=0,
)
from glob import glob
for f3 in glob("*Dynamic*"):
os.remove(f3)
print("")
print("Saving prediction in CSV file")
spittsbig = [
"Time(DAY)",
"I1 - WBHP(PSIA)",
"I2 - WBHP (PSIA)",
"I3 - WBHP(PSIA)",
"I4 - WBHP(PSIA)",
"P1 - WOPR(BBL/DAY)",
"P2 - WOPR(BBL/DAY)",
"P3 - WOPR(BBL/DAY)",
"P4 - WOPR(BBL/DAY)",
"P1 - WWPR(BBL/DAY)",
"P2 - WWPR(BBL/DAY)",
"P3 - WWPR(BBL/DAY)",
"P4 - WWPR(BBL/DAY)",
"P1 - WWCT(%)",
"P2 - WWCT(%)",
"P3 - WWCT(%)",
"P4 - WWCT(%)",
]
see = Peaceman_well(
inn,
ouut_p,
ouut_s,
MAXZ,
1,
1e1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
0,
nz,
)
seeTrue = Peaceman_well(
inn,
cPress,
cSat,
MAXZ,
0,
1e1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
0,
nz,
)
seeuse = pd.DataFrame(see)
seeuse.to_csv("RSM_MODULUS.csv", header=spittsbig, sep=",")
seeuse.drop(columns=seeuse.columns[0], axis=1, inplace=True)
seeuset = pd.DataFrame(seeTrue)
seeuset.to_csv("RSM_NUMERICAL.csv", header=spittsbig, sep=",")
seeuset.drop(columns=seeuset.columns[0], axis=1, inplace=True)
Plot_RSM_percentile(see, seeTrue, "Compare.png")
os.chdir(oldfolder)
print("")
print("-------------------PROGRAM EXECUTED-----------------------------------")
| modulus-sym-main | examples/reservoir_simulation/3D/src/Compare_FVM_surrogate.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import modulus
import torch
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from modulus.sym.models.fno import *
import shutil
import cupy as cp
from skimage.transform import resize
import scipy.io as sio
import requests
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
from modulus.sym.utils.io.plotter import ValidatorPlotter
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
wells,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.wells = wells
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
        # get input variables
        pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
        self.nz = pressure_true.shape[2]  # infer nz from the data (not set in __init__)
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
Accuracy_presure = np.zeros((self.steppi, 2))
for itt in range(self.steppi):
look = (pressure_pred[0, itt, :, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look[0, :, :], cmap="jet")
plt.title("Layer 1 - Pressure FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[0, :, :], (-1,))),
np.max(np.reshape(lookf[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf[0, :, :], cmap="jet")
plt.title(" Layer 1 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, abs(look[0, :, :] - lookf[0, :, :]), cmap="jet")
plt.title(" Layer 1 - Pressure (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look[1, :, :], cmap="jet")
plt.title("Layer 2 - Pressure FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[1, :, :], (-1,))),
np.max(np.reshape(lookf[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf[1, :, :], cmap="jet")
plt.title(" Layer 2 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, abs(look[1, :, :] - lookf[1, :, :]), cmap="jet")
plt.title(" Layer 2 - Pressure (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look[2, :, :], cmap="jet")
plt.title("Layer 3 - Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf[2, :, :], (-1,))),
np.max(np.reshape(lookf[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf[2, :, :], cmap="jet")
plt.title(" Layer 3 - Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, abs(look[2, :, :] - lookf[2, :, :]), cmap="jet")
plt.title(" Layer 3 - Pressure (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
# name = namet + str(int(itt)) + '.png'
# plt.savefig(name)
# #plt.show()
# plt.clf()
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
Reinvent(look),
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
Reinvent(lookf),
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
Reinvent(diff1),
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=16)
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
            R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
            Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
            Accuracy_presure[itt, 0] = R2p
            Accuracy_presure[itt, 1] = L2p
        fig4, axs = plt.subplots(2, 1, figsize=(10, 10), squeeze=False)  # keep axs 2-D for axs[0, i]
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
for i, data in enumerate([Accuracy_presure]):
axs[0, i].plot(
Time_vector,
data[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[0, i].set_title(["Pressure"][i], fontproperties=font)
axs[0, i].set_xlabel("Time (days)", fontproperties=font)
axs[0, i].set_ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
for i, data in enumerate([Accuracy_presure]):
axs[1, i].plot(
Time_vector,
data[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[1, i].set_title(["Pressure"][i], fontproperties=font)
axs[1, i].set_xlabel("Time (days)", fontproperties=font)
axs[1, i].set_ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
wells,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.wells = wells
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
        # get input variables
        water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
        self.nz = water_true.shape[2]  # infer nz from the data (not set in __init__)
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
for itt in range(self.steppi):
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(20, 20), dpi=100)
look_sat = water_pred[0, itt, :, :, :]
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :, :]
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(6, 3, 1)
plt.pcolormesh(XX.T, YY.T, look_sat[0, :, :], cmap="jet")
plt.title(" Layer 1 - water_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[0, :, :], (-1,))),
np.max(np.reshape(lookf_sat[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf_sat[0, :, :], cmap="jet")
plt.title(" Layer 1 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 3)
plt.pcolormesh(XX.T, YY.T, diff1_wat[0, :, :], cmap="jet")
plt.title(" Layer 1- water_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 4)
plt.pcolormesh(XX.T, YY.T, look_sat[1, :, :], cmap="jet")
plt.title(" Layer 2 - water_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[1, :, :], (-1,))),
np.max(np.reshape(lookf_sat[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_sat[1, :, :], cmap="jet")
plt.title(" Layer 2 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 6)
plt.pcolormesh(XX.T, YY.T, diff1_wat[1, :, :], cmap="jet")
plt.title(" Layer 2- water_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 7)
plt.pcolormesh(XX.T, YY.T, look_sat[2, :, :], cmap="jet")
plt.title(" Layer 3 - water_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_sat[2, :, :], (-1,))),
np.max(np.reshape(lookf_sat[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_sat[2, :, :], cmap="jet")
plt.title(" Layer 3 - water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 9)
plt.pcolormesh(XX.T, YY.T, diff1_wat[2, :, :], cmap="jet")
plt.title(" Layer 3- water_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 10)
plt.pcolormesh(XX.T, YY.T, look_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[0, :, :], (-1,))),
np.max(np.reshape(lookf_oil[0, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 11)
plt.pcolormesh(XX.T, YY.T, lookf_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 12)
plt.pcolormesh(XX.T, YY.T, diff1_oil[0, :, :], cmap="jet")
plt.title(" Layer 1 - oil_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 13)
plt.pcolormesh(XX.T, YY.T, look_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[1, :, :], (-1,))),
np.max(np.reshape(lookf_oil[1, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 14)
plt.pcolormesh(XX.T, YY.T, lookf_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 15)
plt.pcolormesh(XX.T, YY.T, diff1_oil[1, :, :], cmap="jet")
plt.title(" Layer 2 - oil_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 16)
plt.pcolormesh(XX.T, YY.T, look_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat FNO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(
np.min(np.reshape(lookf_oil[2, :, :], (-1,))),
np.max(np.reshape(lookf_oil[2, :, :], (-1,))),
)
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 17)
plt.pcolormesh(XX.T, YY.T, lookf_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.subplot(6, 3, 18)
plt.pcolormesh(XX.T, YY.T, diff1_oil[2, :, :], cmap="jet")
plt.title(" Layer 3 - oil_sat (CFD - FNO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (self.nx - 1), 0, (self.ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, self.wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
# name = namet + str(int(itt)) + '.png'
# plt.savefig(name)
# #plt.show()
# plt.clf()
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
            f_big.append(yes)
            R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
            Accuracy_water[itt, 0] = R2w
            Accuracy_water[itt, 1] = L2w
            R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
            Accuracy_oil[itt, 0] = R2o
            Accuracy_oil[itt, 1] = L2o
            Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
Reinvent(look_sat),
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
Reinvent(lookf_sat),
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
                Reinvent(diff1_wat),
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
Reinvent(look_oil),
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
Reinvent(lookf_oil),
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
Reinvent(diff1_oil),
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=16)
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
fig4, axs = plt.subplots(2, 2, figsize=(20, 10))
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
for i, data in enumerate([Accuracy_water, Accuracy_oil]):
axs[0, i].plot(
Time_vector,
data[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[0, i].set_title(
["Water_saturation", "Oil_saturation"][i], fontproperties=font
)
axs[0, i].set_xlabel("Time (days)", fontproperties=font)
axs[0, i].set_ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
for i, data in enumerate([Accuracy_water, Accuracy_oil]):
axs[1, i].plot(
Time_vector,
data[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
axs[1, i].set_title(
["Water_saturation", "Oil_saturation"][i], fontproperties=font
)
axs[1, i].set_xlabel("Time (days)", fontproperties=font)
axs[1, i].set_ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
return f_big
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 3D FNO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
    wells = np.array(
        [1, 24, 1, 1, 1, 1, 31, 1, 1, 31, 31, 1, 7, 9, 2, 14, 12, 2, 28, 19, 2, 14, 27, 2]
    )
wells = np.reshape(wells, (-1, 3), "C")
oldfolder = os.getcwd()
os.chdir(oldfolder)
if not os.path.exists(to_absolute_path("../PACKETS")):
os.makedirs(to_absolute_path("../PACKETS"))
else:
pass
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
    DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
pini_alt = cfg.custom.NVRS.pini_alt
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
# Low_K, High_K = aay,bby
# batch_size = cfg.custom.NVRS.batch_size #'size of simulated labelled data to run'
timmee = (
cfg.custom.NVRS.timmee
) # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = (
cfg.custom.NVRS.max_t
) # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
factorr = cfg.custom.NVRS.factorr # from [0 1] excluding the limits for PermZ
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR # upper injection rate
input_channel = (
cfg.custom.NVRS.input_channel
) # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1wYyREUcpp0qLhbRItG5RMPeRMxVtntDi",
to_absolute_path("../PACKETS/Training4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1PX2XFG1-elzQItvkUERJqeOerTO2kevq",
to_absolute_path("../PACKETS/Test4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nz, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nz, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nz, nx, ny)) # Water saturation
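    # The loops below reorder each (nx, ny, nz) field into the channel-first
    # (nz, nx, ny) layout expected by the 3D FNO. A vectorized equivalent
    # (assuming the same memory layout) would be, e.g.:
    #   cPerm[kk, 0] = np.transpose(X_data1[kk, 0], (2, 0, 1))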
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPerm[kk, :, :, :, :] = permin
perm = X_data1[kk, 1, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQ[kk, :, :, :, :] = permin
perm = X_data1[kk, 2, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQw[kk, :, :, :, :] = permin
perm = X_data1[kk, 3, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPhi[kk, :, :, :, :] = permin
perm = X_data1[kk, 4, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cTime[kk, :, :, :, :] = permin
perm = X_data1[kk, 5, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPini[kk, :, :, :, :] = permin
perm = X_data1[kk, 6, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cSini[kk, :, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cPress[kk, :, :, :, :] = np.clip(perm_big, 1 / pini_alt, 2.0)
perm = data_use1[kk, steppi:, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cSat[kk, :, :, :, :] = perm_big
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nz, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nz, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nz, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPerm[kk, :, :, :, :] = permin
perm = X_data2[kk, 1, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQ[kk, :, :, :, :] = permin
perm = X_data2[kk, 2, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cQw[kk, :, :, :, :] = permin
perm = X_data2[kk, 3, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPhi[kk, :, :, :, :] = permin
perm = X_data2[kk, 4, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cTime[kk, :, :, :, :] = permin
perm = X_data2[kk, 5, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cPini[kk, :, :, :, :] = permin
perm = X_data2[kk, 6, :, :, :]
permin = np.zeros((1, nz, nx, ny))
for i in range(nz):
permin[0, i, :, :] = perm[:, :, i]
cSini[kk, :, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cPress[kk, :, :, :, :] = np.clip(perm_big, 1 / pini_alt, 2.0)
perm = data_use2[kk, steppi:, :, :, :]
perm_big = np.zeros((steppi, nz, nx, ny))
for mum in range(steppi):
use = perm[mum, :, :, :]
mum1 = np.zeros((nz, nx, ny))
for i in range(nz):
mum1[i, :, :] = use[:, :, i]
perm_big[mum, :, :, :] = mum1
cSat[kk, :, :, :, :] = perm_big
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
# load training/ test data
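    # Each Key's scale=(shift, scale) pair is what Modulus uses to normalize
    # that channel; the constants below match the per-key mean/std statistics
    # printed by load_FNO_dataset2, presumably recorded from an earlier run.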
input_keys = [
Key("perm", scale=(5.38467e-01, 2.29917e-01)),
Key("Q", scale=(1.33266e-03, 3.08151e-02)),
Key("Qw", scale=(1.39516e-03, 3.07869e-02)),
Key("Phi", scale=(2.69233e-01, 1.14958e-01)),
Key("Time", scale=(1.66666e-02, 1.08033e-07)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(1.99998e-01, 2.07125e-06)),
]
output_keys_pressure = [Key("pressure", scale=(1.16260e00, 5.75724e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.61902e-01, 1.97300e-01))]
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# [init-node]
# Make custom Darcy residual node for PINO
# Define FNO model for forward model (pressure)
decoder1 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("pressure", size=steppi)]
)
fno_pressure = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder1,
)
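    # Note on the hyper-parameters above: fno_modes=16 keeps 16 Fourier modes
    # per spatial dimension, and padding=13 pads the non-periodic domain before
    # the FFT (standard FNO practice; the exact values are tuned choices).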
# Define FNO model for forward model (saturation)
decoder2 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("water_sat", size=steppi)]
)
fno_saturation = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
fno_modes=16,
dimension=3,
padding=13,
nr_fno_layers=4,
decoder_net=decoder2,
)
nodes = [fno_pressure.make_node("fno_forward_model_pressure")] + [
fno_saturation.make_node("fno_forward_model_saturation")
]
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=cfg.batch_size.test,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,\
# wells,steppi,tc2,dt,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=cfg.batch_size.test,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=cfg.batch_size.test,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,\
# wells,steppi,tc2,dt,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=cfg.batch_size.test,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/reservoir_simulation/3D/src/Forward_problem_FNO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 21:33:56 2023
@author: clementetienam
"""
import os
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from NVRS import *
from modulus.sym.models.fno import *
from modulus.sym.models.afno.afno import *
import shutil
import pandas as pd
import scipy.io as sio
import torch
import yaml
from PIL import Image
oldfolder = os.getcwd()
os.chdir(oldfolder)
data = []
os.chdir("../COMPARE_RESULTS/FNO")
True_measurement = pd.read_csv("RSM_NUMERICAL.csv")
True_measurement = True_measurement.values.astype(np.float32)[:, 1:]
data.append(True_measurement)
FNO = pd.read_csv("RSM_MODULUS.csv")
FNO = FNO.values.astype(np.float32)[:, 1:]
data.append(FNO)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS/PINO")
PINO = pd.read_csv("RSM_MODULUS.csv")
PINO = PINO.values.astype(np.float32)[:, 1:]
data.append(PINO)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS")
Plot_Models(data)
Plot_bar(data)
os.chdir(oldfolder)
| modulus-sym-main | examples/reservoir_simulation/3D/src/Compare_Models.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import zipfile
try:
    import gdown
except ImportError:
    gdown = None
import scipy.io
import numpy as np
import h5py
from modulus.sym.hydra import to_absolute_path
# list of FNO dataset url ids on drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt-
_FNO_datatsets_ids = {
"Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV",
"Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf",
}
_FNO_dataset_names = {
"Darcy_241": (
"piececonst_r241_N1024_smooth1.hdf5",
"piececonst_r241_N1024_smooth2.hdf5",
),
"Darcy_421": (
"piececonst_r421_N1024_smooth1.hdf5",
"piececonst_r421_N1024_smooth2.hdf5",
),
}
def load_FNO_dataset(path, input_keys, output_keys, n_examples=None):
    "Loads an FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
    print(f"loaded: {path}\navailable keys: {_ks}")
# parse data
invar, outvar = dict(), dict()
for d, keys in [(invar, input_keys), (outvar, output_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar)
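# Minimal usage sketch for load_FNO_dataset. The input/output key names below
# ("coeff"/"sol") are illustrative assumptions, not taken from this repository:
#   invar, outvar = load_FNO_dataset(
#       "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
#       input_keys=["coeff"], output_keys=["sol"], n_examples=100,
#   )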
def load_FNO_dataset2(path, input_keys, output_keys, output_keys2, n_examples=None):
    "Loads an FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
    print(f"loaded: {path}\navailable keys: {_ks}")
# parse data
invar, outvar, outvar2 = dict(), dict(), dict()
for d, keys in [
(invar, input_keys),
(outvar, output_keys),
(outvar2, output_keys2),
]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar, outvar2)
def load_FNO_dataset4(path, input_keys, n_examples=None):
    "Loads an FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
    print(f"loaded: {path}\navailable keys: {_ks}")
# parse data
invar = dict()
for d, keys in [(invar, input_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return invar
def load_deeponet_dataset(
path, input_keys, output_keys, n_examples=None, filter_size=8
):
    "Loads a DeepONet dataset"
# load dataset
invar, outvar = load_FNO_dataset(path, input_keys, output_keys, n_examples)
# reduce shape needed for deeponet
for key, value in invar.items():
invar[key] = value[:, :, ::filter_size, ::filter_size]
for key, value in outvar.items():
outvar[key] = value[:, :, ::filter_size, ::filter_size]
res = next(iter(invar.values())).shape[-1]
nr_points_per_sample = res**2
# tile invar
tiled_invar = {
key: np.concatenate(
[
np.tile(value[i], (nr_points_per_sample, 1, 1, 1))
for i in range(n_examples)
]
)
for key, value in invar.items()
}
# tile outvar
tiled_outvar = {key: value.flatten()[:, None] for key, value in outvar.items()}
    # add coordinate points
x = np.linspace(0.0, 1.0, res)
y = np.linspace(0.0, 1.0, res)
x, y = [a.flatten()[:, None] for a in np.meshgrid(x, y)]
tiled_invar["x"] = np.concatenate(n_examples * [x], axis=0)
tiled_invar["y"] = np.concatenate(n_examples * [y], axis=0)
return (tiled_invar, tiled_outvar)
def download_FNO_dataset(name, outdir="datasets/"):
"Tries to download FNO dataset from drive"
if name not in _FNO_datatsets_ids:
raise Exception(
f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_datatsets_ids.keys())}"
)
id = _FNO_datatsets_ids[name]
outdir = to_absolute_path(outdir) + "/"
namedir = f"{outdir}{name}/"
# skip if already exists
exists = True
for file_name in _FNO_dataset_names[name]:
if not os.path.isfile(namedir + file_name):
exists = False
break
if exists:
return
print(f"FNO dataset {name} not detected, downloading dataset")
# Make sure we have gdown installed
if gdown is None:
raise ModuleNotFoundError("gdown package is required to download the dataset!")
# get output directory
os.makedirs(namedir, exist_ok=True)
# download dataset
zippath = f"{outdir}{name}.zip"
_download_file_from_google_drive(id, zippath)
# unzip
with zipfile.ZipFile(zippath, "r") as f:
f.extractall(namedir)
os.remove(zippath)
# preprocess files
for file in os.listdir(namedir):
if file.endswith(".mat"):
matpath = f"{namedir}{file}"
preprocess_FNO_mat(matpath)
os.remove(matpath)
def _download_file_from_google_drive(id, path):
"Downloads a file from google drive"
# use gdown library to download file
gdown.download(id=id, output=path)
def preprocess_FNO_mat(path):
"Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays"
assert path.endswith(".mat")
data = scipy.io.loadmat(path)
ks = [k for k in data.keys() if not k.startswith("__")]
with h5py.File(path[:-4] + ".hdf5", "w") as f:
for k in ks:
# x = np.expand_dims(data[k], axis=1) # N, C, H, W
x = data[k]
f.create_dataset(
k, data=x, dtype="float32"
            ) # note: the .hdf5 file is larger than the .mat because no compression is used
| modulus-sym-main | examples/reservoir_simulation/3D/src/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
# Import from Modulus
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight1 = (torch.tanh((10000 - step_tensor) * smoothness) + 1.0) * 0.5
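        # decay_weight1 is a smooth step: ~1 while step << 10000 and ~0 once
        # step >> 10000, so the (1 - decay_weight1) factors below ramp the
        # supervised losses in gradually over roughly the first ~10k steps.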
lambda_pressure = 1.0
lambda_saturation = 1.0
        # Add losses
for key in losses.keys():
if "pressure" in key:
loss += lambda_pressure * (1 - decay_weight1) * ((losses[key]))
if "water_sat" in key:
loss += lambda_saturation * (1 - decay_weight1) * ((losses[key]))
return loss
| modulus-sym-main | examples/reservoir_simulation/3D/src/custom_aggregator_FNO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
# Import from Modulus
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight1 = (torch.tanh((10000 - step_tensor) * smoothness) + 1.0) * 0.5
lambda_pressure = 1.0
lambda_saturation = 1.0
lambda_pressured = 0.1
lambda_saturationd = 0.1
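        # Data losses (pressure, water_sat) carry full weight while the PDE
        # residual losses (pressured, saturationd) are down-weighted by 10x;
        # all share the same tanh ramp-in schedule via (1 - decay_weight1).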
        # Add losses. Check the residual keys ("pressured"/"saturationd") first:
        # a plain substring test for "pressure" would also match "pressured"
        # and double-count that loss.
        for key in losses.keys():
            if "pressured" in key:
                loss += lambda_pressured * (1 - decay_weight1) * ((losses[key]))
            elif "saturationd" in key:
                loss += lambda_saturationd * (1 - decay_weight1) * ((losses[key]))
            elif "pressure" in key:
                loss += lambda_pressure * (1 - decay_weight1) * ((losses[key]))
            elif "water_sat" in key:
                loss += lambda_saturation * (1 - decay_weight1) * ((losses[key]))
return loss
| modulus-sym-main | examples/reservoir_simulation/3D/src/custom_aggregator_PINO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
import os
import modulus
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from ops import dx, ddx
from modulus.sym.models.fno import *
import shutil
import cupy as cp
import scipy.io as sio
import requests
from modulus.sym.utils.io.plotter import ValidatorPlotter
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
# get and interpolate output variable
pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
        Accuracy_pressure = np.zeros((self.steppi, 2))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
look = (pressure_pred[0, itt, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(10, 10), dpi=100)
plt.subplot(1, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
            Accuracy_pressure[itt, 0] = R2p
            Accuracy_pressure[itt, 1] = L2p
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(10, 10), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
plt.subplot(2, 1, 1)
plt.plot(
Time_vector,
            Accuracy_pressure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 1, 2)
plt.plot(
Time_vector,
            Accuracy_pressure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
look_sat = water_pred[0, itt, :, :] # *1e-2
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :] # * 1e-2
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(2, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
Accuracy_water[itt, 0] = R2w
Accuracy_water[itt, 1] = L2w
plt.subplot(2, 3, 4)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 5)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 6)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
Accuracy_oil[itt, 0] = R2o
Accuracy_oil[itt, 1] = L2o
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(20, 20), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
plt.subplot(2, 2, 1)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 2)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 3)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 2, 4)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
# [pde-loss]
# define custom class for black oil model
class Black_oil(torch.nn.Module):
"Custom Black oil PDE definition for PINO"
def __init__(
self,
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
approach,
):
super().__init__()
self.UIR = UIR
self.UWR = UIR
self.pini_alt = pini_alt
self.LUB = LUB
self.HUB = HUB
self.aay = aay
self.bby = bby
self.SWI = SWI
self.SWR = SWR
self.UW = UW
self.BW = BW
self.UO = UO
self.BO = BO
self.MAXZ = MAXZ
self.nx = nx
self.ny = ny
self.approach = approach
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# get inputs
u = input_var["pressure"]
perm = input_var["perm"]
fin = input_var["Q"]
finwater = input_var["Qw"]
dt = input_var["Time"]
pini = input_var["Pini"]
poro = input_var["Phi"]
sini = input_var["Swini"]
sat = input_var["water_sat"]
siniuse = sini[0, 0, 0, 0]
dtin = dt * self.MAXZ
dxf = 1.0 / u.shape[3]
if self.approach == 1:
u = u * self.pini_alt
pini = pini * self.pini_alt
# Pressure equation Loss
fin = fin * self.UIR
finwater = finwater * self.UIR
cuda = 0
device = torch.device(
f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
)
# print(pressurey.shape)
p_loss = torch.zeros_like(u).to(device, torch.float32)
s_loss = torch.zeros_like(u).to(device, torch.float32)
a = perm # absolute permeability
v_min, v_max = self.LUB, self.HUB
new_min, new_max = self.aay, self.bby
m = (new_max - new_min) / (v_max - v_min)
b = new_min - m * v_min
a = m * a + b
finuse = fin
finusew = finwater
dta = dtin
pressure = u
# water_sat = sat
prior_pressure = torch.zeros(
sat.shape[0], sat.shape[1], self.nx, self.ny
).to(device, torch.float32)
prior_pressure[:, 0, :, :] = self.pini_alt * (
torch.ones(sat.shape[0], self.nx, self.ny).to(device, torch.float32)
)
prior_pressure[:, 1:, :, :] = u[:, :-1, :, :]
# dsp = u - prior_pressure #dp
prior_sat = torch.zeros(sat.shape[0], sat.shape[1], self.nx, self.ny).to(
device, torch.float32
)
prior_sat[:, 0, :, :] = siniuse * (
torch.ones(sat.shape[0], self.nx, self.ny).to(device, torch.float32)
)
prior_sat[:, 1:, :, :] = sat[:, :-1, :, :]
dsw = sat - prior_sat # ds
dsw = torch.clip(dsw, 0.001, None)
S = torch.div(
torch.sub(prior_sat, self.SWI, alpha=1), (1 - self.SWI - self.SWR)
)
# Pressure equation Loss
Mw = torch.divide(torch.square(S), (self.UW * self.BW)) # Water mobility
Mo = torch.div(
torch.square(torch.sub(torch.ones(S.shape, device=u.device), S)),
(self.UO * self.BO),
)
# krw = torch.square(S)
# kroil = torch.square(torch.sub(torch.ones(S.shape,\
# device = u.device),S))
Mt = Mw + Mo
a1 = torch.mul(Mt, a) # overall Effective permeability
a1water = torch.mul(Mw, a) # water Effective permeability
            # compute first differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :][:, None, :, :]
dudx_fdma = dx(
inn_now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = dx(
inn_now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulpa.append(dudx_fdma)
gulp2a.append(dudy_fdma)
dudx_fdm = torch.stack(gulpa, 0)[:, :, 0, :, :]
dudy_fdm = torch.stack(gulp2a, 0)[:, :, 0, :, :]
            # Compute second differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :][:, None, :, :]
dudx_fdma = ddx(
inn_now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = ddx(
inn_now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulpa.append(dudx_fdma)
gulp2a.append(dudy_fdma)
dduddx_fdm = torch.stack(gulpa, 0)[:, :, 0, :, :]
dduddy_fdm = torch.stack(gulp2a, 0)[:, :, 0, :, :]
inn_now2 = a1
dcdx = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dcdy = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
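            # Pressure residual: q + div(a1 * grad(p)), expanded by the product
            # rule into q + da1/dx*dp/dx + a1*d2p/dx2 + da1/dy*dp/dy + a1*d2p/dy2.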
darcy_pressure = (
fin
+ (dcdx * dudx_fdm)
+ (a1 * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (a1 * dduddy_fdm)
)
# Zero outer boundary
# darcy_pressure = F.pad(darcy_pressure[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_pressure = dxf * darcy_pressure * 1e-7
p_loss = darcy_pressure
            # Saturation equation loss
dudx = dudx_fdm
dudy = dudy_fdm
dduddx = dduddx_fdm
dduddy = dduddy_fdm
inn_now2 = a1water
dadx = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dady = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
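            # Water-transport residual: phi * dSw/dt - (div(a1water * grad(p)) + qw),
            # with the divergence expanded by the product rule as for pressure.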
flux = (
(dadx * dudx) + (a1water * dduddx) + (dady * dudy) + (a1water * dduddy)
)
fifth = poro * (dsw / dta)
toge = flux + finusew
darcy_saturation = fifth - toge
# Zero outer boundary
# darcy_saturation = F.pad(darcy_saturation[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_saturation = dxf * darcy_saturation * 1e-7
s_loss = darcy_saturation
# Slower but more accurate implementation
elif self.approach == 2:
u = u * self.pini_alt
pini = pini * self.pini_alt
# Pressure equation Loss
fin = fin * self.UIR
finwater = finwater * self.UIR
cuda = 0
device = torch.device(
f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
)
# print(pressurey.shape)
p_loss = torch.zeros_like(u).to(device, torch.float32)
s_loss = torch.zeros_like(u).to(device, torch.float32)
# print(sat.shape)
# output_var = dict()
for zig in range(sat.shape[0]):
for count in range(sat.shape[1]):
if count == 0:
prior_sat = sini[zig, 0, :, :][None, None, :, :]
prior_pressure = pini[zig, 0, :, :][None, None, :, :]
else:
prior_sat = sat[zig, (count - 1), :, :][None, None, :, :]
prior_pressure = u[zig, count - 1, :, :][None, None, :, :]
pressure = u[zig, count, :, :][None, None, :, :]
water_sat = sat[zig, count, :, :][None, None, :, :]
finuse = fin
a = perm[zig, 0, :, :][None, None, :, :]
v_min, v_max = self.LUB, self.HUB
new_min, new_max = self.aay, self.bby
m = (new_max - new_min) / (v_max - v_min)
b = new_min - m * v_min
a = m * a + b
S = torch.div(
torch.sub(prior_sat, self.SWI, alpha=1),
(1 - self.SWI - self.SWR),
)
# Pressure equation Loss
Mw = torch.divide(
torch.square(S), (self.UW * self.BW)
) # Water mobility
Mo = torch.div(
torch.square(
torch.sub(torch.ones(S.shape, device=u.device), S)
),
(self.UO * self.BO),
)
Mt = Mw + Mo
a1 = torch.mul(Mt, a) # Effective permeability
ua = pressure
a2 = a1
dyf = 1.0 / u.shape[3]
# FDM gradients
dudx_fdm = dx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdm = dx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dduddx_fdm = ddx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dduddy_fdm = ddx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dcdx = dx(
a2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dcdy = dx(
a2, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
# compute darcy equation
darcy_pressure = (
finuse[zig, 0, :, :][None, None, :, :]
+ (dcdx * dudx_fdm)
+ (a2 * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (a2 * dduddy_fdm)
)
# Zero outer boundary
# darcy_pressure = F.pad(darcy_pressure[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_pressure = dxf * darcy_pressure * 1e-7
p_loss[zig, count, :, :] = darcy_pressure
# output_var["darcy_pressure"] = torch.mean(p_loss,dim = 0)[None,:,:,:]
# Saturation equation Loss
finuse = finwater[zig, 0, :, :][None, None, :, :]
dsw = water_sat - prior_sat
dsw = torch.clip(dsw, 0.001, None)
dta = dtin[zig, 0, :, :][None, None, :, :]
Mw = torch.divide(
torch.square(S), (self.UW * self.BW)
) # Water mobility
Mt = Mw
a1 = torch.mul(Mt, a) # Effective permeability to water
ua = pressure
a2 = a1
dudx = dx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy = dx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dduddx = ddx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dduddy = ddx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dadx = dx(
a2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dady = dx(
a2, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
flux = (dadx * dudx) + (a2 * dduddx) + (dady * dudy) + (a2 * dduddy)
# flux = flux[:,0,:,:]
# temp = dsw_dt
# fourth = poro * CFW * prior_sat * (dsp/dta)
fifth = poro[zig, 0, :, :][None, None, :, :] * (dsw / dta)
toge = flux + finuse
darcy_saturation = fifth - toge
# Zero outer boundary
# darcy_saturation = F.pad(darcy_saturation[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_saturation = dxf * darcy_saturation * 1e-7
# print(darcy_saturation.shape)
s_loss[zig, count, :, :] = darcy_saturation
output_var = {"pressured": p_loss, "saturationd": s_loss}
return output_var
# [pde-loss]
@modulus.sym.main(config_path="conf", config_name="config_PINO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 2D PINO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
oldfolder = os.getcwd()
os.chdir(oldfolder)
default = None
while True:
default = int(
input("Select 1 = use default values | 2 = Use user defined values \n")
)
if (default > 2) or (default < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-2")
else:
break
if not os.path.exists(to_absolute_path("../PACKETS")):
os.makedirs(to_absolute_path("../PACKETS"))
else:
pass
if default == 1:
approach = 1
else:
approach = None
while True:
            print("Remark: Option 3 is not fully implemented")
approach = int(
input(
"Select computation of spatial gradients -\n\
1 = Approximate and fast computation\n\
2 = Exact but slightly slower computation using FDM\n\
3 = Exact gradient using FNO\n: "
)
)
if (approach > 3) or (approach < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-3")
else:
break
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
pini_alt = cfg.custom.NVRS.pini_alt
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
# Low_K, High_K = aay,bby
# batch_size = cfg.custom.NVRS.batch_size #'size of simulated labelled data to run'
timmee = (
cfg.custom.NVRS.timmee
) # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = (
cfg.custom.NVRS.max_t
) # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
factorr = cfg.custom.NVRS.factorr # from [0 1] excluding the limits for PermZ
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR # upper injection rate
input_channel = (
cfg.custom.NVRS.input_channel
) # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
    if not bb:
        print("....Downloading, please hold.........")
        download_file_from_google_drive(
            "1I-27_S53ORRFB_hIN_41r3Ntc6PpOE40",
            to_absolute_path("../PACKETS/Training4.mat"),
        )
        print("...Download completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
    if not bb:
        print("....Downloading, please hold.........")
        download_file_from_google_drive(
            "1G4Cvg8eIObyBK0eoo7iX-0hhMTnpJktj",
            to_absolute_path("../PACKETS/Test4.mat"),
        )
        print("...Download completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data1[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data1[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data1[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data1[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data1[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data1[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use1[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data2[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data2[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data2[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data2[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data2[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data2[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use2[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
    # load training / test data
input_keys = [
Key("perm", scale=(3.46327e-01, 3.53179e-01)),
Key("Q", scale=(1.94683e-03, 3.70558e-02)),
Key("Qw", scale=(2.03866e-03, 3.70199e-02)),
Key("Phi", scale=(1.73163e-01, 1.76590e-01)),
Key("Time", scale=(1.66667e-02, 7.45058e-09)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(2.00000e-01, 4.91738e-07)),
]
output_keys_pressure = [Key("pressure", scale=(2.87008e-01, 1.85386e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.12903e-01, 1.79786e-01))]
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
# add additional constraining values for darcy variable
outvar_train_pressure["pressured"] = np.zeros_like(
outvar_train_pressure["pressure"]
)
outvar_train_saturation["saturationd"] = np.zeros_like(
outvar_train_saturation["water_sat"]
)
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# [init-node]
# Define FNO model for forward model (pressure)
decoder1 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("pressure", size=steppi)]
)
fno_pressure = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder1,
)
# Define FNO model for forward model (saturation)
decoder2 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("water_sat", size=steppi)]
)
fno_saturation = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder2,
)
# if approach ==3:
# derivatives = [
# Key("pressure", derivatives=[Key("x")]),
# Key("pressure", derivatives=[Key("y")]),
# Key("pressure", derivatives=[Key("x"), Key("x")]),
# Key("pressure", derivatives=[Key("y"), Key("y")]),
# ]
# fno_pressure.add_pino_gradients(
# derivatives=derivatives,
# domain_length=[nx, ny],
# )
inputs = [
"perm",
"Q",
"Qw",
"Phi",
"Time",
"Pini",
"Swini",
"pressure",
"water_sat",
]
# if approach ==3:
# inputs += [
# "pressure__x",
# "pressure__y",
# ]
darcyy = Node(
inputs=inputs,
outputs=[
"pressured",
"saturationd",
],
evaluate=Black_oil(
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
approach,
),
name="Darcy node",
)
nodes = (
[darcyy]
+ [fno_pressure.make_node("pino_forward_model_pressure", jit=cfg.jit)]
+ [fno_saturation.make_node("pino_forward_model_saturation", jit=cfg.jit)]
)
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
    # [constraint]
    # add validator
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=1,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=1,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/reservoir_simulation/2D/src/Forward_problem_PINO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
from math import ceil, floor
def deriveSizeFromScale(img_shape, scale):
output_shape = []
for k in range(2):
output_shape.append(int(ceil(scale[k] * img_shape[k])))
return output_shape
def deriveScaleFromSize(img_shape_in, img_shape_out):
scale = []
for k in range(2):
scale.append(1.0 * img_shape_out[k] / img_shape_in[k])
return scale
def triangle(x):
x = np.array(x).astype(np.float64)
lessthanzero = np.logical_and((x >= -1), x < 0)
greaterthanzero = np.logical_and((x <= 1), x >= 0)
f = np.multiply((x + 1), lessthanzero) + np.multiply((1 - x), greaterthanzero)
return f
def cubic(x):
x = np.array(x).astype(np.float64)
absx = np.absolute(x)
absx2 = np.multiply(absx, absx)
absx3 = np.multiply(absx2, absx)
f = np.multiply(1.5 * absx3 - 2.5 * absx2 + 1, absx <= 1) + np.multiply(
-0.5 * absx3 + 2.5 * absx2 - 4 * absx + 2, (1 < absx) & (absx <= 2)
)
return f
def contributions(in_length, out_length, scale, kernel, k_width):
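    # MATLAB-style weight/index computation for resizing along one axis: when
    # downscaling (scale < 1) the kernel is widened for anti-aliasing, and
    # out-of-range sample indices are reflected at the image borders via the
    # `aux` mirror array below.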
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length + 1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate(
(np.arange(in_length), np.arange(in_length - 1, -1, step=-1))
).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
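# Note (added for clarity): `contributions` mirrors MATLAB imresize's weight
# computation. For each of the `out_length` output samples it returns the
# contributing input indices (with mirror boundary handling via `aux`) and the
# normalized kernel weights, so every output pixel is a weighted sum of inputs.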
def imresizemex(inimg, weights, indices, dim):
in_shape = inimg.shape
w_shape = weights.shape
out_shape = list(in_shape)
out_shape[dim] = w_shape[0]
outimg = np.zeros(out_shape)
if dim == 0:
for i_img in range(in_shape[1]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[ind, i_img].astype(np.float64)
outimg[i_w, i_img] = np.sum(
np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0
)
elif dim == 1:
for i_img in range(in_shape[0]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[i_img, ind].astype(np.float64)
outimg[i_img, i_w] = np.sum(
np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0
)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def imresizevec(inimg, weights, indices, dim):
wshape = weights.shape
if dim == 0:
weights = weights.reshape((wshape[0], wshape[2], 1, 1))
outimg = np.sum(
weights * ((inimg[indices].squeeze(axis=1)).astype(np.float64)), axis=1
)
elif dim == 1:
weights = weights.reshape((1, wshape[0], wshape[2], 1))
outimg = np.sum(
weights * ((inimg[:, indices].squeeze(axis=2)).astype(np.float64)), axis=2
)
if inimg.dtype == np.uint8:
outimg = np.clip(outimg, 0, 255)
return np.around(outimg).astype(np.uint8)
else:
return outimg
def resizeAlongDim(A, dim, weights, indices, mode="vec"):
if mode == "org":
out = imresizemex(A, weights, indices, dim)
else:
out = imresizevec(A, weights, indices, dim)
return out
def imresize(I, scalar_scale=None, method="bicubic", output_shape=None, mode="vec"):
if method == "bicubic":
kernel = cubic
elif method == "bilinear":
kernel = triangle
else:
print("Error: Unidentified method supplied")
kernel_width = 4.0
# Fill scale and output_size
if scalar_scale is not None:
scalar_scale = float(scalar_scale)
scale = [scalar_scale, scalar_scale]
output_size = deriveSizeFromScale(I.shape, scale)
elif output_shape is not None:
scale = deriveScaleFromSize(I.shape, output_shape)
output_size = list(output_shape)
else:
print("Error: scalar_scale OR output_shape should be defined!")
return
scale_np = np.array(scale)
order = np.argsort(scale_np)
weights = []
indices = []
for k in range(2):
w, ind = contributions(
I.shape[k], output_size[k], scale[k], kernel, kernel_width
)
weights.append(w)
indices.append(ind)
B = np.copy(I)
flag2D = False
if B.ndim == 2:
B = np.expand_dims(B, axis=2)
flag2D = True
for k in range(2):
dim = order[k]
B = resizeAlongDim(B, dim, weights[dim], indices[dim], mode)
if flag2D:
B = np.squeeze(B, axis=2)
return B
def convertDouble2Byte(I):
B = np.clip(I, 0.0, 1.0)
B = 255 * B
return np.around(B).astype(np.uint8)
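# Example usage (a minimal sketch, not part of the original module): downscale a
# random field with the MATLAB-compatible bicubic path defined above.
if __name__ == "__main__":
    demo = np.random.rand(64, 64)
    small = imresize(demo, scalar_scale=0.5, method="bicubic")
    print(small.shape)  # (32, 32)
    as_bytes = convertDouble2Byte(small)  # clip to [0, 1], quantize to uint8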
| modulus-sym-main | examples/reservoir_simulation/2D/src/imresize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
import os
import modulus
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from ops import dx, ddx
from modulus.sym.models.afno.afno import *
import shutil
import cupy as cp
import scipy.io as sio
import requests
from modulus.sym.utils.io.plotter import ValidatorPlotter
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
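# Example usage (illustrative only; the id below is a placeholder, not a real
# Drive asset):
#   download_file_from_google_drive("<file-id>", "local_copy.mat")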
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
# get and interpolate output variable
pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
Accuracy_presure = np.zeros((self.steppi, 2))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
look = (pressure_pred[0, itt, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(10, 10), dpi=100)
plt.subplot(1, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
Accuracy_presure[itt, 0] = R2p
Accuracy_presure[itt, 1] = L2p
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(10, 10), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
plt.subplot(2, 1, 1)
plt.plot(
Time_vector,
Accuracy_presure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 1, 2)
plt.plot(
Time_vector,
Accuracy_presure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
look_sat = water_pred[0, itt, :, :] # *1e-2
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :] # * 1e-2
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(2, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
Accuracy_water[itt, 0] = R2w
Accuracy_water[itt, 1] = L2w
plt.subplot(2, 3, 4)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 5)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 6)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
Accuracy_oil[itt, 0] = R2o
Accuracy_oil[itt, 1] = L2o
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(20, 20), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
plt.subplot(2, 2, 1)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 2)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 3)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 2, 4)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
# [pde-loss]
# define custom class for black oil model
class Black_oil(torch.nn.Module):
"Custom Black oil PDE definition for AFNO"
def __init__(
self,
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
approach,
):
super().__init__()
self.UIR = UIR
self.UWR = UIR
self.pini_alt = pini_alt
self.LUB = LUB
self.HUB = HUB
self.aay = aay
self.bby = bby
self.SWI = SWI
self.SWR = SWR
self.UW = UW
self.BW = BW
self.UO = UO
self.BO = BO
self.MAXZ = MAXZ
self.nx = nx
self.ny = ny
self.approach = approach
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# get inputs
u = input_var["pressure"]
perm = input_var["perm"]
fin = input_var["Q"]
finwater = input_var["Qw"]
dt = input_var["Time"]
pini = input_var["Pini"]
poro = input_var["Phi"]
sini = input_var["Swini"]
sat = input_var["water_sat"]
siniuse = sini[0, 0, 0, 0]
dtin = dt * self.MAXZ
dxf = 1.0 / u.shape[3]
if self.approach == 1:
u = u * self.pini_alt
pini = pini * self.pini_alt
# Pressure equation Loss
fin = fin * self.UIR
finwater = finwater * self.UIR
cuda = 0
device = torch.device(
f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
)
# print(pressurey.shape)
p_loss = torch.zeros_like(u).to(device, torch.float32)
s_loss = torch.zeros_like(u).to(device, torch.float32)
a = perm # absolute permeability
v_min, v_max = self.LUB, self.HUB
new_min, new_max = self.aay, self.bby
m = (new_max - new_min) / (v_max - v_min)
b = new_min - m * v_min
a = m * a + b
finuse = fin
finusew = finwater
dta = dtin
pressure = u
# water_sat = sat
prior_pressure = torch.zeros(
sat.shape[0], sat.shape[1], self.nx, self.ny
).to(device, torch.float32)
prior_pressure[:, 0, :, :] = self.pini_alt * (
torch.ones(sat.shape[0], self.nx, self.ny).to(device, torch.float32)
)
prior_pressure[:, 1:, :, :] = u[:, :-1, :, :]
# dsp = u - prior_pressure #dp
prior_sat = torch.zeros(sat.shape[0], sat.shape[1], self.nx, self.ny).to(
device, torch.float32
)
prior_sat[:, 0, :, :] = siniuse * (
torch.ones(sat.shape[0], self.nx, self.ny).to(device, torch.float32)
)
prior_sat[:, 1:, :, :] = sat[:, :-1, :, :]
dsw = sat - prior_sat # ds
dsw = torch.clip(dsw, 0.001, None)
S = torch.div(
torch.sub(prior_sat, self.SWI, alpha=1), (1 - self.SWI - self.SWR)
)
# Pressure equation Loss
Mw = torch.divide(torch.square(S), (self.UW * self.BW)) # Water mobility
Mo = torch.div(
torch.square(torch.sub(torch.ones(S.shape, device=u.device), S)),
(self.UO * self.BO),
)
krw = torch.square(S)
kroil = torch.square(torch.sub(torch.ones(S.shape, device=u.device), S))
Mt = Mw + Mo
a1 = torch.mul(Mt, a) # overall Effective permeability
a1water = torch.mul(Mw, a) # water Effective permeability
            # compute first differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :][:, None, :, :]
dudx_fdma = dx(
inn_now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = dx(
inn_now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulpa.append(dudx_fdma)
gulp2a.append(dudy_fdma)
dudx_fdm = torch.stack(gulpa, 0)[:, :, 0, :, :]
dudy_fdm = torch.stack(gulp2a, 0)[:, :, 0, :, :]
            # Compute second differential
gulpa = []
gulp2a = []
for m in range(sat.shape[0]): # Batch
inn_now = pressure[m, :, :, :][:, None, :, :]
dudx_fdma = ddx(
inn_now, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdma = ddx(
inn_now, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
gulpa.append(dudx_fdma)
gulp2a.append(dudy_fdma)
dduddx_fdm = torch.stack(gulpa, 0)[:, :, 0, :, :]
dduddy_fdm = torch.stack(gulp2a, 0)[:, :, 0, :, :]
inn_now2 = a1
dcdx = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dcdy = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
darcy_pressure = (
fin
+ (dcdx * dudx_fdm)
+ (a1 * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (a1 * dduddy_fdm)
)
# Zero outer boundary
# darcy_pressure = F.pad(darcy_pressure[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_pressure = dxf * darcy_pressure * 1e-7
p_loss = darcy_pressure
            # Saturation equation loss
dudx = dudx_fdm
dudy = dudy_fdm
dduddx = dduddx_fdm
dduddy = dduddy_fdm
inn_now2 = a1water
dadx = dx(
inn_now2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dady = dx(
inn_now2, dx=dxf, channel=0, dim=1, order=1, padding="replication"
)
flux = (
(dadx * dudx) + (a1water * dduddx) + (dady * dudy) + (a1water * dduddy)
)
fifth = poro * (dsw / dta)
toge = flux + finusew
darcy_saturation = fifth - toge
# Zero outer boundary
# darcy_saturation = F.pad(darcy_saturation[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_saturation = dxf * darcy_saturation * 1e-7
s_loss = darcy_saturation
# Slower but more accurate implementation
elif self.approach == 2:
u = u * self.pini_alt
pini = pini * self.pini_alt
# Pressure equation Loss
fin = fin * self.UIR
finwater = finwater * self.UIR
cuda = 0
device = torch.device(
f"cuda:{cuda}" if torch.cuda.is_available() else "cpu"
)
# print(pressurey.shape)
p_loss = torch.zeros_like(u).to(device, torch.float32)
s_loss = torch.zeros_like(u).to(device, torch.float32)
# print(sat.shape)
# output_var = dict()
for zig in range(sat.shape[0]):
for count in range(sat.shape[1]):
if count == 0:
prior_sat = sini[zig, 0, :, :][None, None, :, :]
prior_pressure = pini[zig, 0, :, :][None, None, :, :]
else:
prior_sat = sat[zig, (count - 1), :, :][None, None, :, :]
prior_pressure = u[zig, count - 1, :, :][None, None, :, :]
pressure = u[zig, count, :, :][None, None, :, :]
water_sat = sat[zig, count, :, :][None, None, :, :]
finuse = fin
a = perm[zig, 0, :, :][None, None, :, :]
v_min, v_max = self.LUB, self.HUB
new_min, new_max = self.aay, self.bby
m = (new_max - new_min) / (v_max - v_min)
b = new_min - m * v_min
a = m * a + b
S = torch.div(
torch.sub(prior_sat, self.SWI, alpha=1),
(1 - self.SWI - self.SWR),
)
# Pressure equation Loss
Mw = torch.divide(
torch.square(S), (self.UW * self.BW)
) # Water mobility
Mo = torch.div(
torch.square(
torch.sub(torch.ones(S.shape, device=u.device), S)
),
(self.UO * self.BO),
)
Mt = Mw + Mo
a1 = torch.mul(Mt, a) # Effective permeability
ua = pressure
a2 = a1
dyf = 1.0 / u.shape[3]
# FDM gradients
dudx_fdm = dx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy_fdm = dx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dduddx_fdm = ddx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dduddy_fdm = ddx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dcdx = dx(
a2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dcdy = dx(
a2, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
# compute darcy equation
darcy_pressure = (
finuse[zig, 0, :, :][None, None, :, :]
+ (dcdx * dudx_fdm)
+ (a2 * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (a2 * dduddy_fdm)
)
# Zero outer boundary
# darcy_pressure = F.pad(darcy_pressure[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_pressure = dxf * darcy_pressure * 1e-7
p_loss[zig, count, :, :] = darcy_pressure
# output_var["darcy_pressure"] = torch.mean(p_loss,dim = 0)[None,:,:,:]
# Saturation equation Loss
finuse = finwater[zig, 0, :, :][None, None, :, :]
dsw = water_sat - prior_sat
dsw = torch.clip(dsw, 0.001, None)
dta = dtin[zig, 0, :, :][None, None, :, :]
Mw = torch.divide(
torch.square(S), (self.UW * self.BW)
) # Water mobility
Mt = Mw
a1 = torch.mul(Mt, a) # Effective permeability to water
ua = pressure
a2 = a1
dudx = dx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dudy = dx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dduddx = ddx(
ua, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dduddy = ddx(
ua, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
dadx = dx(
a2, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dady = dx(
a2, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
flux = (dadx * dudx) + (a2 * dduddx) + (dady * dudy) + (a2 * dduddy)
# flux = flux[:,0,:,:]
# temp = dsw_dt
# fourth = poro * CFW * prior_sat * (dsp/dta)
fifth = poro[zig, 0, :, :][None, None, :, :] * (dsw / dta)
toge = flux + finuse
darcy_saturation = fifth - toge
# Zero outer boundary
# darcy_saturation = F.pad(darcy_saturation[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
darcy_saturation = dxf * darcy_saturation * 1e-7
# print(darcy_saturation.shape)
s_loss[zig, count, :, :] = darcy_saturation
output_var = {"pressured": p_loss, "saturationd": s_loss}
return output_var
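# Note (added for clarity): both branches emit the same physics residuals.
# "pressured" discretizes q + grad(k*Mt) . grad(p) + k*Mt * laplacian(p) ~ 0
# (the two-phase pressure equation expanded by the product rule), and
# "saturationd" discretizes phi * dSw/dt - div(k*Mw * grad p) - qw ~ 0; the
# dxf * 1e-7 factor is an empirical loss-magnitude scaling, as coded. Approach 3
# has no branch here, so only approaches 1 and 2 are valid in this module.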
# [pde-loss]
@modulus.sym.main(config_path="conf", config_name="config_PINO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 2D PHYSICS DRIVEN AFNO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
oldfolder = os.getcwd()
os.chdir(oldfolder)
default = None
while True:
default = int(
input("Select 1 = use default values | 2 = Use user defined values \n")
)
if (default > 2) or (default < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-2")
else:
break
if not os.path.exists(to_absolute_path("../PACKETS")):
os.makedirs(to_absolute_path("../PACKETS"))
else:
pass
if default == 1:
approach = 1
else:
approach = None
while True:
print("Remark: Option 3 is not fully computed")
approach = int(
input(
"Select computation of spatial gradients -\n\
1 = Approximate and fast computation\n\
2 = Exact but slightly slower computation using FDM\n\
3 = Exact gradient using FNO\n: "
)
)
if (approach > 3) or (approach < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-3")
else:
break
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY  # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ  # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
CFO = cp.float32(cfg.custom.NVRS.CFO) # oil compressibility in 1/psi
IWSw = cfg.custom.NVRS.IWSw # initial water saturation
pini_alt = cfg.custom.NVRS.pini_alt
P1 = cp.float32(pini_alt) # Bubble point pressure psia
PB = P1
mpor = cfg.custom.NVRS.mpor
hpor = cfg.custom.NVRS.hpor # minimum and maximum porosity
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
PATM = cp.float32(cfg.custom.NVRS.PATM) # Atmospheric pressure in psi
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
Low_K, High_K = aay, bby
    batch_size = cfg.custom.NVRS.batch_size  # size of simulated labelled data to run
timmee = (
cfg.custom.NVRS.timmee
) # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = (
cfg.custom.NVRS.max_t
) # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
factorr = cfg.custom.NVRS.factorr # from [0 1] excluding the limits for PermZ
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR  # upper injection rate
    input_channel = (
        cfg.custom.NVRS.input_channel
    )  # [Perm, Q, Qw, Phi, dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1I-27_S53ORRFB_hIN_41r3Ntc6PpOE40",
to_absolute_path("../PACKETS/Training4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1G4Cvg8eIObyBK0eoo7iX-0hhMTnpJktj",
to_absolute_path("../PACKETS/Test4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Water saturation
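    # X_data1 channel layout: 0 = perm, 1 = Q, 2 = Qw, 3 = Phi, 4 = Time,
    # 5 = Pini, 6 = Swini; data_use1 stacks steppi pressure maps followed by
    # steppi water-saturation maps, unpacked by the loop below.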
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data1[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data1[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data1[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data1[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data1[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data1[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use1[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data2[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data2[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data2[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data2[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data2[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data2[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use2[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
    # load training / test data
input_keys = [
Key("perm", scale=(3.46327e-01, 3.53179e-01)),
Key("Q", scale=(1.94683e-03, 3.70558e-02)),
Key("Qw", scale=(2.03866e-03, 3.70199e-02)),
Key("Phi", scale=(1.73163e-01, 1.76590e-01)),
Key("Time", scale=(1.66667e-02, 7.45058e-09)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(2.00000e-01, 4.91738e-07)),
]
output_keys_pressure = [Key("pressure", scale=(2.87008e-01, 1.85386e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.12903e-01, 1.79786e-01))]
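    # The (shift, scale) pairs above appear to be per-channel normalization
    # statistics (roughly mean/std of each field); they are dataset-specific
    # constants rather than tunable hyperparameters.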
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
# add additional constraining values for darcy variable
outvar_train_pressure["pressured"] = np.zeros_like(
outvar_train_pressure["pressure"]
)
outvar_train_saturation["saturationd"] = np.zeros_like(
outvar_train_saturation["water_sat"]
)
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# [init-node]
# Define AFNO model for forward model (pressure)
afno_pressure = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("pressure", size=steppi)],
(nx, ny),
patch_size=3,
)
# Define AFNO model for forward model (saturation)
afno_saturation = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("water_sat", size=steppi)],
(nx, ny),
patch_size=3,
)
# if approach ==3:
# derivatives = [
# Key("pressure", derivatives=[Key("x")]),
# Key("pressure", derivatives=[Key("y")]),
# Key("pressure", derivatives=[Key("x"), Key("x")]),
# Key("pressure", derivatives=[Key("y"), Key("y")]),
# ]
# afno_pressure.add_pino_gradients(
# derivatives=derivatives,
# domain_length=[nx, ny],
# )
inputs = [
"perm",
"Q",
"Qw",
"Phi",
"Time",
"Pini",
"Swini",
"pressure",
"water_sat",
]
# if approach ==3:
# inputs += [
# "pressure__x",
# "pressure__y",
# ]
darcyy = Node(
inputs=inputs,
outputs=[
"pressured",
"saturationd",
],
evaluate=Black_oil(
UIR,
pini_alt,
LUB,
HUB,
aay,
bby,
SWI,
SWR,
UW,
BW,
UO,
BO,
MAXZ,
nx,
ny,
approach,
),
name="Darcy node",
)
nodes = (
[darcyy]
+ [afno_pressure.make_node("afnop_forward_model_pressure", jit=cfg.jit)]
+ [afno_saturation.make_node("afnop_forward_model_saturation", jit=cfg.jit)]
)
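    # The graph wires both AFNO surrogates into the physics node: each forward
    # pass predicts pressure and water_sat, which Black_oil consumes to emit
    # "pressured"/"saturationd" residuals; the supervised constraints below then
    # penalize both the data misfit and these residuals (targets of zero).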
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
# [constraint]
# add validator
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=1,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=1,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/reservoir_simulation/2D/src/Forward_problem_AFNOP.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Nvidia finite-volume reservoir simulator with a flexible solver
(AMG) for the well-posed pressure and saturation inverse problem
Geostatistics packages are also provided
@Author: Clement Etienam
"""
print(".........................IMPORT SOME LIBRARIES.....................")
import os
import numpy as np
def is_available():
"""
    Check for an NVIDIA GPU with the nvidia-smi command.
    A return code of 0 means nvidia-smi ran successfully (driver installed);
    any other code means no NVIDIA GPU is available.
"""
code = os.system("nvidia-smi")
return code
Yet = is_available()
if Yet == 0:
print("GPU Available with CUDA")
try:
import pyamgx
    except ImportError:
pyamgx = None
import cupy as cp
from numba import cuda
print(cuda.detect()) # Print the GPU information
import tensorflow as tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.InteractiveSession(config=config)
# import pyamgx
from cupyx.scipy.sparse import csr_matrix, spmatrix
clementtt = 0
else:
print("No GPU Available")
import numpy as cp
from scipy.sparse import csr_matrix
clementtt = 1
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import MiniBatchKMeans
import os.path
import torch
from scipy import interpolate
import multiprocessing
import mpslib as mps
import numpy.matlib
from scipy.spatial.distance import cdist
from pyDOE import lhs
import matplotlib.colors
from matplotlib import cm
from shutil import rmtree
from kneed import KneeLocator
import numpy
# from PIL import Image
from scipy.fftpack import dct
import matplotlib.lines as mlines
# os.environ['KERAS_BACKEND'] = 'tensorflow'
import time
import random
from datetime import timedelta
# import dolfin as df
import sys
from numpy import *
import scipy.optimize.lbfgsb as lbfgsb
import numpy.linalg
from numpy.linalg import norm
from scipy.fftpack.realtransforms import idct
import numpy.ma as ma
from matplotlib.font_manager import FontProperties
import logging
import matplotlib as mpl
from FyeldGenerator import generate_field
from imresize import *
import warnings
warnings.filterwarnings("ignore")
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # I have just 1 GPU
from cpuinfo import get_cpu_info
# Get a dict describing the CPU
s = get_cpu_info()
print("Cpu info")
for k, v in s.items():
print(f"\t{k}: {v}")
cores = multiprocessing.cpu_count()
import math
logger = logging.getLogger(__name__)
# numpy.random.seed(99)
print(" ")
print(" This computer has %d cores, which will all be utilised in parallel " % cores)
print(" ")
print("......................DEFINE SOME FUNCTIONS.....................")
def compute_metrics(y_true, y_pred):
y_true_mean = np.mean(y_true)
TSS = np.sum((y_true - y_true_mean) ** 2)
RSS = np.sum((y_true - y_pred) ** 2)
R2 = 1 - (RSS / TSS)
L2_accuracy = 1 - np.sqrt(RSS) / np.sqrt(TSS)
return R2 * 100, L2_accuracy * 100
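# Quick sanity check (a minimal sketch): a perfect prediction gives 100% on both
# metrics, since RSS = 0 implies R2 = 1 and L2 accuracy = 1.
#   >>> y = np.linspace(0.0, 1.0, 10)
#   >>> compute_metrics(y, y)
#   (100.0, 100.0)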
def Add_marker(plt, XX, YY, locc):
"""
Function to add marker to given coordinates on a matplotlib plot
Parameters:
plt: a matplotlib.pyplot object to add the markers to
XX: a numpy array of X coordinates
YY: a numpy array of Y coordinates
locc: a numpy array of locations where markers need to be added
Return:
None
"""
# iterate through each location
for i in range(locc.shape[0]):
a = locc[i, :]
xloc = int(a[0])
yloc = int(a[1])
# if the location type is 2, add an upward pointing marker
if a[2] == 2:
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=100,
marker="^",
color="white",
)
# otherwise, add a downward pointing marker
else:
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=100,
marker="v",
color="white",
)
def check_cupy_sparse_matrix(A):
"""
Function to check if a matrix is a Cupy sparse matrix and convert it to a CSR matrix if necessary
Parameters:
A: a sparse matrix
Return:
A: a CSR matrix
"""
if not isinstance(A, spmatrix):
# Convert the matrix to a csr matrix if it is not already a cupy sparse matrix
A = csr_matrix(A)
return A
def Plot_RSM_percentile(pertoutt, True_mat, Namesz):
timezz = True_mat[:, 0].reshape(-1, 1)
P10 = pertoutt
plt.figure(figsize=(40, 40))
plt.subplot(4, 4, 1)
plt.plot(timezz, True_mat[:, 1], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 1], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 2)
plt.plot(timezz, True_mat[:, 2], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 2], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 3)
plt.plot(timezz, True_mat[:, 3], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 3], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 4)
plt.plot(timezz, True_mat[:, 4], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 4], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 5)
plt.plot(timezz, True_mat[:, 5], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 5], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 6)
plt.plot(timezz, True_mat[:, 6], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 6], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 7)
plt.plot(timezz, True_mat[:, 7], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 7], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 8)
plt.plot(timezz, True_mat[:, 8], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 8], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 9)
plt.plot(timezz, True_mat[:, 9], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 9], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 10)
plt.plot(timezz, True_mat[:, 10], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 10], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 11)
plt.plot(timezz, True_mat[:, 11], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 11], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 12)
plt.plot(timezz, True_mat[:, 12], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 12], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 13)
plt.plot(timezz, True_mat[:, 13], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 13], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 14)
plt.plot(timezz, True_mat[:, 14], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 14], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 15)
plt.plot(timezz, True_mat[:, 15], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 15], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(4, 4, 16)
plt.plot(timezz, True_mat[:, 16], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 16], color="blue", lw="2", label="PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
# os.chdir('RESULTS')
    plt.savefig(Namesz)  # save as png without displaying the figure
# os.chdir(oldfolder)
plt.clf()
plt.close()
def Plot_RSM_percentile2(pertoutt, P12, True_mat, Namesz):
timezz = True_mat[:, 0].reshape(-1, 1)
P10 = pertoutt
plt.figure(figsize=(20, 20))
plt.subplot(3, 4, 1)
plt.plot(timezz, True_mat[:, 5], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 5], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 5], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 2)
plt.plot(timezz, True_mat[:, 6], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 6], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 6], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 3)
plt.plot(timezz, True_mat[:, 7], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 7], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 7], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 4)
plt.plot(timezz, True_mat[:, 8], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 8], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 8], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{oil}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 5)
plt.plot(timezz, True_mat[:, 9], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 9], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 9], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 6)
plt.plot(timezz, True_mat[:, 10], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 10], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 10], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 7)
plt.plot(timezz, True_mat[:, 11], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 11], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 11], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 8)
plt.plot(timezz, True_mat[:, 12], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 12], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 12], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$Q_{water}(bbl/day)$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 9)
plt.plot(timezz, True_mat[:, 13], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 13], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 13], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P1", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 10)
plt.plot(timezz, True_mat[:, 14], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 14], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 14], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 11)
plt.plot(timezz, True_mat[:, 15], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 15], color="blue", lw="2", label=" MAP PINO Model")
plt.plot(timezz, P12[:, 15], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P3", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
plt.subplot(3, 4, 12)
plt.plot(timezz, True_mat[:, 16], color="red", lw="2", label="model")
plt.plot(timezz, P10[:, 16], color="blue", lw="2", label="MAP PINO Model")
plt.plot(timezz, P12[:, 16], color="k", lw="2", label="MEAN PINO Model")
plt.xlabel("Time (days)", fontsize=13)
plt.ylabel("$WWCT{%}$", fontsize=13)
# plt.ylim((0,25000))
plt.title("P4", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
plt.legend()
# os.chdir('RESULTS')
    plt.savefig(Namesz)  # save as png without displaying the figure
# os.chdir(oldfolder)
plt.clf()
plt.close()
def Plot_performance(
PINN, PINN2, trueF, nx, ny, namet, UIR, itt, dt, MAXZ, pini_alt, steppi, wells
):
look = (PINN[itt, :, :]) * pini_alt
look_sat = PINN2[itt, :, :]
look_oil = 1 - look_sat
lookf = (trueF[itt, :, :]) * pini_alt
lookf_sat = trueF[itt + steppi, :, :]
lookf_oil = 1 - lookf_sat
diff1 = abs(look - lookf)
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
XX, YY = np.meshgrid(np.arange(nx), np.arange(ny))
plt.figure(figsize=(12, 12))
plt.subplot(3, 3, 1)
plt.pcolormesh(XX.T, YY.T, look, cmap="jet")
plt.title("Pressure PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(np.min(np.reshape(lookf, (-1,))), np.max(np.reshape(lookf, (-1,))))
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 2)
plt.pcolormesh(XX.T, YY.T, lookf, cmap="jet")
plt.title("Pressure CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 3)
plt.pcolormesh(XX.T, YY.T, diff1, cmap="jet")
plt.title("Pressure (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" Pressure (psia)", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 4)
plt.pcolormesh(XX.T, YY.T, look_sat, cmap="jet")
plt.title("water_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(np.min(np.reshape(lookf_sat, (-1,))), np.max(np.reshape(lookf_sat, (-1,))))
cbar1.ax.set_ylabel(" water_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 5)
plt.pcolormesh(XX.T, YY.T, lookf_sat, cmap="jet")
plt.title("water_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 6)
plt.pcolormesh(XX.T, YY.T, diff1_wat, cmap="jet")
plt.title("water_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" water sat ", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 7)
plt.pcolormesh(XX.T, YY.T, look_oil, cmap="jet")
plt.title("oil_sat PINO", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
plt.clim(np.min(np.reshape(lookf_oil, (-1,))), np.max(np.reshape(lookf_oil, (-1,))))
cbar1.ax.set_ylabel(" oil_sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 8)
plt.pcolormesh(XX.T, YY.T, lookf_oil, cmap="jet")
plt.title("oil_sat CFD", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.subplot(3, 3, 9)
plt.pcolormesh(XX.T, YY.T, diff1_oil, cmap="jet")
plt.title("oil_sat (CFD - PINO)", fontsize=13)
plt.ylabel("Y", fontsize=13)
plt.xlabel("X", fontsize=13)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
cbar1 = plt.colorbar()
cbar1.ax.set_ylabel(" oil sat ", fontsize=13)
Add_marker(plt, XX, YY, wells)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = namet + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
# Geostatistics module
def intial_ensemble(Nx, Ny, Nz, N, permx):
"""
Geostatistics module
Function to generate an initial ensemble of permeability fields using Multiple-Point Statistics (MPS)
Parameters:
Nx: an integer representing the number of grid cells in the x-direction
Ny: an integer representing the number of grid cells in the y-direction
Nz: an integer representing the number of grid cells in the z-direction
N: an integer representing the number of realizations in the ensemble
permx: a numpy array representing the permeability field TI
Return:
ensemble: a numpy array representing the ensemble of permeability fields
"""
    # create an MPSlib instance using the 'mps_snesim_tree' method
    O = mps.mpslib(method="mps_snesim_tree")
# set the number of realizations to N
O.par["n_real"] = N
    # set the training image (TI) to the supplied permeability field
    O.ti = permx
# set the simulation grid size
O.par["simulation_grid_size"] = (Ny, Nx, Nz)
# run MPS simulation in parallel
O.run_parallel()
# get the ensemble of realizations
ensemble = O.sim
# reformat the ensemble
ens = []
for kk in range(N):
temp = np.reshape(ensemble[kk], (-1, 1), "F")
ens.append(temp)
ensemble = np.hstack(ens)
# remove temporary files generated during MPS simulation
from glob import glob
for f3 in glob("thread*"):
rmtree(f3)
for f4 in glob("*mps_snesim_tree_*"):
os.remove(f4)
for f4 in glob("*ti_thread_*"):
os.remove(f4)
return ensemble
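# Illustrative sketch only (not part of the original pipeline): how
# intial_ensemble() above is typically driven. Assumes the scikit-mps
# import (`mps`) used inside it is available and the MPSlib binaries are
# installed; the training image here is a made-up placeholder.
def _example_mps_ensemble():
    ti = np.random.randn(39, 39)  # hypothetical 2-D training image
    ens = intial_ensemble(Nx=33, Ny=33, Nz=1, N=5, permx=ti)
    return ens  # one column per realization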
def initial_ensemble_gaussian(Nx, Ny, Nz, N, minn, maxx):
"""
Function to generate an initial ensemble of permeability fields using Gaussian distribution
Parameters:
Nx: an integer representing the number of grid cells in the x-direction
Ny: an integer representing the number of grid cells in the y-direction
Nz: an integer representing the number of grid cells in the z-direction
N: an integer representing the number of realizations in the ensemble
minn: a float representing the minimum value of the permeability field
maxx: a float representing the maximum value of the permeability field
Return:
fensemble: a numpy array representing the ensemble of permeability fields
"""
    shape = (Nx, Ny)
    # NOTE: generate_field expects a callable statistic; rely on the
    # module-level distrib() (complex Gaussian draws) defined below rather
    # than shadowing it with a string here.
fensemble = np.zeros((Nx * Ny * Nz, N))
for k in range(N):
fout = []
# generate a 3D field
for j in range(Nz):
field = generate_field(distrib, Pkgen(3), shape)
field = imresize(field, output_shape=shape)
foo = np.reshape(field, (-1, 1), "F")
fout.append(foo)
fout = np.vstack(fout)
# scale the field to the desired range
clfy = MinMaxScaler(feature_range=(minn, maxx))
(clfy.fit(fout))
fout = clfy.transform(fout)
fensemble[:, k] = np.ravel(fout)
return fensemble
def Pkgen(n):
def Pk(k):
return np.power(k, -n)
return Pk
# Draw samples from a normal distribution
def distrib(shape):
a = np.random.normal(loc=0, scale=1, size=shape)
b = np.random.normal(loc=0, scale=1, size=shape)
return a + 1j * b
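# Illustrative sketch: one 2-D random field with a k^-3 power spectrum,
# combining the Pkgen and distrib helpers above. Assumes the
# FyeldGenerator-style generate_field imported earlier in this file, with
# the same (statistic, power_spectrum, shape) call pattern used above.
def _example_random_field():
    field = generate_field(distrib, Pkgen(3), (32, 32))
    return field  # 32 x 32 numpy array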
def Peaceman_well(
inn,
ooutp,
oouts,
MAXZ,
mazw,
s1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
nz,
):
"""
Calculates the pressure and flow rates for an injection and production well using the Peaceman model.
Args:
- inn (dictionary): dictionary containing the input parameters (including permeability and injection/production rates)
- ooutp (numpy array): 4D numpy array containing pressure values for each time step and grid cell
- oouts (numpy array): 4D numpy array containing saturation values for each time step and grid cell
- MAXZ (float): length of the reservoir in the z-direction
- mazw (float): the injection/production well location in the z-direction
- s1 (float): the length of the computational domain in the z-direction
    - LUB (float): the lower bound of the rescaled permeability
    - HUB (float): the upper bound of the rescaled permeability
    - aay (float): the lower bound of the original permeability
    - bby (float): the upper bound of the original permeability
- DX (float): the cell size in the x-direction
- steppi (int): number of time steps
- pini_alt (float): the initial pressure
- SWI (float): the initial water saturation
- SWR (float): the residual water saturation
- UW (float): the viscosity of water
- BW (float): the formation volume factor of water
- DZ (float): the cell thickness in the z-direction
- rwell (float): the well radius
- skin (float): the skin factor
- UO (float): the viscosity of oil
- BO (float): the formation volume factor of oil
- pwf_producer (float): the desired pressure at the producer well
- dt (float): the time step
- N_inj (int): the number of injection wells
- N_pr (int): the number of production wells
- nz (int): the number of cells in the z-direction
Returns:
    - overr (numpy array): an array with columns [time, BHP, qoil, qwater, wct] for each time step
"""
Injector_location = np.where(
inn["Qw"][0, 0, :, :].detach().cpu().numpy().ravel() > 0
)[0]
producer_location = np.where(
inn["Q"][0, 0, :, :].detach().cpu().numpy().ravel() < 0
)[0]
PERM = rescale_linear_pytorch_numpy(
np.reshape(inn["perm"][0, 0, :, :].detach().cpu().numpy(), (-1,), "F"),
LUB,
HUB,
aay,
bby,
)
kuse_inj = PERM[Injector_location]
kuse_prod = PERM[producer_location]
RE = 0.2 * DX
Baa = []
Timz = []
for kk in range(steppi):
Ptito = ooutp[:, kk, :, :]
Stito = oouts[:, kk, :, :]
# average_pressure = np.mean(Ptito.ravel()) * pini_alt
average_pressure = (Ptito.ravel()[producer_location]) * pini_alt
p_inj = (Ptito.ravel()[Injector_location]) * pini_alt
# p_prod = (Ptito.ravel()[producer_location] ) * pini_alt
S = Stito.ravel().reshape(-1, 1)
Sout = (S - SWI) / (1 - SWI - SWR)
        Krw = Sout**2  # water relative permeability (Corey)
        Kro = (1 - Sout) ** 2  # oil relative permeability (Corey)
krwuse = Krw.ravel()[Injector_location]
krwusep = Krw.ravel()[producer_location]
krouse = Kro.ravel()[producer_location]
up = UW * BW
down = 2 * np.pi * kuse_inj * krwuse * DZ
right = np.log(RE / rwell) + skin
temp = (up / down) * right
# temp[temp ==-inf] = 0
Pwf = p_inj + temp
Pwf = np.abs(Pwf)
BHP = np.sum(np.reshape(Pwf, (-1, N_inj), "C"), axis=0) / nz
up = UO * BO
down = 2 * np.pi * kuse_prod * krouse * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qoil = np.abs(-(drawdown * J))
qoil = np.sum(np.reshape(qoil, (-1, N_pr), "C"), axis=0) / nz
up = UW * BW
down = 2 * np.pi * kuse_prod * krwusep * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qwater = np.abs(-(drawdown * J))
qwater = np.sum(np.reshape(qwater, (-1, N_pr), "C"), axis=0) / nz
# qwater[qwater==0] = 0
# water cut
wct = (qwater / (qwater + qoil)) * np.float32(100)
timz = ((kk + 1) * dt) * MAXZ
# timz = timz.reshape(1,1)
qs = [BHP, qoil, qwater, wct]
# print(qs.shape)
qs = np.asarray(qs)
qs = qs.reshape(1, -1)
Baa.append(qs)
Timz.append(timz)
Baa = np.vstack(Baa)
Timz = np.vstack(Timz)
overr = np.hstack([Timz, Baa])
return overr # np.vstack(B)
def Peaceman_well2(
inn,
ooutp,
oouts,
MAXZ,
mazw,
s1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
nz,
):
"""
Calculates the pressure and flow rates for an injection and production well using the Peaceman model.
Args:
- inn (dictionary): dictionary containing the input parameters (including permeability and injection/production rates)
- ooutp (numpy array): 4D numpy array containing pressure values for each time step and grid cell
- oouts (numpy array): 4D numpy array containing saturation values for each time step and grid cell
- MAXZ (float): length of the reservoir in the z-direction
- mazw (float): the injection/production well location in the z-direction
- s1 (float): the length of the computational domain in the z-direction
    - LUB (float): the lower bound of the rescaled permeability
    - HUB (float): the upper bound of the rescaled permeability
    - aay (float): the lower bound of the original permeability
    - bby (float): the upper bound of the original permeability
- DX (float): the cell size in the x-direction
- steppi (int): number of time steps
- pini_alt (float): the initial pressure
- SWI (float): the initial water saturation
- SWR (float): the residual water saturation
- UW (float): the viscosity of water
- BW (float): the formation volume factor of water
- DZ (float): the cell thickness in the z-direction
- rwell (float): the well radius
- skin (float): the skin factor
- UO (float): the viscosity of oil
- BO (float): the formation volume factor of oil
- pwf_producer (float): the desired pressure at the producer well
- dt (float): the time step
- N_inj (int): the number of injection wells
- N_pr (int): the number of production wells
- nz (int): the number of cells in the z-direction
Returns:
    - overr (numpy array): an array with columns [time, BHP, qoil, qwater, wct] for each time step
"""
Injector_location = np.where(
inn["Qw"][0, 0, :, :].detach().cpu().numpy().ravel() > 0
)[0]
producer_location = np.where(
inn["Q"][0, 0, :, :].detach().cpu().numpy().ravel() < 0
)[0]
# PERM = np.reshape(inn["perm"][0,0,:,:].detach().cpu().numpy(),(-1,),'F')
PERM = rescale_linear_pytorch_numpy(
np.reshape(inn["perm"][0, 0, :, :].detach().cpu().numpy(), (-1,), "F"),
LUB,
HUB,
aay,
bby,
)
kuse_inj = PERM[Injector_location]
kuse_prod = PERM[producer_location]
RE = 0.2 * DX
Baa = []
Timz = []
for kk in range(steppi):
Ptito = ooutp[:, kk, :, :]
Stito = oouts[:, kk, :, :]
# average_pressure = np.mean(Ptito.ravel()) * pini_alt
average_pressure = (Ptito.ravel()[producer_location]) * pini_alt
p_inj = (Ptito.ravel()[Injector_location]) * pini_alt
# p_prod = (Ptito.ravel()[producer_location] ) * pini_alt
S = Stito.ravel().reshape(-1, 1)
Sout = (S - SWI) / (1 - SWI - SWR)
        Krw = Sout**2  # water relative permeability (Corey)
        Kro = (1 - Sout) ** 2  # oil relative permeability (Corey)
krwuse = Krw.ravel()[Injector_location]
krwusep = Krw.ravel()[producer_location]
krouse = Kro.ravel()[producer_location]
up = UW * BW
down = 2 * np.pi * kuse_inj * krwuse * DZ
right = np.log(RE / rwell) + skin
temp = (up / down) * right
# temp[temp ==-inf] = 0
Pwf = p_inj + temp
Pwf = np.abs(Pwf)
BHP = np.sum(np.reshape(Pwf, (-1, N_inj), "C"), axis=0) / nz
up = UO * BO
down = 2 * np.pi * kuse_prod * krouse * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qoil = np.abs(-(drawdown * J))
qoil = np.sum(np.reshape(qoil, (-1, N_pr), "C"), axis=0) / nz
up = UW * BW
down = 2 * np.pi * kuse_prod * krwusep * DZ
right = np.log(RE / rwell) + skin
J = down / (up * right)
# drawdown = p_prod - pwf_producer
drawdown = average_pressure - pwf_producer
qwater = np.abs(-(drawdown * J))
qwater = np.sum(np.reshape(qwater, (-1, N_pr), "C"), axis=0) / nz
# qwater[qwater==0] = 0
# water cut
wct = (qwater / (qwater + qoil)) * np.float32(100)
timz = ((kk + 1) * dt) * MAXZ
# timz = timz.reshape(1,1)
qs = [BHP, qoil, qwater, wct]
# print(qs.shape)
qs = np.asarray(qs)
qs = qs.reshape(1, -1)
Baa.append(qs)
Timz.append(timz)
Baa = np.vstack(Baa)
Timz = np.vstack(Timz)
overr = np.hstack([Timz, Baa])
return overr # np.vstack(B)
# Points generation
def test_points_gen(n_test, nder, interval=(-1.0, 1.0), distrib="random", **kwargs):
return {
"random": lambda n_test, nder: (interval[1] - interval[0])
* np.random.rand(n_test, nder)
+ interval[0],
"lhs": lambda n_test, nder: (interval[1] - interval[0])
* lhs(nder, samples=n_test, **kwargs)
+ interval[0],
}[distrib.lower()](n_test, nder)
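# Illustrative sketch: 100 sample points in [-1, 1]^3 via plain Monte
# Carlo and via Latin hypercube sampling (the "lhs" branch assumes
# pyDOE's lhs, imported near the top of this file).
def _example_test_points():
    rnd = test_points_gen(100, 3, distrib="random")
    latin = test_points_gen(100, 3, distrib="lhs")
    return rnd, latin  # each of shape (100, 3)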
def getoptimumk(X):
distortions = []
Kss = range(1, 10)
for k in Kss:
kmeanModel = MiniBatchKMeans(n_clusters=k).fit(X)
kmeanModel.fit(X)
distortions.append(
sum(np.min(cdist(X, kmeanModel.cluster_centers_, "euclidean"), axis=1))
/ X.shape[0]
)
myarray = np.array(distortions)
knn = KneeLocator(
Kss, myarray, curve="convex", direction="decreasing", interp_method="interp1d"
)
kuse = knn.knee
# Plot the elbow
plt.figure(figsize=(10, 10))
plt.plot(Kss, distortions, "bx-")
plt.xlabel("cluster size")
plt.ylabel("Distortion")
plt.title("optimal n_clusters for machine")
plt.savefig("machine_elbow.png")
plt.clf()
return kuse
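# Illustrative sketch: pick a cluster count for toy data with the elbow
# heuristic above. Assumes MiniBatchKMeans (sklearn), cdist (scipy) and
# KneeLocator (kneed) are imported at module level, as used inside
# getoptimumk; writes machine_elbow.png as a side effect, and the knee
# may be None for structureless data.
def _example_optimal_k():
    X = np.random.rand(500, 2)
    return getoptimumk(X)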
class LpLoss(object):
"""
loss function with rel/abs Lp loss
"""
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type must be positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
# Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h ** (self.d / self.p)) * torch.norm(
x.view(num_examples, -1) - y.view(num_examples, -1), self.p, 1
)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(
x.reshape(num_examples, -1) - y.reshape(num_examples, -1), self.p, 1
)
y_norms = torch.norm(y.reshape(num_examples, -1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms / y_norms)
else:
return torch.sum(diff_norms / y_norms)
return diff_norms / y_norms
def __call__(self, x, y):
return self.rel(x, y)
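# Illustrative sketch: the relative L2 error between two batched fields,
# as LpLoss is typically used for FNO/PINO training elsewhere in this
# file. Assumes the module-level torch import; shapes are placeholders.
def _example_lploss():
    pred = torch.rand(4, 33, 33)  # batch of 4 predicted fields
    true = torch.rand(4, 33, 33)  # matching reference fields
    loss = LpLoss(d=2, p=2)
    return loss(pred, true)  # mean relative L2 error over the batch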
# def H(y,t0=0):
# '''
# Step fn with step at t0
# '''
# h = np.zeros_like(y)
# args = tuple([slice(0,y.shape[i]) for i in y.ndim])
def smoothn(
y,
nS0=10,
axis=None,
smoothOrder=2.0,
sd=None,
verbose=False,
s0=None,
z0=None,
isrobust=False,
W=None,
s=None,
MaxIter=100,
TolZ=1e-3,
weightstr="bisquare",
):
if type(y) == ma.core.MaskedArray: # masked array
# is_masked = True
mask = y.mask
y = np.array(y)
y[mask] = 0.0
if np.any(W != None):
W = np.array(W)
W[mask] = 0.0
if np.any(sd != None):
W = np.array(1.0 / sd**2)
W[mask] = 0.0
sd = None
y[mask] = np.nan
if np.any(sd != None):
sd_ = np.array(sd)
mask = sd > 0.0
W = np.zeros_like(sd_)
W[mask] = 1.0 / sd_[mask] ** 2
sd = None
if np.any(W != None):
W = W / W.max()
sizy = y.shape
# sort axis
if axis == None:
axis = tuple(np.arange(y.ndim))
noe = y.size # number of elements
if noe < 2:
z = y
exitflag = 0
Wtot = 0
return z, s, exitflag, Wtot
# ---
# Smoothness parameter and weights
# if s != None:
# s = []
if np.all(W == None):
W = np.ones(sizy)
# if z0 == None:
# z0 = y.copy()
# ---
# "Weighting function" criterion
weightstr = weightstr.lower()
# ---
# Weights. Zero weights are assigned to not finite values (Inf or NaN),
# (Inf/NaN values = missing data).
IsFinite = np.array(np.isfinite(y)).astype(bool)
nof = IsFinite.sum() # number of finite elements
W = W * IsFinite
if any(W < 0):
raise RuntimeError("smoothn:NegativeWeights", "Weights must all be >=0")
else:
# W = W/np.max(W)
pass
# ---
# Weighted or missing data?
isweighted = any(W != 1)
# ---
# Robust smoothing?
# isrobust
# ---
# Automatic smoothing?
isauto = not s
# ---
# DCTN and IDCTN are required
try:
from scipy.fftpack.realtransforms import dct, idct
except:
z = y
exitflag = -1
Wtot = 0
return z, s, exitflag, Wtot
## Creation of the Lambda tensor
# ---
    # Lambda contains the eigenvalues of the difference matrix used in this
    # penalized least squares process.
axis = tuple(np.array(axis).flatten())
d = y.ndim
Lambda = np.zeros(sizy)
for i in axis:
        # create a 1 x d array (e.g. [1, 1] for a 2-D case)
siz0 = np.ones((1, y.ndim))[0].astype(int)
siz0[i] = sizy[i]
# cos(pi*(reshape(1:sizy(i),siz0)-1)/sizy(i)))
# (arange(1,sizy[i]+1).reshape(siz0) - 1.)/sizy[i]
Lambda = Lambda + (
np.cos(np.pi * (np.arange(1, sizy[i] + 1) - 1.0) / sizy[i]).reshape(siz0)
)
# else:
# Lambda = Lambda + siz0
Lambda = -2.0 * (len(axis) - Lambda)
if not isauto:
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
## Upper and lower bound for the smoothness parameter
# The average leverage (h) is by definition in [0 1]. Weak smoothing occurs
# if h is close to 1, while over-smoothing appears when h is near 0. Upper
# and lower bounds for h are given to avoid under- or over-smoothing. See
# equation relating h to the smoothness parameter (Equation #12 in the
# referenced CSDA paper).
N = sum(np.array(sizy) != 1)
# tensor rank of the y-array
hMin = 1e-6
hMax = 0.99
# (h/n)**2 = (1 + a)/( 2 a)
# a = 1/(2 (h/n)**2 -1)
# where a = sqrt(1 + 16 s)
# (a**2 -1)/16
try:
sMinBnd = np.sqrt(
(
((1 + np.sqrt(1 + 8 * hMax ** (2.0 / N))) / 4.0 / hMax ** (2.0 / N))
** 2
- 1
)
/ 16.0
)
sMaxBnd = np.sqrt(
(
((1 + np.sqrt(1 + 8 * hMin ** (2.0 / N))) / 4.0 / hMin ** (2.0 / N))
** 2
- 1
)
/ 16.0
)
except:
sMinBnd = None
sMaxBnd = None
## Initialize before iterating
# ---
Wtot = W
# --- Initial conditions for z
if isweighted:
# --- With weighted/missing data
# An initial guess is provided to ensure faster convergence. For that
# purpose, a nearest neighbor interpolation followed by a coarse
# smoothing are performed.
# ---
if z0 != None: # an initial guess (z0) has been provided
z = z0
else:
z = y # InitialGuess(y,IsFinite);
z[~IsFinite] = 0.0
else:
z = np.zeros(sizy)
# ---
z0 = z
y[~IsFinite] = 0
# arbitrary values for missing y-data
# ---
tol = 1.0
RobustIterativeProcess = True
RobustStep = 1
nit = 0
# --- Error on p. Smoothness parameter s = 10^p
errp = 0.1
# opt = optimset('TolX',errp);
# --- Relaxation factor RF: to speedup convergence
RF = 1 + 0.75 * isweighted
# ??
## Main iterative process
# ---
if isauto:
try:
xpost = np.array([(0.9 * np.log10(sMinBnd) + np.log10(sMaxBnd) * 0.1)])
        except:
            xpost = np.array([100.0])
else:
xpost = np.array([np.log10(s)])
while RobustIterativeProcess:
# --- "amount" of weights (see the function GCVscore)
aow = sum(Wtot) / noe
# 0 < aow <= 1
# ---
while tol > TolZ and nit < MaxIter:
if verbose:
print("tol", tol, "nit", nit)
nit = nit + 1
DCTy = dctND(Wtot * (y - z) + z, f=dct)
if isauto and not np.remainder(np.log2(nit), 1):
# ---
# The generalized cross-validation (GCV) method is used.
# We seek the smoothing parameter s that minimizes the GCV
# score i.e. s = Argmin(GCVscore).
# Because this process is time-consuming, it is performed from
# time to time (when nit is a power of 2)
# ---
# errp in here somewhere
# xpost,f,d = lbfgsb.fmin_l_bfgs_b(gcv,xpost,fprime=None,factr=10.,\
# approx_grad=True,bounds=[(log10(sMinBnd),log10(sMaxBnd))],\
# args=(Lambda,aow,DCTy,IsFinite,Wtot,y,nof,noe))
# if we have no clue what value of s to use, better span the
# possible range to get a reasonable starting point ...
                # only need to do it once though. nS0 is the number of samples used
if not s0:
ss = np.arange(nS0) * (1.0 / (nS0 - 1.0)) * (
np.log10(sMaxBnd) - np.log10(sMinBnd)
) + np.log10(sMinBnd)
g = np.zeros_like(ss)
for i, p in enumerate(ss):
g[i] = gcv(
p,
Lambda,
aow,
DCTy,
IsFinite,
Wtot,
y,
nof,
noe,
smoothOrder,
)
# print 10**p,g[i]
xpost = [ss[g == g.min()]]
# print '==============='
# print nit,tol,g.min(),xpost[0],s
# print '==============='
else:
xpost = [s0]
xpost, f, d = lbfgsb.fmin_l_bfgs_b(
gcv,
xpost,
fprime=None,
factr=1e7,
approx_grad=True,
bounds=[(np.log10(sMinBnd), np.log10(sMaxBnd))],
args=(Lambda, aow, DCTy, IsFinite, Wtot, y, nof, noe, smoothOrder),
)
s = 10 ** xpost[0]
# update the value we use for the initial s estimate
s0 = xpost[0]
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
z = RF * dctND(Gamma * DCTy, f=idct) + (1 - RF) * z
# if no weighted/missing data => tol=0 (no iteration)
tol = isweighted * norm(z0 - z) / norm(z)
z0 = z
# re-initialization
exitflag = nit < MaxIter
if isrobust: # -- Robust Smoothing: iteratively re-weighted process
# --- average leverage
h = np.sqrt(1 + 16.0 * s)
h = np.sqrt(1 + h) / np.sqrt(2) / h
h = h**N
# --- take robust weights into account
Wtot = W * RobustWeights(y - z, IsFinite, h, weightstr)
# --- re-initialize for another iterative weighted process
isweighted = True
tol = 1
nit = 0
# ---
RobustStep = RobustStep + 1
RobustIterativeProcess = RobustStep < 3
# 3 robust steps are enough.
else:
RobustIterativeProcess = False
# stop the whole process
## Warning messages
# ---
if isauto:
if abs(np.log10(s) - np.log10(sMinBnd)) < errp:
warning(
"MATLAB:smoothn:SLowerBound",
[
"s = %.3f " % (s)
+ ": the lower bound for s "
+ "has been reached. Put s as an input variable if required."
],
)
elif abs(np.log10(s) - np.log10(sMaxBnd)) < errp:
warning(
"MATLAB:smoothn:SUpperBound",
[
"s = %.3f " % (s)
+ ": the upper bound for s "
+ "has been reached. Put s as an input variable if required."
],
)
return z, s, exitflag, Wtot
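# Illustrative sketch: denoise a 1-D signal with smoothn(), letting GCV
# pick the smoothing parameter s. Assumes the bare names smoothn relies
# on (norm, lbfgsb, dct/idct) resolve via this file's module imports.
def _example_smoothn_1d():
    x = np.linspace(0, 2 * np.pi, 200)
    y = np.sin(x) + np.random.normal(scale=0.2, size=x.shape)
    z, s, exitflag, Wtot = smoothn(y.copy())  # copy: smoothn mutates y
    return z, s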
def warning(s1, s2):
print(s1)
print(s2[0])
## GCV score
# ---
# function GCVscore = gcv(p)
def gcv(p, Lambda, aow, DCTy, IsFinite, Wtot, y, nof, noe, smoothOrder):
# Search the smoothing parameter s that minimizes the GCV score
# ---
s = 10**p
Gamma = 1.0 / (1 + (s * abs(Lambda)) ** smoothOrder)
# --- RSS = Residual sum-of-squares
if aow > 0.9: # aow = 1 means that all of the data are equally weighted
# very much faster: does not require any inverse DCT
RSS = norm(DCTy * (Gamma - 1.0)) ** 2
else:
# take account of the weights to calculate RSS:
yhat = dctND(Gamma * DCTy, f=idct)
RSS = norm(np.sqrt(Wtot[IsFinite]) * (y[IsFinite] - yhat[IsFinite])) ** 2
# ---
TrH = sum(Gamma)
GCVscore = RSS / float(nof) / (1.0 - TrH / float(noe)) ** 2
return GCVscore
## Robust weights
# function W = RobustWeights(r,I,h,wstr)
def RobustWeights(r, I, h, wstr):
# weights for robust smoothing.
MAD = np.median(abs(r[I] - np.median(r[I])))
# median absolute deviation
u = abs(r / (1.4826 * MAD) / np.sqrt(1 - h))
# studentized residuals
if wstr == "cauchy":
c = 2.385
W = 1.0 / (1 + (u / c) ** 2)
# Cauchy weights
elif wstr == "talworth":
c = 2.795
W = u < c
# Talworth weights
else:
c = 4.685
W = (1 - (u / c) ** 2) ** 2.0 * ((u / c) < 1)
# bisquare weights
W[np.isnan(W)] = 0
return W
## Initial Guess with weighted/missing data
# function z = InitialGuess(y,I)
def InitialGuess(y, I):
# -- nearest neighbor interpolation (in case of missing values)
if any(~I):
try:
from scipy.ndimage.morphology import distance_transform_edt
# if license('test','image_toolbox')
# [z,L] = bwdist(I);
L = distance_transform_edt(1 - I)
z = y
z[~I] = y[L[~I]]
except:
# If BWDIST does not exist, NaN values are all replaced with the
# same scalar. The initial guess is not optimal and a warning
# message thus appears.
z = y
z[~I] = np.mean(y[I])
else:
z = y
# coarse fast smoothing
z = dctND(z, f=dct)
k = np.array(z.shape)
m = np.ceil(k / 10) + 1
d = []
    for i in range(len(k)):
d.append(np.arange(m[i], k[i]))
d = np.array(d).astype(int)
z[d] = 0.0
z = dctND(z, f=idct)
return z
def dctND(data, f=dct):
nd = len(data.shape)
if nd == 1:
return f(data, norm="ortho", type=2)
elif nd == 2:
return f(f(data, norm="ortho", type=2).T, norm="ortho", type=2).T
elif nd == 3:
return f(
f(f(data, norm="ortho", type=2, axis=0), norm="ortho", type=2, axis=1),
norm="ortho",
type=2,
axis=2,
)
elif nd == 4:
return f(
f(
f(f(data, norm="ortho", type=2, axis=0), norm="ortho", type=2, axis=1),
norm="ortho",
type=2,
axis=2,
),
norm="ortho",
type=2,
axis=3,
)
def peaks(n):
"""
Mimic basic of matlab peaks fn
"""
xp = np.arange(n)
[x, y] = np.meshgrid(xp, xp)
z = np.zeros_like(x).astype(float)
    for i in range(int(n / 5)):
x0 = random() * n
y0 = random() * n
sdx = random() * n / 4.0
sdy = sdx
c = random() * 2 - 1.0
f = np.exp(
-(((x - x0) / sdx) ** 2)
- ((y - y0) / sdy) ** 2
- (((x - x0) / sdx)) * ((y - y0) / sdy) * c
)
# f /= f.sum()
f *= random()
z += f
return z
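# Illustrative sketch: a 64 x 64 surface of random Gaussian bumps, handy
# as a test input for smoothn() above. Assumes random() is imported at
# module level, as used inside peaks().
def _example_peaks_surface():
    return peaks(64)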
def RelPerm2(Sa, UW, UO, BW, BO, SWI, SWR, nx, ny, nz):
"""
Computes the relative permeability and its derivative w.r.t saturation S,
based on Brooks and Corey model.
Parameters
----------
Sa : array_like
Saturation value.
UW : float
Water viscosity.
UO : float
Oil viscosity.
BW : float
Water formation volume factor.
BO : float
Oil formation volume factor.
SWI : float
Initial water saturation.
SWR : float
Residual water saturation.
nx, ny, nz : int
The number of grid cells in x, y, and z directions.
Returns
-------
Mw : array_like
Water relative permeability.
Mo : array_like
Oil relative permeability.
dMw : array_like
Water relative permeability derivative w.r.t saturation.
dMo : array_like
Oil relative permeability derivative w.r.t saturation.
"""
S = (Sa - SWI) / (1 - SWI - SWR)
Mw = (S**2) / (UW * BW) # Water mobility
Mo = ((1 - S) ** 2) / (UO * BO) # Oil mobility
dMw = 2 * S / (UW * BW) / (1 - SWI - SWR)
dMo = -2 * (1 - S) / (UO * BO) / (1 - SWI - SWR)
return (
cp.reshape(Mw, (-1, 1), "F"),
cp.reshape(Mo, (-1, 1), "F"),
cp.reshape(dMw, (-1, 1), "F"),
cp.reshape(dMo, (-1, 1), "F"),
)
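# Illustrative sketch of the Brooks-Corey mobility helpers on a cupy
# saturation array (cp is the module-level cupy import); the PVT values
# below are made-up placeholders.
def _example_relperm():
    Sa = cp.linspace(0.25, 0.75, 16)
    Mw, Mo, dMw, dMo = RelPerm2(
        Sa, UW=1.0, UO=2.5, BW=1.0, BO=1.1, SWI=0.1, SWR=0.1, nx=4, ny=4, nz=1
    )
    return Mw, Mo  # column vectors of water/oil mobilities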
def calc_mu_g(p):
    # Average reservoir pressure
mu_g = 3e-10 * p**2 + 1e-6 * p + 0.0133
return mu_g
def calc_rs(p_bub, p):
# p=average reservoir pressure
if p < p_bub:
rs_factor = 1
else:
rs_factor = 0
rs = 178.11**2 / 5.615 * ((p / p_bub) ** 1.3 * rs_factor + (1 - rs_factor))
return rs
def calc_dp(p_bub, p_atm, p):
if p < p_bub:
dp = p_atm - p
else:
dp = p_atm - p_bub
return dp
def calc_bg(p_bub, p_atm, p):
    # p is the average reservoir pressure
b_g = 1 / (cp.exp(1.7e-3 * calc_dp(p_bub, p_atm, p)))
return b_g
def calc_bo(p_bub, p_atm, CFO, p):
# p is average reservoir pressure
if p < p_bub:
b_o = 1 / cp.exp(-8e-5 * (p_atm - p))
else:
b_o = 1 / (cp.exp(-8e-5 * (p_atm - p_bub)) * cp.exp(-CFO * (p - p_bub)))
return b_o
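# Illustrative sketch of the PVT helpers below and above an assumed
# bubble-point pressure; all numbers here are placeholders.
def _example_pvt():
    p_bub, p_atm, CFO = 2000.0, 14.7, 1e-5
    rs = calc_rs(p_bub, 1500.0)  # solution gas-oil ratio
    bo_below = calc_bo(p_bub, p_atm, CFO, 1500.0)  # below bubble point
    bo_above = calc_bo(p_bub, p_atm, CFO, 2500.0)  # above bubble point
    return rs, bo_below, bo_above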
def ProgressBar(Total, Progress, BarLength=20, ProgressIcon="#", BarIcon="-"):
try:
# You can't have a progress bar with zero or negative length.
if BarLength < 1:
BarLength = 20
# Use status variable for going to the next line after progress completion.
Status = ""
        # Calculating progress between 0 and 1 for percentage.
Progress = float(Progress) / float(Total)
        # Clamp progress at completion and move to a new line.
if Progress >= 1.0:
Progress = 1
Status = "\r\n" # Going to the next line
# Calculating how many places should be filled
Block = int(round(BarLength * Progress))
# Show this
Bar = "[{}] {:.0f}% {}".format(
ProgressIcon * Block + BarIcon * (BarLength - Block),
round(Progress * 100, 0),
Status,
)
return Bar
except:
return "ERROR"
def ShowBar(Bar):
sys.stdout.write(Bar)
sys.stdout.flush()
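# Illustrative sketch: render a console progress bar with the two helpers
# above. The leading "\r" is this example's choice so the bar redraws in
# place rather than concatenating across the line.
def _example_progress_bar():
    total = 25
    for step in range(1, total + 1):
        ShowBar("\r" + ProgressBar(total, step, BarLength=20))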
def Equivalent_time(tim1, max_t1, tim2, max_t2):
tk2 = tim1 / max_t1
tc2 = np.arange(0.0, 1 + tk2, tk2)
tc2[tc2 >= 1] = 1
tc2 = tc2.reshape(-1, 1) # reference scaled to 1
tc2r = np.arange(0.0, max_t1 + tim1, tim1)
tc2r[tc2r >= max_t1] = max_t1
tc2r = tc2r.reshape(-1, 1) # reference original
func = interpolate.interp1d(tc2r.ravel(), tc2.ravel())
tc2rr = np.arange(0.0, max_t2 + tim2, tim2)
tc2rr[tc2rr >= max_t2] = max_t2
tc2rr = tc2rr.reshape(-1, 1) # reference original
ynew = func(tc2rr.ravel())
return ynew
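# Illustrative sketch: express a 50-day schedule capped at 1000 days as
# fractions of a reference 100-day schedule capped at 2000 days. Note
# that max_t2 must not exceed max_t1, or the query points fall outside
# the interpolation range built from the reference schedule.
def _example_equivalent_time():
    return Equivalent_time(100.0, 2000.0, 50.0, 1000.0)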
def No_Sim(
ini,
nx,
ny,
nz,
max_t,
Dx,
Dy,
Dz,
BO,
BW,
CFL,
timmee,
MAXZ,
factorr,
steppi,
LIR,
UIR,
LUB,
HUB,
aay,
bby,
mpor,
hpor,
dt,
IWSw,
PB,
PATM,
CFO,
method,
SWI,
SWR,
UW,
UO,
typee,
step2,
pini_alt,
input_channel,
pena,
):
paramss = ini
Ne = paramss.shape[1]
ct = np.zeros((Ne, input_channel, nx, ny), dtype=np.float32)
kka = np.random.randint(LIR, UIR + 1, (1, Ne))
A = np.zeros((nx, ny, nz))
A1 = np.zeros((nx, ny, nz))
for kk in range(Ne):
ct1 = np.zeros((input_channel, nx, ny), dtype=np.float32)
print(str(kk + 1) + " | " + str(Ne))
# a = np.reshape(paramss[:,kk],(nx,ny,nz),'F')
        points = np.reshape(
            np.random.randint(1, nx, 16), (-1, 2), "F"
        )  # 16 random coordinates -> 8 wells (4 injectors, 4 producers)
Injcl = points[:4, :]
prodcl = points[4:, :]
inj_rate = kka[:, kk]
at1 = paramss[:, kk]
at1 = rescale_linear_numpy_pytorch(at1, LUB, HUB, aay, bby)
at2 = paramss[:, kk]
at2 = rescale_linear(at2, mpor, hpor)
at1 = np.reshape(at1, (nx, ny, nz), "F")
at2 = np.reshape(at2, (nx, ny, nz), "F")
atemp = np.zeros((nx, ny, nz))
atemp[:, :, 0] = at1[:, :, 0]
if pena == 1:
for jj in range(nz):
for m in range(prodcl.shape[0]):
A[prodcl[m, :][0], prodcl[m, :][1], jj] = -50
for m in range(Injcl.shape[0]):
A[Injcl[m, :][0], Injcl[m, :][1], jj] = inj_rate
for m in range(Injcl.shape[0]):
A1[Injcl[m, :][0], Injcl[m, :][1], jj] = inj_rate
else:
for jj in range(nz):
A[1, 24, jj] = inj_rate
A[3, 3, jj] = inj_rate
A[31, 1, jj] = inj_rate
A[31, 31, jj] = inj_rate
A[7, 9, jj] = -50
A[14, 12, jj] = -50
A[28, 19, jj] = -50
A[14, 27, jj] = -50
A1[1, 24, jj] = inj_rate
A1[3, 3, jj] = inj_rate
A1[31, 1, jj] = inj_rate
A1[31, 31, jj] = inj_rate
quse1 = A
ct1[0, :, :] = at1[:, :, 0] # permeability
ct1[1, :, :] = quse1[:, :, 0] / UIR # Overall f
ct1[2, :, :] = A1[:, :, 0] / UIR # f for water injection
ct1[3, :, :] = at2[:, :, 0] # porosity
ct1[4, :, :] = dt * np.ones((nx, ny))
ct1[5, :, :] = np.ones((nx, ny)) # Initial pressure
ct1[6, :, :] = IWSw * np.ones((nx, ny)) # Initial water saturation
ct[kk, :, :, :] = ct1
return ct
def compute_f(
pressure, kuse, krouse, krwuse, rwell1, skin, pwf_producer1, UO, BO, DX, UW, BW, DZ
):
RE = 0.2 * cp.asarray(DX)
up = UO * BO
# facc = tf.constant(10,dtype = tf.float64)
DZ = cp.asarray(DZ)
down = 2.0 * cp.pi * kuse * krouse * DZ
# down = piit * pii * krouse * DZ1
right = cp.log(RE / cp.asarray(rwell1)) + cp.asarray(skin)
J = down / (up * right)
drawdown = pressure - cp.asarray(pwf_producer1)
qoil = -((drawdown) * J)
aa = qoil * 1e-5
# aa[aa<=0] = 0
# print(aa)
# water production
up2 = UW * BW
down = 2.0 * cp.pi * kuse * krwuse * DZ
J = down / (up2 * right)
drawdown = pressure - cp.asarray(pwf_producer1)
qwater = -((drawdown) * J)
aaw = qwater * 1e-5
# aaw = (qwater)
# aaw[aaw<=0] = 0
# print(qwater)
ouut = aa + aaw
return -(ouut) # outnew
def rescale_linear(array, new_min, new_max):
"""Rescale an arrary linearly."""
minimum, maximum = np.min(array), np.max(array)
m = (new_max - new_min) / (maximum - minimum)
b = new_min - m * minimum
return m * array + b
def rescale_linear_numpy_pytorch(array, new_min, new_max, minimum, maximum):
"""Rescale an arrary linearly."""
m = (new_max - new_min) / (maximum - minimum)
b = new_min - m * minimum
return m * array + b
def rescale_linear_pytorch_numpy(array, new_min, new_max, minimum, maximum):
"""Rescale an arrary linearly."""
m = (maximum - minimum) / (new_max - new_min)
b = minimum - m * new_min
return m * array + b
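# Illustrative sketch: the two helpers above are inverses of each other;
# the bound values (LUB/HUB/aay/bby) below are placeholders.
def _example_rescale_roundtrip():
    arr = np.random.rand(8)
    aay, bby = float(arr.min()), float(arr.max())  # original range
    LUB, HUB = 1.0, 5.0  # rescaled range
    scaled = rescale_linear_numpy_pytorch(arr, LUB, HUB, aay, bby)
    restored = rescale_linear_pytorch_numpy(scaled, LUB, HUB, aay, bby)
    assert np.allclose(arr, restored)
    return scaled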
def Add_marker2(plt, XX, YY, injectors, producers):
"""
Function to add marker to given coordinates on a matplotlib plot
less
Copy code
Parameters:
plt: a matplotlib.pyplot object to add the markers to
XX: a numpy array of X coordinates
YY: a numpy array of Y coordinates
locc: a numpy array of locations where markers need to be added
Return:
None
"""
n_inj = len(injectors) # Number of injectors
n_prod = len(producers) # Number of producers
for mm in range(n_inj):
usethis = injectors[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=200,
marker="v",
color="white",
)
plt.text(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
discrip,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
for mm in range(n_prod):
usethis = producers[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
plt.scatter(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
s=200,
marker="^",
color="white",
)
plt.text(
XX.T[xloc - 1, yloc - 1] + 0.5,
YY.T[xloc - 1, yloc - 1] + 0.5,
discrip,
color="black",
weight="bold",
horizontalalignment="center",
verticalalignment="center",
fontsize=12,
)
def Plot_2D(XX, YY, plt, nx, ny, nz, Truee, N_injw, N_pr, varii, injectors, producers):
Pressz = np.reshape(Truee, (nx, ny, nz), "F")
maxii = max(Pressz.ravel())
minii = min(Pressz.ravel())
avg_2d = np.mean(Pressz, axis=2)
avg_2d[avg_2d == 0] = np.nan # Convert zeros to NaNs
# XX, YY = np.meshgrid(np.arange(nx),np.arange(ny))
# plt.subplot(224)
plt.pcolormesh(XX.T, YY.T, avg_2d, cmap="jet")
cbar = plt.colorbar()
if varii == "perm":
cbar.set_label("Log K(mD)", fontsize=11)
plt.title("Permeability Field with well locations", fontsize=11, weight="bold")
elif varii == "water Modulus":
cbar.set_label("water saturation", fontsize=11)
plt.title("water saturation -Modulus", fontsize=11, weight="bold")
elif varii == "water Numerical":
cbar.set_label("water saturation", fontsize=11)
plt.title("water saturation - Numerical", fontsize=11, weight="bold")
elif varii == "water diff":
cbar.set_label("unit", fontsize=11)
plt.title(
"water saturation - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold"
)
elif varii == "oil Modulus":
cbar.set_label("Oil saturation", fontsize=11)
plt.title("Oil saturation -Modulus", fontsize=11, weight="bold")
elif varii == "oil Numerical":
cbar.set_label("Oil saturation", fontsize=11)
plt.title("Oil saturation - Numerical", fontsize=11, weight="bold")
elif varii == "oil diff":
cbar.set_label("unit", fontsize=11)
plt.title(
"oil saturation - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold"
)
elif varii == "pressure Modulus":
cbar.set_label("pressure(psia)", fontsize=11)
plt.title("Pressure -Modulus", fontsize=11, weight="bold")
elif varii == "pressure Numerical":
cbar.set_label("pressure(psia)", fontsize=11)
plt.title("Pressure -Numerical", fontsize=11, weight="bold")
elif varii == "pressure diff":
cbar.set_label("unit", fontsize=11)
plt.title("Pressure - (Numerical(GPU) -Modulus)", fontsize=11, weight="bold")
elif varii == "porosity":
cbar.set_label("porosity", fontsize=11)
plt.title("Porosity Field", fontsize=11, weight="bold")
cbar.mappable.set_clim(minii, maxii)
plt.ylabel("Y", fontsize=11)
plt.xlabel("X", fontsize=11)
plt.axis([0, (nx - 1), 0, (ny - 1)])
plt.gca().set_xticks([])
plt.gca().set_yticks([])
Add_marker2(plt, XX, YY, injectors, producers)
def Plot_Modulus(ax, nx, ny, nz, Truee, N_injw, N_pr, varii, injectors, producers):
# matplotlib.use('Agg')
Pressz = np.reshape(Truee, (nx, ny, nz), "F")
avg_2d = np.mean(Pressz, axis=2)
avg_2d[avg_2d == 0] = np.nan # Convert zeros to NaNs
maxii = max(Pressz.ravel())
minii = min(Pressz.ravel())
Pressz = Pressz / maxii
masked_Pressz = Pressz
colors = plt.cm.jet(masked_Pressz)
# colors[np.isnan(Pressz), :3] = 1 # set color to white for NaN values
# alpha = np.where(np.isnan(Pressz), 0.0, 0.8) # set alpha to 0 for NaN values
norm = mpl.colors.Normalize(vmin=minii, vmax=maxii)
arr_3d = Pressz
# fig = plt.figure(figsize=(20, 20), dpi = 200)
# ax = fig.add_subplot(221, projection='3d')
# Shift the coordinates to center the points at the voxel locations
x, y, z = np.indices((arr_3d.shape))
x = x + 0.5
y = y + 0.5
z = z + 0.5
# Set the colors of each voxel using a jet colormap
# colors = plt.cm.jet(arr_3d)
# norm = matplotlib.colors.Normalize(vmin=minii, vmax=maxii)
# Plot each voxel and save the mappable object
ax.voxels(arr_3d, facecolors=colors, alpha=0.5, edgecolor="none", shade=True)
m = cm.ScalarMappable(cmap=plt.cm.jet, norm=norm)
m.set_array([])
# Add a colorbar for the mappable object
# plt.colorbar(mappable)
# Set the axis labels and title
ax.set_xlabel("X axis")
ax.set_ylabel("Y axis")
ax.set_zlabel("Z axis")
# ax.set_title(titti,fontsize= 14)
# Set axis limits to reflect the extent of each axis of the matrix
ax.set_xlim(0, arr_3d.shape[0])
ax.set_ylim(0, arr_3d.shape[1])
ax.set_zlim(0, arr_3d.shape[2])
# ax.set_zlim(0, 60)
# Remove the grid
ax.grid(False)
# Set lighting to bright
# ax.set_facecolor('white')
# Set the aspect ratio of the plot
ax.set_box_aspect([nx, ny, nz])
# Set the projection type to orthogonal
ax.set_proj_type("ortho")
# Remove the tick labels on each axis
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# Remove the tick lines on each axis
ax.xaxis._axinfo["tick"]["inward_factor"] = 0
ax.xaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.yaxis._axinfo["tick"]["inward_factor"] = 0
ax.yaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.zaxis._axinfo["tick"]["inward_factor"] = 0
ax.zaxis._axinfo["tick"]["outward_factor"] = 0.4
# Set the azimuth and elevation to make the plot brighter
ax.view_init(elev=30, azim=45)
n_inj = N_injw # Number of injectors
n_prod = N_pr # Number of producers
for mm in range(n_inj):
usethis = injectors[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
# Define the direction of the line
line_dir = (0, 0, (nz * 2) + 7)
# Define the coordinates of the line end
x_line_end = xloc + line_dir[0]
y_line_end = yloc + line_dir[1]
z_line_end = 0 + line_dir[2]
ax.plot([xloc, xloc], [yloc, yloc], [0, (nz * 2) + 7], "blue", linewidth=1)
ax.text(
x_line_end,
y_line_end,
z_line_end,
discrip,
color="blue",
weight="bold",
fontsize=5,
)
for mm in range(n_prod):
usethis = producers[mm]
xloc = int(usethis[0])
yloc = int(usethis[1])
discrip = str(usethis[-1])
# Define the direction of the line
line_dir = (0, 0, (nz * 2) + 5)
# Define the coordinates of the line end
x_line_end = xloc + line_dir[0]
y_line_end = yloc + line_dir[1]
z_line_end = 0 + line_dir[2]
ax.plot([xloc, xloc], [yloc, yloc], [0, (nz * 2) + 5], "r", linewidth=1)
ax.text(
x_line_end,
y_line_end,
z_line_end,
discrip,
color="g",
weight="bold",
fontsize=5,
)
    blue_line = mlines.Line2D([], [], color="blue", linewidth=2, label="water injector")
    red_line = mlines.Line2D([], [], color="red", linewidth=2, label="oil producer")
    # Add the legend to the plot
    ax.legend(handles=[blue_line, red_line], loc="lower left", fontsize=9)
# Add a horizontal colorbar to the plot
cbar = plt.colorbar(m, orientation="horizontal", shrink=0.5)
if varii == "perm":
cbar.set_label("Log K(mD)", fontsize=12)
ax.set_title(
"Permeability Field with well locations", fontsize=12, weight="bold"
)
elif varii == "water Modulus":
cbar.set_label("water saturation", fontsize=12)
ax.set_title("water saturation -Modulus", fontsize=12, weight="bold")
elif varii == "water Numerical":
cbar.set_label("water saturation", fontsize=12)
ax.set_title("water saturation - Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "water diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"water saturation - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "oil Modulus":
cbar.set_label("Oil saturation", fontsize=12)
ax.set_title("Oil saturation -Modulus", fontsize=12, weight="bold")
elif varii == "oil Numerical":
cbar.set_label("Oil saturation", fontsize=12)
ax.set_title("Oil saturation - Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "oil diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"oil saturation - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "pressure Modulus":
cbar.set_label("pressure", fontsize=12)
ax.set_title("Pressure -Modulus", fontsize=12, weight="bold")
elif varii == "pressure Numerical":
cbar.set_label("pressure", fontsize=12)
ax.set_title("Pressure -Numerical(GPU)", fontsize=12, weight="bold")
elif varii == "pressure diff":
cbar.set_label("unit", fontsize=12)
ax.set_title(
"Pressure - (Numerical(GPU) -Modulus))", fontsize=12, weight="bold"
)
elif varii == "porosity":
cbar.set_label("porosity", fontsize=12)
ax.set_title("Porosity Field", fontsize=12, weight="bold")
cbar.mappable.set_clim(minii, maxii)
def plot3d2(arr_3d, nx, ny, nz, itt, dt, MAXZ, namet, titti, maxii, minii):
"""
Plot a 3D array with matplotlib and annotate specific points on the plot.
Args:
arr_3d (np.ndarray): 3D array to plot.
nx (int): number of cells in the x direction.
ny (int): number of cells in the y direction.
nz (int): number of cells in the z direction.
itt (int): current iteration number.
dt (float): time step.
        MAXZ (float): maximum simulation time in days, used with dt to label the timestep.
namet (str): name of the file to save the plot.
titti (str): title of the plot.
maxii (float): maximum value of the colorbar.
minii (float): minimum value of the colorbar.
Returns:
None.
"""
fig = plt.figure(figsize=(12, 12), dpi=100)
ax = fig.add_subplot(111, projection="3d")
# Shift the coordinates to center the points at the voxel locations
x, y, z = np.indices((arr_3d.shape))
x = x + 0.5
y = y + 0.5
z = z + 0.5
# Set the colors of each voxel using a jet colormap
colors = plt.cm.jet(arr_3d)
norm = matplotlib.colors.Normalize(vmin=minii, vmax=maxii)
# Plot each voxel and save the mappable object
ax.voxels(arr_3d, facecolors=colors, alpha=0.5, edgecolor="none", shade=True)
m = cm.ScalarMappable(cmap=plt.cm.jet, norm=norm)
m.set_array([])
if titti == "Pressure":
plt.colorbar(m, fraction=0.02, pad=0.1, label="Pressure [psia]")
elif titti == "water_sat":
plt.colorbar(m, fraction=0.02, pad=0.1, label="water_sat [units]")
else:
plt.colorbar(m, fraction=0.02, pad=0.1, label="oil_sat [units]")
# Add a colorbar for the mappable object
# plt.colorbar(mappable)
# Set the axis labels and title
ax.set_xlabel("X axis")
ax.set_ylabel("Y axis")
ax.set_zlabel("Z axis")
ax.set_title(titti, fontsize=14)
# Set axis limits to reflect the extent of each axis of the matrix
ax.set_xlim(0, arr_3d.shape[0])
ax.set_ylim(0, arr_3d.shape[1])
ax.set_zlim(0, arr_3d.shape[2])
# Remove the grid
ax.grid(False)
# Set lighting to bright
ax.set_facecolor("white")
# Set the aspect ratio of the plot
ax.set_box_aspect([nx, ny, 2])
# Set the projection type to orthogonal
ax.set_proj_type("ortho")
# Remove the tick labels on each axis
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
# Remove the tick lines on each axis
ax.xaxis._axinfo["tick"]["inward_factor"] = 0
ax.xaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.yaxis._axinfo["tick"]["inward_factor"] = 0
ax.yaxis._axinfo["tick"]["outward_factor"] = 0.4
ax.zaxis._axinfo["tick"]["inward_factor"] = 0
ax.zaxis._axinfo["tick"]["outward_factor"] = 0.4
# Set the azimuth and elevation to make the plot brighter
ax.view_init(elev=30, azim=45)
# Define the coordinates of the voxel
voxel_loc = (1, 24, 0)
# Define the direction of the line
line_dir = (0, 0, 5)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([1, 1], [24, 24], [0, 5], "black", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "I1", color="black", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (1, 1, 0)
# Define the direction of the line
line_dir = (0, 0, 10)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([1, 1], [1, 1], [0, 10], "black", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "I2", color="black", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (31, 1, 0)
# Define the direction of the line
line_dir = (0, 0, 7)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([31, 31], [1, 1], [0, 7], "black", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "I3", color="black", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (31, 31, 0)
# Define the direction of the line
line_dir = (0, 0, 8)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([31, 31], [31, 31], [0, 8], "black", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "I4", color="black", fontsize=20)
# Define the coordinates of the voxel
voxel_loc = (7, 9, 0)
# Define the direction of the line
line_dir = (0, 0, 8)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([7, 7], [9, 9], [0, 8], "r", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "P1", color="r", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (14, 12, 0)
# Define the direction of the line
line_dir = (0, 0, 10)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([14, 14], [12, 12], [0, 10], "r", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "P2", color="r", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (28, 19, 0)
# Define the direction of the line
line_dir = (0, 0, 15)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([28, 28], [19, 19], [0, 15], "r", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "P3", color="r", fontsize=16)
# Define the coordinates of the voxel
voxel_loc = (14, 27, 0)
# Define the direction of the line
line_dir = (0, 0, 15)
# Define the coordinates of the line end
x_line_end = voxel_loc[0] + line_dir[0]
y_line_end = voxel_loc[1] + line_dir[1]
z_line_end = voxel_loc[2] + line_dir[2]
ax.plot([14, 14], [27, 27], [0, 15], "r", linewidth=2)
ax.text(x_line_end, y_line_end, z_line_end, "P4", color="r", fontsize=16)
# plt.show()
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * dt * MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
name = namet + str(int(itt)) + ".png"
plt.savefig(name)
# plt.show()
plt.clf()
def Plot_Models(True_mat):
colors = ["r", "b", "g", "k", "#9467bd"]
linestyles = ["-", "--", ":", "-.", "-", "--", ":"]
markers = ["o", "s", "v", "*", "X"]
timezz = True_mat[0][:, 0].reshape(-1, 1)
plt.figure(figsize=(40, 40))
plt.subplot(4, 4, 1)
plt.plot(
timezz,
True_mat[0][:, 1],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 1],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 1],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 1],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 1],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 2)
plt.plot(
timezz,
True_mat[0][:, 2],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 2],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 2],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 2],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 2],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", fontsize=13)
# plt.ylim((0,25000))
plt.title("I2", fontsize=13)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 3)
plt.plot(
timezz,
True_mat[0][:, 3],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 3],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 3],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 3],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 3],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I3", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 4)
plt.plot(
timezz,
True_mat[0][:, 4],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 4],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 4],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 4],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 4],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("BHP(Psia)", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("I4", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 5)
plt.plot(
timezz,
True_mat[0][:, 5],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 5],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 5],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 5],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 5],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 6)
plt.plot(
timezz,
True_mat[0][:, 6],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 6],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 6],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 6],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 6],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P2", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 7)
plt.plot(
timezz,
True_mat[0][:, 7],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 7],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 7],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 7],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 7],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P3", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 8)
plt.plot(
timezz,
True_mat[0][:, 8],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 8],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 8],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 8],
linestyle=linestyles[3],
marker=markers[3],
markersize=1,
color=colors[3],
lw="2",
label="AFNOP",
)
plt.plot(
timezz,
True_mat[4][:, 8],
linestyle=linestyles[4],
marker=markers[4],
markersize=1,
color=colors[4],
lw="2",
label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{oil}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0,25000))
plt.title("P4", weight="bold", fontsize=14)
plt.ylim(ymin=0)
plt.xlim(xmin=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
text.set_weight("bold")
plt.subplot(4, 4, 9)
plt.plot(
timezz,
True_mat[0][:, 9],
linestyle=linestyles[0],
marker=markers[0],
markersize=1,
color=colors[0],
lw="2",
label="Numerical Model",
)
plt.plot(
timezz,
True_mat[1][:, 9],
linestyle=linestyles[1],
marker=markers[1],
markersize=1,
color=colors[1],
lw="2",
label="FNO",
)
plt.plot(
timezz,
True_mat[2][:, 9],
linestyle=linestyles[2],
marker=markers[2],
markersize=1,
color=colors[2],
lw="2",
label="PINO",
)
plt.plot(
timezz,
True_mat[3][:, 9],
linestyle=linestyles[3],
    marker=markers[3],
    markersize=1,
    color=colors[3],
    lw=2,
    label="AFNOP",
)
plt.plot(
    timezz,
    True_mat[4][:, 9],
    linestyle=linestyles[4],
    marker=markers[4],
    markersize=1,
    color=colors[4],
    lw=2,
    label="AFNOD",
)
plt.xlabel("Time (days)", weight="bold", fontsize=14)
plt.ylabel("$Q_{water}(bbl/day)$", weight="bold", fontsize=14)
# plt.ylim((0, 25000))
plt.title("P1", weight="bold", fontsize=14)
plt.ylim(bottom=0)  # `ymin=`/`xmin=` were removed in modern matplotlib
plt.xlim(left=0)
legend = plt.legend(fontsize="large", title_fontsize="large")
for text in legend.get_texts():
    text.set_weight("bold")

# Panels 10-16 repeat the same five-series comparison, so they are drawn in a
# loop rather than copy-pasted blocks. Panel N plots column N of each matrix
# in True_mat: columns 10-12 hold water rates for producers P2-P4, and
# columns 13-16 hold water cuts for producers P1-P4.
series_labels = ["Numerical Model", "FNO", "PINO", "AFNOP", "AFNOD"]
panels = [
    (10, "$Q_{water}(bbl/day)$", "P2"),
    (11, "$Q_{water}(bbl/day)$", "P3"),
    (12, "$Q_{water}(bbl/day)$", "P4"),
    (13, "$WWCT{%}$", "P1"),
    (14, "$WWCT{%}$", "P2"),
    (15, "$WWCT{%}$", "P3"),
    (16, "$WWCT{%}$", "P4"),
]
for col, ylab, panel_title in panels:
    plt.subplot(4, 4, col)
    for k in range(5):
        plt.plot(
            timezz,
            True_mat[k][:, col],
            linestyle=linestyles[k],
            marker=markers[k],
            markersize=1,
            color=colors[k],
            lw=2,
            label=series_labels[k],
        )
    plt.xlabel("Time (days)", weight="bold", fontsize=14)
    plt.ylabel(ylab, weight="bold", fontsize=14)
    plt.title(panel_title, weight="bold", fontsize=14)
    plt.ylim(bottom=0)
    plt.xlim(left=0)
    legend = plt.legend(fontsize="large", title_fontsize="large")
    for text in legend.get_texts():
        text.set_weight("bold")

# os.chdir('RESULTS')
plt.savefig("Compare_models.png")  # save as png without displaying the figure
# os.chdir(oldfolder)
plt.clf()
plt.close()
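
# The helpers below quantify what the panel figure above shows qualitatively:
# rmsee reduces each surrogate's mismatch with the numerical model to a single
# scalar, and Plot_bar compares those scalars across FNO, PINO, AFNOP and AFNOD.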
def Plot_bar(True_mat):
    """Bar chart of RMSE for each surrogate against the numerical model."""
    # Column 0 (presumably the time axis) is excluded from the error.
    a1 = rmsee(True_mat[1][:, 1:].ravel(), True_mat[0][:, 1:].ravel())
    a2 = rmsee(True_mat[2][:, 1:].ravel(), True_mat[0][:, 1:].ravel())
    a3 = rmsee(True_mat[3][:, 1:].ravel(), True_mat[0][:, 1:].ravel())
    a4 = rmsee(True_mat[4][:, 1:].ravel(), True_mat[0][:, 1:].ravel())

    models = ["FNO", "PINO", "AFNOP", "AFNOD"]
    rmse_values = [a1, a2, a3, a4]
    colors = ["red", "blue", "green", "purple"]

    # Create a bar chart with a bold title and axis labels
    plt.figure(figsize=(10, 10))
    plt.bar(models, rmse_values, color=colors)
    plt.title("RMSE accuracy", weight="bold", fontsize=16)
    plt.xlabel("Surrogate Models", weight="bold", fontsize=14)
    plt.ylabel("RMSE", weight="bold", fontsize=14)
    plt.savefig("Bar_chart.png")  # save as png without displaying the figure
    # os.chdir(oldfolder)
    plt.clf()
    plt.close()
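
# Minimal usage sketch for Plot_bar (illustrative only -- the array shapes are
# assumptions, not taken from this file): True_mat is a length-5 list ordered
# [numerical, FNO, PINO, AFNOP, AFNOD], each entry an (n_timesteps, n_columns)
# array whose column 0 is skipped by the slicing above.
#
#   demo_mat = [np.random.rand(120, 17) for _ in range(5)]
#   Plot_bar(demo_mat)  # writes Bar_chart.png to the working directory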
def rmsee(predictions, targets):
    """Root-mean-square error between flattened predictions and targets."""
    noww = predictions.reshape(-1, 1)
    measurement = targets.reshape(-1, 1)
    # The square root of the mean of the squared differences
    rmse_val = np.sqrt(np.mean((noww - measurement) ** 2))
    return rmse_val
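
# Quick sanity check for rmsee with hand-computed values (illustrative only):
# predictions [1, 2, 3] vs targets [1, 2, 5] give squared errors [0, 0, 4],
# mean 4/3, so the RMSE is sqrt(4/3) ~= 1.1547.
#
#   assert np.isclose(rmsee(np.array([1.0, 2.0, 3.0]),
#                           np.array([1.0, 2.0, 5.0])), (4.0 / 3.0) ** 0.5)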
| modulus-sym-main | examples/reservoir_simulation/2D/src/NVRS.py |