# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
def dx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute first order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
-0.5,
0.0,
0.5,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
-1.0 / 60.0,
3.0 / 20.0,
-3.0 / 4.0,
0.0,
3.0 / 4.0,
-3.0 / 20.0,
1.0 / 60.0,
]
        ).to(inpt.device)
    else:
        raise ValueError(f"Unsupported derivative order {order}; expected 1 or 3")
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
def ddx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute second order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
1.0,
-2.0,
1.0,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
1.0 / 90.0,
-3.0 / 20.0,
3.0 / 2.0,
-49.0 / 18.0,
3.0 / 2.0,
-3.0 / 20.0,
1.0 / 90.0,
]
        ).to(inpt.device)
    else:
        raise ValueError(f"Unsupported derivative order {order}; expected 1 or 3")
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx**2) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
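# Illustrative usage sketch (added; not part of the original module). Both
# helpers take NCHW tensors, differentiate one channel along `dim` (0 = first
# spatial axis, 1 = second), and return a (N, 1, H, W) tensor:
#
#     import torch
#     x = torch.linspace(0.0, 1.0, 64)
#     field = torch.sin(2 * torch.pi * x)[None, None, :, None].repeat(1, 1, 1, 64)
#     h = float(x[1] - x[0])
#     dfdx = dx(field, h, channel=0, dim=0, order=3, padding="replication")
#     d2fdx2 = ddx(field, h, channel=0, dim=0, order=3, padding="replication")
#
# dfdx should approximate 2*pi*cos(2*pi*x) away from the boundaries.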
# ===== modulus-sym-main :: examples/reservoir_simulation/2D/src/ops.py =====
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from NVRS import *
from modulus.sym.models.fno import *
import shutil
import pandas as pd
import scipy.io as sio
import torch
import yaml
from multiprocessing import Lock, Value
from PIL import Image
import requests
import concurrent.futures
import glob
import re
def read_yaml(fname):
"""Read Yaml file into a dict of parameters"""
print(f"Read simulation plan from {fname}...")
with open(fname, "r") as stream:
try:
data = yaml.safe_load(stream)
# print(data)
except yaml.YAMLError as exc:
print(exc)
return data
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
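# Note (added): the three helpers above implement the usual Google Drive
# large-file flow -- request once, pick up any "download_warning" cookie as a
# confirm token, re-request with the token, then stream the payload to disk
# in 32 KiB chunks.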
def process_chunk(chunk):
chunk_results = []
for kk in chunk:
result = process_step(kk)
chunk_results.append(result)
with lock:
Runs = len(chunks)
processed_chunks.value += 1
completion_percentage = (processed_chunks.value / len(chunks)) * 100
remaining_percentage = 100 - completion_percentage
# print(f"Processed chunk {processed_chunks.value}. {completion_percentage:.2f}% completed. {remaining_percentage:.2f}% remaining.")
progressBar = "\rPlotting Progress: " + ProgressBar(
Runs, processed_chunks.value, Runs
)
ShowBar(progressBar)
# time.sleep(1)
return chunk_results
def sort_key(s):
"""Extract the number from the filename for sorting."""
return int(re.search(r"\d+", s).group())
def process_step(kk):
    current_time = int((kk + 1) * dt * MAXZ)
    f_3 = plt.figure(figsize=(20, 20), dpi=200)
look = ouut_p[0, kk, :, :]
look = look * pini_alt
lookf = cPress[0, kk, :, :]
lookf = lookf * pini_alt
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(331, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "pressure Modulus", injectors, producers
)
ax2 = f_3.add_subplot(332, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "pressure Numerical", injectors, producers
)
ax3 = f_3.add_subplot(333, projection="3d")
Plot_Modulus(
ax3, nx, ny, nz, diff1, N_injw, N_pr, "pressure diff", injectors, producers
)
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
look = ouut_s[0, kk, :, :]
lookf = cSat[0, kk, :, :]
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(334, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "water Modulus", injectors, producers
)
ax2 = f_3.add_subplot(335, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "water Numerical", injectors, producers
)
ax3 = f_3.add_subplot(336, projection="3d")
Plot_Modulus(
ax3, nx, ny, nz, diff1, N_injw, N_pr, "water diff", injectors, producers
)
R2w, L2w = compute_metrics(look.ravel(), lookf.ravel())
look = 1 - ouut_s[0, kk, :, :]
lookf = 1 - cSat[0, kk, :, :]
diff1 = abs(look - lookf)
ax1 = f_3.add_subplot(337, projection="3d")
Plot_Modulus(
ax1, nx, ny, nz, look, N_injw, N_pr, "oil Modulus", injectors, producers
)
ax2 = f_3.add_subplot(338, projection="3d")
Plot_Modulus(
ax2, nx, ny, nz, lookf, N_injw, N_pr, "oil Numerical", injectors, producers
)
ax3 = f_3.add_subplot(339, projection="3d")
Plot_Modulus(ax3, nx, ny, nz, diff1, N_injw, N_pr, "oil diff", injectors, producers)
R2o, L2o = compute_metrics(look.ravel(), lookf.ravel())
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(current_time) + " days"
plt.suptitle(tita, fontsize=16)
plt.savefig("Dynamic" + str(int(kk)))
plt.clf()
plt.close()
return current_time, (R2p, L2p), (R2w, L2w), (R2o, L2o)
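# Note (added): process_step/process_chunk read module-level globals
# (ouut_p, ouut_s, cPress, cSat, dt, MAXZ, chunks, lock, processed_chunks).
# With ProcessPoolExecutor this presumably relies on fork-style process
# creation (the Linux default) so workers inherit that state; on spawn-based
# platforms it would have to be passed explicitly.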
oldfolder = os.getcwd()
os.chdir(oldfolder)
surrogate = None
while True:
    surrogate = int(
        input(
            "Select surrogate method type:\n1=FNO [Modulus Implementation]\n"
            "2=PINO [Modulus Implementation]\n3=AFNO (data driven)\n"
            "4=AFNO (data + physics driven)\n"
        )
    )
if (surrogate > 4) or (surrogate < 1):
# raise SyntaxError('please select value between 1-2')
print("")
print("please try again and select value between 1-4")
else:
break
if not os.path.exists("../COMPARE_RESULTS"):
os.makedirs("../COMPARE_RESULTS")
if surrogate == 1:
folderr = "../COMPARE_RESULTS/FNO"
if not os.path.exists("../COMPARE_RESULTS/FNO"):
os.makedirs("../COMPARE_RESULTS/FNO")
else:
shutil.rmtree("../COMPARE_RESULTS/FNO")
os.makedirs("../COMPARE_RESULTS/FNO")
elif surrogate == 2:
folderr = "../COMPARE_RESULTS/PINO"
if not os.path.exists("../COMPARE_RESULTS/PINO"):
os.makedirs("../COMPARE_RESULTS/PINO")
else:
shutil.rmtree("../COMPARE_RESULTS/PINO")
os.makedirs("../COMPARE_RESULTS/PINO")
elif surrogate == 3:
folderr = "../COMPARE_RESULTS/AFNOD"
if not os.path.exists("../COMPARE_RESULTS/AFNOD"):
os.makedirs("../COMPARE_RESULTS/AFNOD")
else:
shutil.rmtree("../COMPARE_RESULTS/AFNOD")
os.makedirs("../COMPARE_RESULTS/AFNOD")
else:
folderr = "../COMPARE_RESULTS/AFNOP"
if not os.path.exists("../COMPARE_RESULTS/AFNOP"):
os.makedirs("../COMPARE_RESULTS/AFNOP")
else:
shutil.rmtree("../COMPARE_RESULTS/AFNOP")
os.makedirs("../COMPARE_RESULTS/AFNOP")
if not os.path.exists("../PACKETS"):
os.makedirs("../PACKETS")
else:
pass
plan = read_yaml("conf/config_FNO.yaml")
injectors = plan["custom"]["WELLSPECS"]["water_injector_wells"]
producers = plan["custom"]["WELLSPECS"]["producer_wells"]
N_injw = len(
plan["custom"]["WELLSPECS"]["water_injector_wells"]
) # Number of water injectors
N_pr = len(plan["custom"]["WELLSPECS"]["producer_wells"]) # Number of producers
# Variables needed for NVRS
nx = plan["custom"]["NVRS"]["nx"]
ny = plan["custom"]["NVRS"]["ny"]
nz = plan["custom"]["NVRS"]["nz"]
BO = plan["custom"]["NVRS"]["BO"] # oil formation volume factor
BW = plan["custom"]["NVRS"]["BW"] # Water formation volume factor
UW = plan["custom"]["NVRS"]["UW"] # water viscosity in cP
UO = plan["custom"]["NVRS"]["UO"] # oil viscosity in cP
DX = plan["custom"]["NVRS"]["DX"] # size of pixel in x direction
DY = plan["custom"]["NVRS"]["DY"] # sixze of pixel in y direction
DZ = plan["custom"]["NVRS"]["DZ"] # sizze of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(plan["custom"]["NVRS"]["SWI"])
SWR = cp.float32(plan["custom"]["NVRS"]["SWR"])
CFO = cp.float32(plan["custom"]["NVRS"]["CFO"]) # oil compressibility in 1/psi
IWSw = plan["custom"]["NVRS"]["IWSw"] # initial water saturation
pini_alt = plan["custom"]["NVRS"]["pini_alt"]
# print(pini_alt)
P1 = cp.float32(pini_alt) # Bubble point pressure psia
PB = P1
mpor, hpor = (
plan["custom"]["NVRS"]["mpor"],
plan["custom"]["NVRS"]["hpor"],
) # minimum and maximum porosity
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
PATM = cp.float32(plan["custom"]["NVRS"]["PATM"]) # Atmospheric pressure in psi
# training
LUB, HUB = (
plan["custom"]["NVRS"]["LUB"],
plan["custom"]["NVRS"]["HUB"],
) # Permeability rescale
aay, bby = (
plan["custom"]["NVRS"]["aay"],
plan["custom"]["NVRS"]["bby"],
) # Permeability range mD
Low_K, High_K = aay, bby
batch_size = plan["custom"]["NVRS"][
"batch_size"
]  # size of simulated labelled data to run
timmee = plan["custom"]["NVRS"][
"timmee"
] # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = plan["custom"]["NVRS"][
"max_t"
] # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = plan["custom"]["NVRS"]["MAXZ"] # reference maximum time in days of simulation
steppi = int(max_t / timmee)
choice = 1 # 1= Non-Gaussian prior, 2 = Gaussian prior
factorr = 0.1 # from [0 1] excluding the limits for PermZ
LIR = plan["custom"]["NVRS"]["LIR"] # lower injection rate
UIR = plan["custom"]["NVRS"]["UIR"] # uppwer injection rate
RE = 0.2 * DX
rwell = 200 # well radius
skin = 0 # well deformation
pwf_producer = 100
cuda = 0
input_channel = 7 # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
device = torch.device(f"cuda:{cuda}" if torch.cuda.is_available() else "cpu")
N_inj = 4
N_pr = 4
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
# 4 injector and 4 producer wells
wells = np.array(
[1, 24, 1, 3, 3, 1, 31, 1, 1, 31, 31, 1, 7, 9, 2, 14, 12, 2, 28, 19, 2, 14, 27, 2]
)
wells = np.reshape(wells, (-1, 3), "C")
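# Assumption (added): each reshaped row appears to encode one well location
# as (i, j, flag), with the first four rows the injectors and the last four
# the producers referenced above.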
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1G4Cvg8eIObyBK0eoo7iX-0hhMTnpJktj", to_absolute_path("../PACKETS/Test4.mat")
)
print("...Downlaod completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data11 = matt["INPUT"]
data_use11 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data11 = matt["INPUT"]
data_use11 = matt["OUTPUT"]
# index = np.random.choice(X_data11.shape[0], 1, \
# replace=False)
index = 253
X_data1 = X_data11[index, :, :, :][None, :, :, :]
data_use1 = data_use11[index, :, :, :][None, :, :, :]
# print(X_data1.shape)
Ne = 1
ini_ensemble1 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble2 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble3 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble4 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble5 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble6 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
ini_ensemble7 = np.zeros((Ne, 1, nx, ny), dtype=np.float32)
cPress = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble1[kk, :, :, :] = permin
perm = X_data1[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble2[kk, :, :, :] = permin
perm = X_data1[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble3[kk, :, :, :] = permin
perm = X_data1[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble4[kk, :, :, :] = permin
perm = X_data1[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble5[kk, :, :, :] = permin
perm = X_data1[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble6[kk, :, :, :] = permin
perm = X_data1[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
ini_ensemble7[kk, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use1[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
print("")
print("Finished constructing Pytorch inputs")
print("*******************Load the trained Forward models*******************")
if (surrogate == 1) or (surrogate == 2):
decoder1 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("pressure", size=steppi)]
)
modelP = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder1,
)
decoder2 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("water_sat", size=steppi)]
)
modelS = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder2,
)
if (surrogate == 3) or (surrogate == 4):
modelP = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("pressure", size=steppi)],
(nx, ny),
patch_size=3,
)
# Define AFNO model for forward model (saturation)
modelS = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("water_sat", size=steppi)],
(nx, ny),
patch_size=3,
)
if surrogate == 1:
print("-----------------Surrogate Model learned with FNO----------------")
if not os.path.exists(("outputs/Forward_problem_FNO/ResSim/")):
os.makedirs(("outputs/Forward_problem_FNO/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"14njayJd77pKUeuCG9Jno03MbS8SaAVbn",
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelP.load_state_dict(torch.load("fno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelP.load_state_dict(torch.load("fno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1fI1whx3CJSsICcvMddF2uF1hlMekWUrF",
"outputs/Forward_problem_FNO/ResSim/fno_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelS.load_state_dict(torch.load("fno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_FNO/ResSim")
print(" Surrogate model learned with FNO")
modelS.load_state_dict(torch.load("fno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
elif surrogate == 2:
print("-----------------Surrogate Model learned with PINO----------------")
if not os.path.exists(("outputs/Forward_problem_PINO/ResSim/")):
os.makedirs(("outputs/Forward_problem_PINO/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1lmtvM8LucUU3DqSgUe7hYyYZvYS4yLnf",
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelP.load_state_dict(torch.load("pino_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelP.load_state_dict(torch.load("pino_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"10n0NgdgskQbNyTtZNG53IhttB8WCpzli",
"outputs/Forward_problem_PINO/ResSim/pino_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelS.load_state_dict(torch.load("pino_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_PINO/ResSim")
print(" Surrogate model learned with PINO")
modelS.load_state_dict(torch.load("pino_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
elif surrogate == 3:
print(
"-----------------Surrogate Model learned with AFNO (data driven)----------------"
)
if not os.path.exists(("outputs/Forward_problem_AFNOD/ResSim/")):
os.makedirs(("outputs/Forward_problem_AFNOD/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_AFNOD/ResSim/afno_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1JGV9vNYIRUWS1Gd6bqDcG_jRbO8wEkzX",
"outputs/Forward_problem_AFNOD/ResSim/afno_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_AFNOD/ResSim")
print(" Surrogate model learned with AFNO (data driven)")
modelP.load_state_dict(torch.load("afno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_AFNOD/ResSim")
print(" Surrogate model learned with AFNO (data-driven)")
modelP.load_state_dict(torch.load("afno_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_AFNOD/ResSim/afno_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1V_TKAqWlw3_kKaE1vnWGwWB7vlmk7t3s",
"outputs/Forward_problem_AFNOD/ResSim/afno_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_AFNOD/ResSim")
print(" Surrogate model learned with AFNO (data-driven)")
modelS.load_state_dict(torch.load("afno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_AFNOD/ResSim")
print(" Surrogate model learned with AFNO (data-driven)")
modelS.load_state_dict(torch.load("afno_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
elif surrogate == 4:
print(
"-----------------Surrogate Model learned with AFNO (physics driven)----------------"
)
if not os.path.exists(("outputs/Forward_problem_AFNOP/ResSim/")):
os.makedirs(("outputs/Forward_problem_AFNOP/ResSim/"))
else:
pass
bb = os.path.isfile(
"outputs/Forward_problem_AFNOP/ResSim/afnop_forward_model_pressure.0.pth"
)
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"18BrbHfgM3r8OWmpuMhZi9y9geUyAI8t4",
"outputs/Forward_problem_AFNOP/ResSim/afnop_forward_model_pressure.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_AFNOP/ResSim")
print(" Surrogate model learned with AFNO (physics driven)")
modelP.load_state_dict(torch.load("afnop_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_AFNOP/ResSim")
print(" Surrogate model learned with AFNO (physics-driven)")
modelP.load_state_dict(torch.load("afnop_forward_model_pressure.0.pth"))
modelP = modelP.to(device)
modelP.eval()
os.chdir(oldfolder)
bba = os.path.isfile(
"outputs/Forward_problem_AFNOP/ResSim/afnop_forward_model_saturation.0.pth"
)
if bba == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1aSf4QYoU0ITuQdbSNRPWbtkQz7ZQh_Bt",
"outputs/Forward_problem_AFNOP/ResSim/afnop_forward_model_saturation.0.pth",
)
print("...Downlaod completed.......")
os.chdir("outputs/Forward_problem_AFNOP/ResSim")
print(" Surrogate model learned with AFNO (physics-driven)")
modelS.load_state_dict(torch.load("afnop_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
else:
os.chdir("outputs/Forward_problem_AFNOP/ResSim")
print(" Surrogate model learned with AFNO (physics-driven)")
modelS.load_state_dict(torch.load("afnop_forward_model_saturation.0.pth"))
modelS = modelS.to(device)
modelS.eval()
os.chdir(oldfolder)
print("********************Model Loaded*************************************")
inn = {
"perm": torch.from_numpy(ini_ensemble1).to(device, torch.float32),
"Q": torch.from_numpy(ini_ensemble2).to(device, dtype=torch.float32),
"Qw": torch.from_numpy(ini_ensemble3).to(device, dtype=torch.float32),
"Phi": torch.from_numpy(ini_ensemble4).to(device, dtype=torch.float32),
"Time": torch.from_numpy(ini_ensemble5).to(device, dtype=torch.float32),
"Pini": torch.from_numpy(ini_ensemble6).to(device, dtype=torch.float32),
"Swini": torch.from_numpy(ini_ensemble7).to(device, dtype=torch.float32),
}
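# Each entry above is a (Ne, 1, nx, ny) float32 tensor -- the seven input
# channels (perm, Q, Qw, Phi, time, initial pressure, initial water
# saturation) the surrogates were trained on.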
print("")
print("predicting with surrogate model")
start_time_plots1 = time.time()
ouut_p = modelP(inn)["pressure"].detach().cpu().numpy()
ouut_s = modelS(inn)["water_sat"].detach().cpu().numpy()
elapsed_time_secs = time.time() - start_time_plots1
msg = "Surrogate Reservoir simulation took: %s secs (Wall clock time)" % timedelta(
seconds=round(elapsed_time_secs)
)
print(msg)
print("")
ouut_oil = np.ones_like(ouut_s) - ouut_s
print("")
print("Plotting outputs")
os.chdir(folderr)
Runs = steppi
ty = np.arange(1, Runs + 1)
Time_vector = np.zeros((steppi))
Accuracy_presure = np.zeros((steppi, 2))
Accuracy_oil = np.zeros((steppi, 2))
Accuracy_water = np.zeros((steppi, 2))
lock = Lock()
processed_chunks = Value("i", 0)
NUM_CORES = 12  # specify the number of cores you want to use
# Split the range of steps into chunks (guard against steppi < NUM_CORES,
# which would otherwise make the range step zero)
chunk_size = max(1, steppi // NUM_CORES)
chunks = [
    list(range(i, min(i + chunk_size, steppi)))
    for i in range(0, steppi, chunk_size)
]
with concurrent.futures.ProcessPoolExecutor(max_workers=NUM_CORES) as executor:
chunked_results = list(executor.map(process_chunk, chunks))
# Flatten the chunked results to get the ordered results
results = [result for sublist in chunked_results for result in sublist]
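# executor.map preserves input order, so flattening the per-chunk lists keeps
# the timesteps in sequence; e.g. steppi = 120 on 12 cores gives 12 chunks of
# 10 consecutive steps each.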
for kk, (current_time, acc_pressure, acc_oil, acc_water) in enumerate(results):
Time_vector[kk] = current_time
Accuracy_presure[kk] = acc_pressure
Accuracy_oil[kk] = acc_oil
Accuracy_water[kk] = acc_water
fig4 = plt.figure(figsize=(20, 20), dpi=100)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=11,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=11,
)
# Plot R2 accuracies
plt.subplot(2, 3, 1)
plt.plot(
Time_vector,
Accuracy_presure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 3, 2)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 3, 3)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 3, 4)
plt.plot(
Time_vector,
Accuracy_presure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 3, 5)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 3, 6)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2.png"
plt.savefig(namez)
plt.clf()
plt.close()
print("")
print("Now - Creating GIF")
frames = []
imgs = sorted(glob.glob("*Dynamic*"), key=sort_key)
for i in imgs:
new_frame = Image.open(i)
frames.append(new_frame)
frames[0].save(
"Evolution.gif",
format="GIF",
append_images=frames[1:],
save_all=True,
duration=500,
loop=0,
)
for f3 in glob.glob("*Dynamic*"):
    os.remove(f3)
print("")
print("Saving prediction in CSV file")
spittsbig = [
"Time(DAY)",
"I1 - WBHP(PSIA)",
"I2 - WBHP (PSIA)",
"I3 - WBHP(PSIA)",
"I4 - WBHP(PSIA)",
"P1 - WOPR(BBL/DAY)",
"P2 - WOPR(BBL/DAY)",
"P3 - WOPR(BBL/DAY)",
"P4 - WOPR(BBL/DAY)",
"P1 - WWPR(BBL/DAY)",
"P2 - WWPR(BBL/DAY)",
"P3 - WWPR(BBL/DAY)",
"P4 - WWPR(BBL/DAY)",
"P1 - WWCT(%)",
"P2 - WWCT(%)",
"P3 - WWCT(%)",
"P4 - WWCT(%)",
]
see = Peaceman_well(
inn,
ouut_p,
ouut_s,
MAXZ,
1,
1e1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
nz,
)
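# Note (added): Peaceman_well (from NVRS) maps gridded pressure/saturation to
# well responses. In a standard Peaceman model the equivalent well-block
# radius is re ~ 0.2 * DX (cf. RE above) and producer inflow follows
#     q = 2*pi*k*h*(p_block - p_wf) / (mu * B * (ln(re / rw) + skin)),
# which is presumably what the helper evaluates with the arguments passed
# here (rwell, skin, viscosities, formation volume factors, pwf_producer).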
seeTrue = Peaceman_well(
inn,
cPress,
cSat,
MAXZ,
0,
1e1,
LUB,
HUB,
aay,
bby,
DX,
steppi,
pini_alt,
SWI,
SWR,
UW,
BW,
DZ,
rwell,
skin,
UO,
BO,
pwf_producer,
dt,
N_inj,
N_pr,
nz,
)
seeuse = pd.DataFrame(see)
seeuse.to_csv("RSM_MODULUS.csv", header=spittsbig, sep=",")
seeuse.drop(columns=seeuse.columns[0], axis=1, inplace=True)
seeuset = pd.DataFrame(seeTrue)
seeuset.to_csv("RSM_NUMERICAL.csv", header=spittsbig, sep=",")
seeuset.drop(columns=seeuset.columns[0], axis=1, inplace=True)
Plot_RSM_percentile(see, seeTrue, "Compare.png")
os.chdir(oldfolder)
print("")
print("-------------------PROGRAM EXECUTED-----------------------------------")
# ===== modulus-sym-main :: examples/reservoir_simulation/2D/src/Compare_FVM_surrogate.py =====
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import modulus.sym
import torch
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from modulus.sym.models.fno import *
import shutil
import cupy as cp
import scipy.io as sio
import requests
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
from modulus.sym.utils.io.plotter import ValidatorPlotter
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
# get and interpolate output variable
pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
Accuracy_presure = np.zeros((self.steppi, 2))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
look = (pressure_pred[0, itt, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(10, 10), dpi=100)
plt.subplot(1, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
Accuracy_presure[itt, 0] = R2p
Accuracy_presure[itt, 1] = L2p
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(10, 10), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
plt.subplot(2, 1, 1)
plt.plot(
Time_vector,
Accuracy_presure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 1, 2)
plt.plot(
Time_vector,
Accuracy_presure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
# get input variables
water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
look_sat = water_pred[0, itt, :, :] # *1e-2
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :] # * 1e-2
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(2, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
Accuracy_water[itt, 0] = R2w
Accuracy_water[itt, 1] = L2w
plt.subplot(2, 3, 4)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 5)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 6)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
Accuracy_oil[itt, 0] = R2o
Accuracy_oil[itt, 1] = L2o
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(20, 20), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
plt.subplot(2, 2, 1)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 2)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 3)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 2, 4)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 2D FNO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
oldfolder = os.getcwd()
os.chdir(oldfolder)
if not os.path.exists(to_absolute_path("../PACKETS")):
os.makedirs(to_absolute_path("../PACKETS"))
else:
pass
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY  # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ  # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
CFO = cp.float32(cfg.custom.NVRS.CFO) # oil compressibility in 1/psi
IWSw = cfg.custom.NVRS.IWSw # initial water saturation
pini_alt = cfg.custom.NVRS.pini_alt
P1 = cp.float32(pini_alt) # Bubble point pressure psia
PB = P1
mpor = cfg.custom.NVRS.mpor
hpor = cfg.custom.NVRS.hpor # minimum and maximum porosity
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
PATM = cp.float32(cfg.custom.NVRS.PATM) # Atmospheric pressure in psi
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
Low_K, High_K = aay, bby
    batch_size = cfg.custom.NVRS.batch_size  # size of simulated labelled data to run
timmee = (
cfg.custom.NVRS.timmee
) # float(input ('Enter the time step interval duration for simulation (days): '))
max_t = (
cfg.custom.NVRS.max_t
) # float(input ('Enter the maximum time in days for simulation(days): '))
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
factorr = cfg.custom.NVRS.factorr # from [0 1] excluding the limits for PermZ
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR  # upper injection rate
input_channel = (
cfg.custom.NVRS.input_channel
) # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
# 4 injector and 4 producer wells
wells = np.array(
[
1,
24,
1,
3,
3,
1,
31,
1,
1,
31,
31,
1,
7,
9,
2,
14,
12,
2,
28,
19,
2,
14,
27,
2,
]
)
wells = np.reshape(wells, (-1, 3), "C")
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1I-27_S53ORRFB_hIN_41r3Ntc6PpOE40",
to_absolute_path("../PACKETS/Training4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
if bb == False:
print("....Downloading Please hold.........")
download_file_from_google_drive(
"1G4Cvg8eIObyBK0eoo7iX-0hhMTnpJktj",
to_absolute_path("../PACKETS/Test4.mat"),
)
print("...Downlaod completed.......")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data1[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data1[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data1[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data1[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data1[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data1[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use1[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data2[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data2[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data2[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data2[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data2[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data2[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use2[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
# load training/ test data
input_keys = [
Key("perm", scale=(3.46327e-01, 3.53179e-01)),
Key("Q", scale=(1.94683e-03, 3.70558e-02)),
Key("Qw", scale=(2.03866e-03, 3.70199e-02)),
Key("Phi", scale=(1.73163e-01, 1.76590e-01)),
Key("Time", scale=(1.66667e-02, 7.45058e-09)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(2.00000e-01, 4.91738e-07)),
]
output_keys_pressure = [Key("pressure", scale=(2.87008e-01, 1.85386e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.12903e-01, 1.79786e-01))]
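    # Note (added): in Modulus-Sym, Key(..., scale=(shift, scale)) attaches a
    # normalisation so the network effectively sees (x - shift) / scale; the
    # constants above are presumably the per-key mean/std printed by
    # load_FNO_dataset2 when the training set was first loaded.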
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# [init-node]
# Make custom Darcy residual node for PINO
# Define FNO model for forward model (pressure)
decoder1 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("pressure", size=steppi)]
)
fno_pressure = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder1,
)
# Define FNO model for forward model (saturation)
decoder2 = ConvFullyConnectedArch(
[Key("z", size=32)], [Key("water_sat", size=steppi)]
)
fno_saturation = FNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
dimension=2,
decoder_net=decoder2,
)
nodes = [fno_pressure.make_node("fno_forward_model_pressure")] + [
fno_saturation.make_node("fno_forward_model_saturation")
]
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
# [constraint]
# add validator
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=1,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=1,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
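# Typical launch (assuming a working Modulus-Sym environment and the Hydra
# config at conf/config_FNO.yaml):
#     python Forward_problem_FNO.py
# Hydra then writes checkpoints and validator plots under outputs/.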
# ===== modulus-sym-main :: examples/reservoir_simulation/2D/src/Forward_problem_FNO.py =====
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 21:33:56 2023
@author: clementetienam
"""
import os
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from NVRS import *
from modulus.sym.models.fno import *
from modulus.sym.models.afno.afno import *
import shutil
import pandas as pd
import scipy.io as sio
import torch
import yaml
from PIL import Image
oldfolder = os.getcwd()
os.chdir(oldfolder)
data = []
os.chdir("../COMPARE_RESULTS/FNO")
True_measurement = pd.read_csv("RSM_NUMERICAL.csv")
True_measurement = True_measurement.values.astype(np.float32)[:, 1:]
data.append(True_measurement)
FNO = pd.read_csv("RSM_MODULUS.csv")
FNO = FNO.values.astype(np.float32)[:, 1:]
data.append(FNO)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS/PINO")
PINO = pd.read_csv("RSM_MODULUS.csv")
PINO = PINO.values.astype(np.float32)[:, 1:]
data.append(PINO)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS/AFNOP")
AFNOP = pd.read_csv("RSM_MODULUS.csv")
AFNOP = AFNOP.values.astype(np.float32)[:, 1:]
data.append(AFNOP)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS/AFNOD")
AFNOD = pd.read_csv("RSM_MODULUS.csv")
AFNOD = AFNOD.values.astype(np.float32)[:, 1:]
data.append(AFNOD)
os.chdir(oldfolder)
os.chdir("../COMPARE_RESULTS")
Plot_Models(data)
Plot_bar(data)
os.chdir(oldfolder)
# ===== modulus-sym-main :: examples/reservoir_simulation/2D/src/Compare_Models.py =====
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import zipfile
try:
    import gdown
except ImportError:
    gdown = None
import scipy.io
import numpy as np
import h5py
from modulus.sym.hydra import to_absolute_path
# list of FNO dataset url ids on drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt-
_FNO_dataset_ids = {
"Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV",
"Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf",
}
_FNO_dataset_names = {
"Darcy_241": (
"piececonst_r241_N1024_smooth1.hdf5",
"piececonst_r241_N1024_smooth2.hdf5",
),
"Darcy_421": (
"piececonst_r421_N1024_smooth1.hdf5",
"piececonst_r421_N1024_smooth2.hdf5",
),
}
def load_FNO_dataset(path, input_keys, output_keys, n_examples=None):
"Loads a FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
print(f"loaded: {path}\navaliable keys: {_ks}")
# parse data
invar, outvar = dict(), dict()
for d, keys in [(invar, input_keys), (outvar, output_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar)
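# Usage sketch (added; key names are illustrative):
#     invar, outvar = load_FNO_dataset(
#         "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
#         input_keys=["coeff"], output_keys=["sol"], n_examples=128,
#     )
# returns dicts of N x C x H x W arrays keyed by variable name.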
def load_FNO_dataset2(path, input_keys, output_keys, output_keys2, n_examples=None):
"Loads a FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
print(f"loaded: {path}\navaliable keys: {_ks}")
# parse data
invar, outvar, outvar2 = dict(), dict(), dict()
for d, keys in [
(invar, input_keys),
(outvar, output_keys),
(outvar2, output_keys2),
]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar, outvar2)
def load_FNO_dataset4(path, input_keys, n_examples=None):
"Loads a FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
print(f"loaded: {path}\navaliable keys: {_ks}")
# parse data
invar = dict()
for d, keys in [(invar, input_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return invar
def load_deeponet_dataset(
path, input_keys, output_keys, n_examples=None, filter_size=8
):
"Loads a deeponet dataset"
# load dataset
invar, outvar = load_FNO_dataset(path, input_keys, output_keys, n_examples)
# reduce shape needed for deeponet
for key, value in invar.items():
invar[key] = value[:, :, ::filter_size, ::filter_size]
for key, value in outvar.items():
outvar[key] = value[:, :, ::filter_size, ::filter_size]
res = next(iter(invar.values())).shape[-1]
nr_points_per_sample = res**2
    # tile invar: each input field is repeated once per grid point, so memory use grows by a factor of res**2
tiled_invar = {
key: np.concatenate(
[
np.tile(value[i], (nr_points_per_sample, 1, 1, 1))
for i in range(n_examples)
]
)
for key, value in invar.items()
}
# tile outvar
tiled_outvar = {key: value.flatten()[:, None] for key, value in outvar.items()}
# add cord points
x = np.linspace(0.0, 1.0, res)
y = np.linspace(0.0, 1.0, res)
x, y = [a.flatten()[:, None] for a in np.meshgrid(x, y)]
tiled_invar["x"] = np.concatenate(n_examples * [x], axis=0)
tiled_invar["y"] = np.concatenate(n_examples * [y], axis=0)
return (tiled_invar, tiled_outvar)
def download_FNO_dataset(name, outdir="datasets/"):
"Tries to download FNO dataset from drive"
    if name not in _FNO_datasets_ids:
raise Exception(
f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_datatsets_ids.keys())}"
)
    file_id = _FNO_datasets_ids[name]
outdir = to_absolute_path(outdir) + "/"
namedir = f"{outdir}{name}/"
# skip if already exists
exists = True
for file_name in _FNO_dataset_names[name]:
if not os.path.isfile(namedir + file_name):
exists = False
break
if exists:
return
print(f"FNO dataset {name} not detected, downloading dataset")
# Make sure we have gdown installed
if gdown is None:
raise ModuleNotFoundError("gdown package is required to download the dataset!")
# get output directory
os.makedirs(namedir, exist_ok=True)
# download dataset
zippath = f"{outdir}{name}.zip"
    _download_file_from_google_drive(file_id, zippath)
# unzip
with zipfile.ZipFile(zippath, "r") as f:
f.extractall(namedir)
os.remove(zippath)
# preprocess files
for file in os.listdir(namedir):
if file.endswith(".mat"):
matpath = f"{namedir}{file}"
preprocess_FNO_mat(matpath)
os.remove(matpath)
def _download_file_from_google_drive(id, path):
"Downloads a file from google drive"
# use gdown library to download file
gdown.download(id=id, output=path)
def preprocess_FNO_mat(path):
"Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays"
assert path.endswith(".mat")
data = scipy.io.loadmat(path)
ks = [k for k in data.keys() if not k.startswith("__")]
with h5py.File(path[:-4] + ".hdf5", "w") as f:
for k in ks:
# x = np.expand_dims(data[k], axis=1) # N, C, H, W
x = data[k]
f.create_dataset(
k, data=x, dtype="float32"
) # note h5 files larger than .mat because no compression used
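if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): fetch the Darcy_241 dataset and load a few examples. Assumes
    # network access, the gdown package, and that the hdf5 files expose
    # "coeff" and "sol" keys, as in the Modulus Darcy FNO examples.
    download_FNO_dataset("Darcy_241")
    invar, outvar = load_FNO_dataset(
        "datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
        input_keys=["coeff"],
        output_keys=["sol"],
        n_examples=4,
    )
    print({k: v.shape for k, v in invar.items()})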
| modulus-sym-main | examples/reservoir_simulation/2D/src/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
# Import from Modulus
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight1 = (torch.tanh((10000 - step_tensor) * smoothness) + 1.0) * 0.5
lambda_pressure = 1.0
lambda_saturation = 1.0
        # Add losses
        for key in losses.keys():
            if "pressure" in key:
                loss += lambda_pressure * (1 - decay_weight1) * losses[key]
            if "water_sat" in key:
                loss += lambda_saturation * (1 - decay_weight1) * losses[key]
return loss
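# Standalone sanity check of the decay schedule above (an illustrative sketch,
# not used by the trainer): the data-loss weight (1 - decay_weight1) ramps
# smoothly from ~0 at step 0 to 0.5 at step 10000 and ~1 by step ~20000.
if __name__ == "__main__":
    for step in [0, 5000, 10000, 15000, 20000]:
        w = (torch.tanh((10000 - torch.tensor(float(step))) * 0.0005) + 1.0) * 0.5
        print(f"step {step:>5d}: loss weight {1.0 - w.item():.5f}")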
| modulus-sym-main | examples/reservoir_simulation/2D/src/custom_aggregator_FNO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
# Import from Modulus
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight1 = (torch.tanh((10000 - step_tensor) * smoothness) + 1.0) * 0.5
lambda_pressure = 1.0
lambda_saturation = 1.0
lambda_pressured = 0.1
lambda_saturationd = 0.1
        # Add losses. Check the residual keys first: "pressure" is a substring
        # of "pressured", so a plain `if` chain would weight the residual
        # losses twice (1.0 + 0.1) instead of once (0.1).
        for key in losses.keys():
            if "pressured" in key:
                loss += lambda_pressured * (1 - decay_weight1) * losses[key]
            elif "pressure" in key:
                loss += lambda_pressure * (1 - decay_weight1) * losses[key]
            elif "saturationd" in key:
                loss += lambda_saturationd * (1 - decay_weight1) * losses[key]
            elif "water_sat" in key:
                loss += lambda_saturation * (1 - decay_weight1) * losses[key]
return loss
| modulus-sym-main | examples/reservoir_simulation/2D/src/custom_aggregator_PINO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import modulus
import torch
from modulus.sym.hydra import ModulusConfig
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from NVRS import *
from utilities import load_FNO_dataset2, preprocess_FNO_mat
from modulus.sym.models.afno.afno import *
import shutil
import cupy as cp
import scipy.io as sio
import requests
torch.set_default_dtype(torch.float32)
def download_file_from_google_drive(id, destination):
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={"id": id, "confirm": 1}, stream=True)
token = get_confirm_token(response)
if token:
params = {"id": id, "confirm": token}
response = session.get(URL, params=params, stream=True)
save_response_content(response, destination)
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith("download_warning"):
return value
return None
def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
from modulus.sym.utils.io.plotter import ValidatorPlotter
class CustomValidatorPlotterP(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
        # get output variables
pressure_true, pressure_pred = true_outvar["pressure"], pred_outvar["pressure"]
# make plot
f_big = []
Time_vector = np.zeros((self.steppi))
        Accuracy_pressure = np.zeros((self.steppi, 2))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
look = (pressure_pred[0, itt, :, :]) * self.pini_alt
lookf = (pressure_true[0, itt, :, :]) * self.pini_alt
diff1 = abs(look - lookf)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(10, 10), dpi=100)
plt.subplot(1, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
plt.subplot(1, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "pressure_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
R2p, L2p = compute_metrics(look.ravel(), lookf.ravel())
            Accuracy_pressure[itt, 0] = R2p
            Accuracy_pressure[itt, 1] = L2p
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(131, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look,
self.N_injw,
self.N_pr,
"pressure Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(132, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf,
self.N_injw,
self.N_pr,
"pressure Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(133, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1,
self.N_injw,
self.N_pr,
"pressure diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Dp" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(10, 10), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
# Plot R2 accuracies
plt.subplot(2, 1, 1)
plt.plot(
Time_vector,
            Accuracy_pressure[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
# Plot L2 accuracies
plt.subplot(2, 1, 2)
plt.plot(
Time_vector,
            Accuracy_pressure[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("Pressure", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_pressure"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
class CustomValidatorPlotterS(ValidatorPlotter):
def __init__(
self,
timmee,
max_t,
MAXZ,
pini_alt,
nx,
ny,
nz,
steppi,
tc2,
dt,
injectors,
producers,
N_injw,
N_pr,
):
self.timmee = timmee
self.max_t = max_t
self.MAXZ = MAXZ
self.pini_alt = pini_alt
self.nx = nx
self.ny = ny
self.nz = nz
self.steppi = steppi
self.tc2 = tc2
self.dt = dt
self.injectors = injectors
self.producers = producers
self.N_injw = N_injw
self.N_pr = N_pr
def __call__(self, invar, true_outvar, pred_outvar):
"Custom plotting function for validator"
        # get output variables
water_true, water_pred = true_outvar["water_sat"], pred_outvar["water_sat"]
# make plot
f_big = []
Accuracy_oil = np.zeros((self.steppi, 2))
Accuracy_water = np.zeros((self.steppi, 2))
Time_vector = np.zeros((self.steppi))
for itt in range(self.steppi):
Time_vector[itt] = int((itt + 1) * self.dt * self.MAXZ)
XX, YY = np.meshgrid(np.arange(self.nx), np.arange(self.ny))
f_2 = plt.figure(figsize=(12, 12), dpi=100)
look_sat = water_pred[0, itt, :, :] # *1e-2
look_oil = 1 - look_sat
lookf_sat = water_true[0, itt, :, :] # * 1e-2
lookf_oil = 1 - lookf_sat
diff1_wat = abs(look_sat - lookf_sat)
diff1_oil = abs(look_oil - lookf_oil)
plt.subplot(2, 3, 1)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 2)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 3)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
R2w, L2w = compute_metrics(look_sat.ravel(), lookf_sat.ravel())
Accuracy_water[itt, 0] = R2w
Accuracy_water[itt, 1] = L2w
plt.subplot(2, 3, 4)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 5)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
plt.subplot(2, 3, 6)
Plot_2D(
XX,
YY,
plt,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
R2o, L2o = compute_metrics(look_oil.ravel(), lookf_oil.ravel())
Accuracy_oil[itt, 0] = R2o
Accuracy_oil[itt, 1] = L2o
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = "Timestep --" + str(int((itt + 1) * self.dt * self.MAXZ)) + " days"
plt.suptitle(tita, fontsize=16)
namez = "saturation_simulations" + str(int(itt))
yes = (f_2, namez)
f_big.append(yes)
# plt.clf()
plt.close()
f_3 = plt.figure(figsize=(20, 20), dpi=200)
ax1 = f_3.add_subplot(231, projection="3d")
Plot_Modulus(
ax1,
self.nx,
self.ny,
self.nz,
look_sat,
self.N_injw,
self.N_pr,
"water Modulus",
self.injectors,
self.producers,
)
ax2 = f_3.add_subplot(232, projection="3d")
Plot_Modulus(
ax2,
self.nx,
self.ny,
self.nz,
lookf_sat,
self.N_injw,
self.N_pr,
"water Numerical",
self.injectors,
self.producers,
)
ax3 = f_3.add_subplot(233, projection="3d")
Plot_Modulus(
ax3,
self.nx,
self.ny,
self.nz,
diff1_wat,
self.N_injw,
self.N_pr,
"water diff",
self.injectors,
self.producers,
)
ax4 = f_3.add_subplot(234, projection="3d")
Plot_Modulus(
ax4,
self.nx,
self.ny,
self.nz,
look_oil,
self.N_injw,
self.N_pr,
"oil Modulus",
self.injectors,
self.producers,
)
ax5 = f_3.add_subplot(235, projection="3d")
Plot_Modulus(
ax5,
self.nx,
self.ny,
self.nz,
lookf_oil,
self.N_injw,
self.N_pr,
"oil Numerical",
self.injectors,
self.producers,
)
ax6 = f_3.add_subplot(236, projection="3d")
Plot_Modulus(
ax6,
self.nx,
self.ny,
self.nz,
diff1_oil,
self.N_injw,
self.N_pr,
"oil diff",
self.injectors,
self.producers,
)
plt.tight_layout(rect=[0, 0, 1, 0.95])
tita = (
"3D Map - Timestep --"
+ str(int((itt + 1) * self.dt * self.MAXZ))
+ " days"
)
plt.suptitle(tita, fontsize=20, weight="bold")
namez = "Simulations3Ds" + str(int(itt))
yes2 = (f_3, namez)
f_big.append(yes2)
# plt.clf()
plt.close()
fig4 = plt.figure(figsize=(20, 20), dpi=200)
font = FontProperties()
font.set_family("Helvetica")
font.set_weight("bold")
fig4.text(
0.5,
0.98,
"R2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
fig4.text(
0.5,
0.49,
"L2(%) Accuracy - Modulus/Numerical(GPU)",
ha="center",
va="center",
fontproperties=font,
fontsize=16,
)
plt.subplot(2, 2, 1)
plt.plot(
Time_vector,
Accuracy_water[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 2)
plt.plot(
Time_vector,
Accuracy_oil[:, 0],
label="R2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("R2(%)", fontproperties=font)
plt.subplot(2, 2, 3)
plt.plot(
Time_vector,
Accuracy_water[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("water_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.subplot(2, 2, 4)
plt.plot(
Time_vector,
Accuracy_oil[:, 1],
label="L2",
marker="*",
markerfacecolor="red",
markeredgecolor="red",
linewidth=0.5,
)
plt.title("oil_saturation", fontproperties=font)
plt.xlabel("Time (days)", fontproperties=font)
plt.ylabel("L2(%)", fontproperties=font)
plt.tight_layout(rect=[0, 0.05, 1, 0.93])
namez = "R2L2_saturations"
yes21 = (fig4, namez)
f_big.append(yes21)
# plt.clf()
plt.close()
return f_big
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
print("")
print("------------------------------------------------------------------")
print("")
print("\n")
print("|-----------------------------------------------------------------|")
print("| TRAIN THE MODEL USING A 2D DATA DRIVEN AFNO APPROACH: |")
print("|-----------------------------------------------------------------|")
print("")
oldfolder = os.getcwd()
os.chdir(oldfolder)
    os.makedirs(to_absolute_path("../PACKETS"), exist_ok=True)
    # Variables needed for NVRS
nx = cfg.custom.NVRS.nx
ny = cfg.custom.NVRS.ny
nz = cfg.custom.NVRS.nz
BO = cfg.custom.NVRS.BO # oil formation volume factor
BW = cfg.custom.NVRS.BW # Water formation volume factor
UW = cfg.custom.NVRS.UW # water viscosity in cP
UO = cfg.custom.NVRS.UO # oil viscosity in cP
DX = cfg.custom.NVRS.DX # size of pixel in x direction
    DY = cfg.custom.NVRS.DY  # size of pixel in y direction
    DZ = cfg.custom.NVRS.DZ  # size of pixel in z direction
DX = cp.float32(DX)
DY = cp.float32(DY)
UW = cp.float32(UW) # water viscosity in cP
UO = cp.float32(UO) # oil viscosity in cP
SWI = cp.float32(cfg.custom.NVRS.SWI)
SWR = cp.float32(cfg.custom.NVRS.SWR)
CFO = cp.float32(cfg.custom.NVRS.CFO) # oil compressibility in 1/psi
IWSw = cfg.custom.NVRS.IWSw # initial water saturation
pini_alt = cfg.custom.NVRS.pini_alt
P1 = cp.float32(pini_alt) # Bubble point pressure psia
PB = P1
    mpor = cfg.custom.NVRS.mpor  # minimum porosity
    hpor = cfg.custom.NVRS.hpor  # maximum porosity
BW = cp.float32(BW) # Water formation volume factor
BO = cp.float32(BO) # Oil formation volume factor
PATM = cp.float32(cfg.custom.NVRS.PATM) # Atmospheric pressure in psi
# training
LUB = cfg.custom.NVRS.LUB
HUB = cfg.custom.NVRS.HUB # Permeability rescale
aay, bby = cfg.custom.NVRS.aay, cfg.custom.NVRS.bby # Permeability range mD
Low_K, High_K = aay, bby
    batch_size = cfg.custom.NVRS.batch_size  # size of simulated labelled data to run
    timmee = cfg.custom.NVRS.timmee  # time-step interval for the simulation (days)
    max_t = cfg.custom.NVRS.max_t  # maximum simulation time (days)
MAXZ = cfg.custom.NVRS.MAXZ # reference maximum time in days of simulation
steppi = int(max_t / timmee)
    factorr = cfg.custom.NVRS.factorr  # PermZ scaling factor, strictly inside (0, 1)
LIR = cfg.custom.NVRS.LIR # lower injection rate
    UIR = cfg.custom.NVRS.UIR  # upper injection rate
input_channel = (
cfg.custom.NVRS.input_channel
) # [Perm, Q,QW,Phi,dt, initial_pressure, initial_water_sat]
injectors = cfg.custom.WELLSPECS.water_injector_wells
producers = cfg.custom.WELLSPECS.producer_wells
N_injw = len(cfg.custom.WELLSPECS.water_injector_wells) # Number of water injectors
N_pr = len(cfg.custom.WELLSPECS.producer_wells) # Number of producers
# tc2 = Equivalent_time(timmee,2100,timmee,max_t)
tc2 = Equivalent_time(timmee, MAXZ, timmee, max_t)
dt = np.diff(tc2)[0] # Time-step
# 4 injector and 4 producer wells
wells = np.array(
[
1,
24,
1,
3,
3,
1,
31,
1,
1,
31,
31,
1,
7,
9,
2,
14,
12,
2,
28,
19,
2,
14,
27,
2,
]
)
wells = np.reshape(wells, (-1, 3), "C")
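    # Each row of `wells` appears to encode (i-index, j-index, well type),
    # with type 1 = water injector and type 2 = producer (four of each); this
    # interpretation is inferred from the values above and the WELLSPECS
    # config, and is not asserted anywhere in the code.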
bb = os.path.isfile(to_absolute_path("../PACKETS/Training4.mat"))
    if not bb:
        print("Downloading, please hold...")
        download_file_from_google_drive(
            "1I-27_S53ORRFB_hIN_41r3Ntc6PpOE40",
            to_absolute_path("../PACKETS/Training4.mat"),
        )
        print("Download completed.")
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
else:
print("Load simulated labelled training data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Training4.mat"))
X_data1 = matt["INPUT"]
data_use1 = matt["OUTPUT"]
bb = os.path.isfile(to_absolute_path("../PACKETS/Test4.mat"))
    if not bb:
        print("Downloading, please hold...")
        download_file_from_google_drive(
            "1G4Cvg8eIObyBK0eoo7iX-0hhMTnpJktj",
            to_absolute_path("../PACKETS/Test4.mat"),
        )
        print("Download completed.")
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
else:
print("Load simulated labelled test data from MAT file")
matt = sio.loadmat(to_absolute_path("../PACKETS/Test4.mat"))
X_data2 = matt["INPUT"]
data_use2 = matt["OUTPUT"]
cPerm = np.zeros((X_data1.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data1.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data1.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data1.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data1.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data1.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data1.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data1.shape[0]):
perm = X_data1[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data1[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data1[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data1[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data1[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data1[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data1[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use1[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use1[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstrain.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstrain.mat"))
cPerm = np.zeros((X_data2.shape[0], 1, nx, ny)) # Permeability
cQ = np.zeros((X_data2.shape[0], 1, nx, ny)) # Overall source/sink term
cQw = np.zeros((X_data2.shape[0], 1, nx, ny)) # Sink term
cPhi = np.zeros((X_data2.shape[0], 1, nx, ny)) # Porosity
cTime = np.zeros((X_data2.shape[0], 1, nx, ny)) # Time index
cPini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial pressure
cSini = np.zeros((X_data2.shape[0], 1, nx, ny)) # Initial water saturation
cPress = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Pressure
cSat = np.zeros((X_data2.shape[0], steppi, nx, ny)) # Water saturation
for kk in range(X_data2.shape[0]):
perm = X_data2[kk, 0, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPerm[kk, :, :, :] = permin
perm = X_data2[kk, 1, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQ[kk, :, :, :] = permin
perm = X_data2[kk, 2, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cQw[kk, :, :, :] = permin
perm = X_data2[kk, 3, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPhi[kk, :, :, :] = permin
perm = X_data2[kk, 4, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cTime[kk, :, :, :] = permin
perm = X_data2[kk, 5, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cPini[kk, :, :, :] = permin
perm = X_data2[kk, 6, :, :]
permin = np.zeros((1, nx, ny))
permin[0, :, :] = perm
cSini[kk, :, :, :] = permin
perm = data_use2[kk, :steppi, :, :]
cPress[kk, :, :, :] = perm # np.clip(perm ,1/pini_alt,1.)
perm = data_use2[kk, steppi:, :, :]
cSat[kk, :, :, :] = perm
sio.savemat(
to_absolute_path("../PACKETS/simulationstest.mat"),
{
"perm": cPerm,
"Q": cQ,
"Qw": cQw,
"Phi": cPhi,
"Time": cTime,
"Pini": cPini,
"Swini": cSini,
"pressure": cPress,
"water_sat": cSat,
},
)
preprocess_FNO_mat(to_absolute_path("../PACKETS/simulationstest.mat"))
    # load training / test data; Key(name, scale=(shift, scale)) carries the
    # per-field normalization statistics (the mean/std values printed by
    # load_FNO_dataset2)
input_keys = [
Key("perm", scale=(3.46327e-01, 3.53179e-01)),
Key("Q", scale=(1.94683e-03, 3.70558e-02)),
Key("Qw", scale=(2.03866e-03, 3.70199e-02)),
Key("Phi", scale=(1.73163e-01, 1.76590e-01)),
Key("Time", scale=(1.66667e-02, 7.45058e-09)),
Key("Pini", scale=(1.00000e00, 0.00000e00)),
Key("Swini", scale=(2.00000e-01, 4.91738e-07)),
]
output_keys_pressure = [Key("pressure", scale=(2.87008e-01, 1.85386e-01))]
output_keys_saturation = [Key("water_sat", scale=(3.12903e-01, 1.79786e-01))]
invar_train, outvar_train_pressure, outvar_train_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstrain.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test_pressure, outvar_test_saturation = load_FNO_dataset2(
to_absolute_path("../PACKETS/simulationstest.hdf5"),
[k.name for k in input_keys],
[k.name for k in output_keys_pressure],
[k.name for k in output_keys_saturation],
n_examples=cfg.custom.ntest,
)
train_dataset_pressure = DictGridDataset(invar_train, outvar_train_pressure)
train_dataset_saturation = DictGridDataset(invar_train, outvar_train_saturation)
test_dataset_pressure = DictGridDataset(invar_test, outvar_test_pressure)
test_dataset_saturation = DictGridDataset(invar_test, outvar_test_saturation)
# Define AFNO model for forward model (pressure)
afno_pressure = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("pressure", size=steppi)],
(nx, ny),
patch_size=3,
)
# Define AFNO model for forward model (saturation)
afno_saturation = AFNOArch(
[
Key("perm", size=1),
Key("Q", size=1),
Key("Qw", size=1),
Key("Phi", size=1),
Key("Time", size=1),
Key("Pini", size=1),
Key("Swini", size=1),
],
[Key("water_sat", size=steppi)],
(nx, ny),
patch_size=3,
)
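    # Note: AFNO tokenizes the (nx, ny) grid into patch_size x patch_size
    # patches, so nx and ny should be divisible by patch_size (a requirement
    # of the AFNO patch embedding, not checked here).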
nodes = [afno_pressure.make_node("afno_forward_model_pressure")] + [
afno_saturation.make_node("afno_forward_model_saturation")
]
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised_pressure = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_pressure,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_pressure, "supervised_pressure")
supervised_saturation = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset_saturation,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised_saturation, "supervised_saturation")
    # [constraint]
    # add validators
# test_pressure = GridValidator(
# nodes,
# dataset=test_dataset_pressure,
# batch_size=1,
# plotter=CustomValidatorPlotterP(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_pressure = GridValidator(
nodes,
dataset=test_dataset_pressure,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_pressure, "test_pressure")
# test_saturation = GridValidator(
# nodes,
# dataset=test_dataset_saturation,
# batch_size=1,
# plotter=CustomValidatorPlotterS(timmee,max_t,MAXZ,pini_alt,nx,ny,nz,\
# steppi,tc2,dt,injectors,producers,N_injw,N_pr),
# requires_grad=False,
# )
test_saturation = GridValidator(
nodes,
dataset=test_dataset_saturation,
batch_size=1,
requires_grad=False,
)
domain.add_validator(test_saturation, "test_saturation")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/reservoir_simulation/2D/src/Forward_problem_AFNOD.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, Eq, And
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# specify params
channel_length = (-6.732, 6.732)
channel_width = (-1.0, 1.0)
cylinder_center = (0.0, 0.0)
outer_cylinder_radius = 2.0
inner_cylinder_radius = 1.0
inlet_vel = 1.5
# make geometry
x, y = Symbol("x"), Symbol("y")
rec = Rectangle(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
outer_circle = Circle(cylinder_center, outer_cylinder_radius)
inner_circle = Circle((0, 0), inner_cylinder_radius)
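    # CSG geometry: (channel rectangle union outer disk) minus the inner disk,
    # i.e. a channel with an annular bulge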
geo = (rec + outer_circle) - inner_circle
# make annular ring domain
domain = Domain()
# inlet
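    # parabolic inflow profile: zero at both channel walls, peaking at inlet_vel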
inlet_sympy = parabola(
y, inter_1=channel_width[0], inter_2=channel_width[1], height=inlet_vel
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": inlet_sympy, "v": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_length[0]),
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.no_slip,
criteria=And((x > channel_length[0]), (x < channel_length[1])),
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# integral continuity
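    # the target mass flow of 2 matches the inlet flux: the integral of
    # 1.5 * (1 - y**2) over y in [-1, 1] equals 2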
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"normal_dot_vel": 2},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "../openfoam/bend_finerInternal0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_length[0] # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=1024,
)
domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add monitors
# metric for mass and momentum imbalance
global_monitor = PointwiseMonitor(
geo.sample_interior(1024),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
    # metric for force on inner cylinder
force_monitor = PointwiseMonitor(
inner_circle.sample_boundary(1024),
output_names=["p"],
metrics={
"force_x": lambda var: torch.sum(var["normal_x"] * var["area"] * var["p"]),
"force_y": lambda var: torch.sum(var["normal_y"] * var["area"] * var["p"]),
},
nodes=nodes,
)
domain.add_monitor(force_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/annular_ring/annular_ring/annular_ring.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, Eq, And
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
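    # Two instances of the Navier-Stokes equations are created below;
    # freeze_terms lists, per instance, the indices of the PDE terms to freeze
    # (hold fixed during training), so each instance optimizes a reduced
    # residual. The term indices refer to the internal ordering of the Modulus
    # NavierStokes implementation.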
nodes = (
ns.make_nodes(
create_instances=2,
freeze_terms={
"continuity_0": [],
"momentum_x_0": [1, 2],
"momentum_y_0": [1, 2],
"continuity_1": [],
"momentum_x_1": [3, 4],
"momentum_y_1": [3, 4],
},
)
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network", jit=cfg.jit)]
)
# add constraints to solver
# specify params
channel_length = (-6.732, 6.732)
channel_width = (-1.0, 1.0)
cylinder_center = (0.0, 0.0)
outer_cylinder_radius = 2.0
inner_cylinder_radius = 1.0
inlet_vel = 1.5
# make geometry
x, y = Symbol("x"), Symbol("y")
rec = Rectangle(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
outer_circle = Circle(cylinder_center, outer_cylinder_radius)
inner_circle = Circle((0, 0), inner_cylinder_radius)
geo = (rec + outer_circle) - inner_circle
# make annular ring domain
domain = Domain()
# inlet
inlet_sympy = parabola(
y, inter_1=channel_width[0], inter_2=channel_width[1], height=inlet_vel
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": inlet_sympy, "v": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_length[0]),
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.no_slip,
criteria=And((x > channel_length[0]), (x < channel_length[1])),
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"continuity_0": 0,
"momentum_x_0": 0,
"momentum_y_0": 0,
"continuity_1": 0,
"momentum_x_1": 0,
"momentum_y_1": 0,
},
batch_size=cfg.batch_size.interior,
lambda_weighting={
"continuity_0": Symbol("sdf"),
"momentum_x_0": Symbol("sdf"),
"momentum_y_0": Symbol("sdf"),
"continuity_1": Symbol("sdf"),
"momentum_x_1": Symbol("sdf"),
"momentum_y_1": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# integral continuity
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"normal_dot_vel": 2},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "../openfoam/bend_finerInternal0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_length[0] # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=512,
)
domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add monitors
# metric for mass and momentum imbalance
global_monitor = PointwiseMonitor(
geo.sample_interior(1024),
output_names=["continuity_0", "momentum_x_0", "momentum_y_0"],
metrics={
"mass_imbalance_0": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity_0"])
),
"momentum_imbalance_0": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x_0"]) + torch.abs(var["momentum_y_0"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
global_monitor = PointwiseMonitor(
geo.sample_interior(1024),
output_names=["continuity_1", "momentum_x_1", "momentum_y_1"],
metrics={
"mass_imbalance_1": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity_1"])
),
"momentum_imbalance_1": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x_1"]) + torch.abs(var["momentum_y_1"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
    # metric for force on inner cylinder
force_monitor = PointwiseMonitor(
inner_circle.sample_boundary(1024),
output_names=["p"],
metrics={
"force_x": lambda var: torch.sum(var["normal_x"] * var["area"] * var["p"]),
"force_y": lambda var: torch.sum(var["normal_y"] * var["area"] * var["p"]),
},
nodes=nodes,
)
domain.add_monitor(force_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/annular_ring/annular_ring_equation_instancing/annular_ring.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, Eq
import numpy as np
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Bounds, Parameterization, Parameter
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("r")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# specify params
channel_length = (-6.732, 6.732)
channel_width = (-1.0, 1.0)
cylinder_center = (0.0, 0.0)
outer_cylinder_radius = 2.0
inner_cylinder_radius = Parameter("r")
inner_cylinder_radius_ranges = (0.75, 1.0)
inlet_vel = 1.5
parameterization = Parameterization(
{inner_cylinder_radius: inner_cylinder_radius_ranges}
)
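    # the inner radius r is an extra network input (note Key("r") above):
    # during training it is sampled from inner_cylinder_radius_ranges, so a
    # single model covers the whole family of geometries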
# make geometry
x, y = Symbol("x"), Symbol("y")
rec = Rectangle(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
outer_circle = Circle(cylinder_center, outer_cylinder_radius)
inner_circle = Circle(
(0, 0), inner_cylinder_radius, parameterization=parameterization
)
geo = (rec + outer_circle) - inner_circle
# make annular ring domain
domain = Domain()
# inlet
inlet_sympy = parabola(
y, inter_1=channel_width[0], inter_2=channel_width[1], height=inlet_vel
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": inlet_sympy, "v": 0},
batch_size=cfg.batch_size.inlet,
batch_per_epoch=4000,
criteria=Eq(x, channel_length[0]),
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
batch_per_epoch=4000,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.no_slip,
batch_per_epoch=4000,
criteria=(x > channel_length[0]) & (x < channel_length[1]),
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior,
batch_per_epoch=4000,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# integral continuity
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"normal_dot_vel": 2},
batch_size=10,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
file_path_1 = "../openfoam/bend_finerInternal0.csv"
file_path_2 = "../openfoam/annularRing_r_0.8750.csv"
file_path_3 = "../openfoam/annularRing_r_0.750.csv"
# r1
if os.path.exists(to_absolute_path(file_path_1)):
openfoam_var_r1 = csv_to_dict(to_absolute_path(file_path_1), mapping)
openfoam_var_r1["x"] += channel_length[0] # center OpenFoam data
openfoam_var_r1["r"] = np.zeros_like(openfoam_var_r1["x"]) + 1.0
openfoam_invar_r1_numpy = {
key: value
for key, value in openfoam_var_r1.items()
if key in ["x", "y", "r"]
}
openfoam_outvar_r1_numpy = {
key: value
for key, value in openfoam_var_r1.items()
if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_r1_numpy,
true_outvar=openfoam_outvar_r1_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path_1} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# r875
if os.path.exists(to_absolute_path(file_path_2)):
openfoam_var_r875 = csv_to_dict(to_absolute_path(file_path_2), mapping)
openfoam_var_r875["x"] += channel_length[0] # center OpenFoam data
openfoam_var_r875["r"] = np.zeros_like(openfoam_var_r875["x"]) + 0.875
openfoam_invar_r875_numpy = {
key: value
for key, value in openfoam_var_r875.items()
if key in ["x", "y", "r"]
}
openfoam_outvar_r875_numpy = {
key: value
for key, value in openfoam_var_r875.items()
if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_r875_numpy,
true_outvar=openfoam_outvar_r875_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path_2} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# r75
if os.path.exists(to_absolute_path(file_path_3)):
openfoam_var_r75 = csv_to_dict(to_absolute_path(file_path_3), mapping)
openfoam_var_r75["x"] += channel_length[0] # center OpenFoam data
openfoam_var_r75["r"] = np.zeros_like(openfoam_var_r75["x"]) + 0.75
openfoam_invar_r75_numpy = {
key: value
for key, value in openfoam_var_r75.items()
if key in ["x", "y", "r"]
}
openfoam_outvar_r75_numpy = {
key: value
for key, value in openfoam_var_r75.items()
if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_r75_numpy,
true_outvar=openfoam_outvar_r75_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path_3} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add inferencer data
for i, radius in enumerate(
np.linspace(
inner_cylinder_radius_ranges[0], inner_cylinder_radius_ranges[1], 10
)
):
radius = float(radius)
sampled_interior = geo.sample_interior(
1024,
bounds=Bounds(
{x: channel_length, y: (-outer_cylinder_radius, outer_cylinder_radius)}
),
parameterization={inner_cylinder_radius: radius},
)
point_cloud_inference = PointwiseInferencer(
nodes=nodes,
invar=sampled_interior,
output_names=["u", "v", "p"],
batch_size=1024,
)
domain.add_inferencer(point_cloud_inference, "inf_data" + str(i).zfill(5))
# add monitors
# metric for mass and momentum imbalance
global_monitor = PointwiseMonitor(
geo.sample_interior(
1024,
bounds=Bounds(
{x: channel_length, y: (-outer_cylinder_radius, outer_cylinder_radius)}
),
),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
    # metric for force on inner cylinder
for i, radius in enumerate(
np.linspace(inner_cylinder_radius_ranges[0], inner_cylinder_radius_ranges[1], 3)
):
radius = float(radius)
force_monitor = PointwiseMonitor(
inner_circle.sample_boundary(
1024,
parameterization={inner_cylinder_radius: radius},
),
output_names=["p"],
metrics={
"force_x_r"
+ str(radius): lambda var: torch.sum(
var["normal_x"] * var["area"] * var["p"]
),
"force_y_r"
+ str(radius): lambda var: torch.sum(
var["normal_y"] * var["area"] * var["p"]
),
},
nodes=nodes,
)
domain.add_monitor(force_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/annular_ring/annular_ring_parameterized/annular_ring_parameterized.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
from sympy import Symbol, Eq
from typing import Dict
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.geometry.adf import ADF
class HardBC(ADF):
def __init__(self):
super().__init__()
# domain measures
self.channel_length = (-6.732, 6.732)
self.channel_width = (-1.0, 1.0)
self.cylinder_center = (0.0, 0.0)
self.outer_cylinder_radius = 2.0
self.inner_cylinder_radius = 1.0
self.delta = 0.267949
self.center = (0.0, 0.0)
self.r = self.outer_cylinder_radius
self.inlet_vel = 1.5
# parameters
self.eps: float = 1e-9
self.mu: float = 2.0
self.m: float = 2.0
def forward(self, invar: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Forms the solution ansatz for the annular ring example
"""
outvar = {}
x, y = invar["x"], invar["y"]
# ADFs
# left line
omega_0 = ADF.line_segment_adf(
(x, y),
(self.channel_length[0], self.channel_width[0]),
(self.channel_length[0], self.channel_width[1]),
)
# right line
omega_1 = ADF.line_segment_adf(
(x, y),
(self.channel_length[1], self.channel_width[0]),
(self.channel_length[1], self.channel_width[1]),
)
# top left line
omega_2 = ADF.line_segment_adf(
(x, y),
(self.channel_length[0], self.channel_width[1]),
(-self.outer_cylinder_radius + self.delta, self.channel_width[1]),
)
# top right line
omega_3 = ADF.line_segment_adf(
(x, y),
(self.outer_cylinder_radius - self.delta, self.channel_width[1]),
(self.channel_length[1], self.channel_width[1]),
)
# bottom left line
omega_4 = ADF.line_segment_adf(
(x, y),
(self.channel_length[0], self.channel_width[0]),
(-self.outer_cylinder_radius + self.delta, self.channel_width[0]),
)
# bottom right line
omega_5 = ADF.line_segment_adf(
(x, y),
(self.outer_cylinder_radius - self.delta, self.channel_width[0]),
(self.channel_length[1], self.channel_width[0]),
)
# inner circle
omega_6 = ADF.circle_adf((x, y), self.inner_cylinder_radius, self.center)
# top arch
omega_7 = ADF.trimmed_circle_adf(
(x, y),
(-self.outer_cylinder_radius, self.channel_width[1]),
(self.outer_cylinder_radius, self.channel_width[1]),
-1,
self.outer_cylinder_radius,
self.center,
)
# bottom arch
omega_8 = ADF.trimmed_circle_adf(
(x, y),
(-self.outer_cylinder_radius, self.channel_width[0]),
(self.outer_cylinder_radius, self.channel_width[0]),
1,
self.outer_cylinder_radius,
self.center,
)
# r equivalence
omega_E_u = ADF.r_equivalence(
[omega_0, omega_2, omega_3, omega_4, omega_5, omega_6, omega_7, omega_8],
self.m,
)
omega_E_v = ADF.r_equivalence(
[omega_0, omega_2, omega_3, omega_4, omega_5, omega_6, omega_7, omega_8],
self.m,
)
omega_E_p = omega_1
# u BC
bases = [
omega_0**self.mu,
omega_2**self.mu,
omega_3**self.mu,
omega_4**self.mu,
omega_5**self.mu,
omega_6**self.mu,
omega_7**self.mu,
omega_8**self.mu,
]
w = [
ADF.transfinite_interpolation(bases, idx, self.eps)
for idx in range(len(bases))
]
dirichlet_bc = [self.inlet_vel - (3 * (y**2) / 2), 0, 0, 0, 0, 0, 0, 0]
g = sum([w[i] * dirichlet_bc[i] for i in range(len(w))])
outvar["u"] = g + omega_E_u * invar["u_star"]
# v BC
outvar["v"] = omega_E_v * invar["v_star"]
# p BC
outvar["p"] = omega_E_p * invar["p_star"]
return outvar
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
hard_bc = HardBC()
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False, mixed_form=True)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[
Key("u_star"),
Key("v_star"),
Key("p_star"),
Key("u_x"),
Key("u_y"),
Key("v_x"),
Key("v_y"),
],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
+ [
Node(
inputs=["x", "y", "u_star", "v_star", "p_star"],
outputs=["u", "v", "p"],
evaluate=hard_bc,
)
]
)
# add constraints to solver
# specify params
channel_length = hard_bc.channel_length
channel_width = hard_bc.channel_width
cylinder_center = hard_bc.cylinder_center
outer_cylinder_radius = hard_bc.outer_cylinder_radius
inner_cylinder_radius = hard_bc.inner_cylinder_radius
inlet_vel = hard_bc.inlet_vel
# make geometry
x, y = Symbol("x"), Symbol("y")
rec = Rectangle(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
outer_circle = Circle(cylinder_center, outer_cylinder_radius)
inner_circle = Circle((0, 0), inner_cylinder_radius)
geo = (rec + outer_circle) - inner_circle
# make annular ring domain
domain = Domain()
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"compatibility_u_x": 0,
"compatibility_u_y": 0,
"compatibility_v_x": 0,
"compatibility_v_y": 0,
},
batch_size=cfg.batch_size.interior,
bounds=Bounds(
{x: channel_length, y: (-outer_cylinder_radius, outer_cylinder_radius)}
),
lambda_weighting={
"continuity": 5.0 * Symbol("sdf"),
"momentum_x": 2.0 * Symbol("sdf"),
"momentum_y": 2.0 * Symbol("sdf"),
"compatibility_u_x": 0.1 * Symbol("sdf"),
"compatibility_u_y": 0.1 * Symbol("sdf"),
"compatibility_v_x": 0.1 * Symbol("sdf"),
"compatibility_v_y": 0.1 * Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# integral continuity
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"normal_dot_vel": 2},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "../openfoam/bend_finerInternal0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_length[0] # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=1024,
)
domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add monitors
# metric for mass and momentum imbalance
global_monitor = PointwiseMonitor(
geo.sample_interior(1024),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
    # metric for force on inner cylinder
force_monitor = PointwiseMonitor(
inner_circle.sample_boundary(1024),
output_names=["p"],
metrics={
"force_x": lambda var: torch.sum(var["normal_x"] * var["area"] * var["p"]),
"force_y": lambda var: torch.sum(var["normal_y"] * var["area"] * var["p"]),
},
nodes=nodes,
)
domain.add_monitor(force_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/annular_ring/annular_ring_hardBC/annular_ring_hardBC.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" References:
(1) "Son, H., Jang, J.W., Han, W.J. and Hwang, H.J., 2021.
Sobolev training for the neural network solutions of pdes.
arXiv preprint arXiv:2101.08932."
(2) Yu, J., Lu, L., Meng, X. and Karniadakis, G.E., 2021.
Gradient-enhanced physics-informed neural networks for forward
and inverse PDE problems. arXiv preprint arXiv:2111.02801.
"""
import os
import warnings
from sympy import Symbol, Eq, And
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.basic import NormalDotVec
from pdes.navier_stokes import NavierStokes
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# specify params
channel_length = (-6.732, 6.732)
channel_width = (-1.0, 1.0)
cylinder_center = (0.0, 0.0)
outer_cylinder_radius = 2.0
inner_cylinder_radius = 1.0
inlet_vel = 1.5
# make geometry
x, y = Symbol("x"), Symbol("y")
rec = Rectangle(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
outer_circle = Circle(cylinder_center, outer_cylinder_radius)
inner_circle = Circle((0, 0), inner_cylinder_radius)
geo = (rec + outer_circle) - inner_circle
# make annular ring domain
domain = Domain()
# inlet
inlet_sympy = parabola(
y, inter_1=channel_width[0], inter_2=channel_width[1], height=inlet_vel
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": inlet_sympy, "v": 0},
lambda_weighting={"u": 1.1, "v": 1.1},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_length[0]),
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
lambda_weighting={"u": 1.1, "v": 1.1},
batch_size=cfg.batch_size.no_slip,
criteria=And((x > channel_length[0]), (x < channel_length[1])),
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"continuity_dx": 0,
"continuity_dy": 0,
"momentum_x_dx": 0,
"momentum_x_dy": 0,
"momentum_y_dx": 0,
"momentum_y_dy": 0,
},
batch_size=cfg.batch_size.interior,
bounds=Bounds(
{x: channel_length, y: (-outer_cylinder_radius, outer_cylinder_radius)}
),
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"continuity_dx": 0.01 * Symbol("sdf"),
"continuity_dy": 0.01 * Symbol("sdf"),
"momentum_x_dx": 0.01 * Symbol("sdf"),
"momentum_x_dy": 0.01 * Symbol("sdf"),
"momentum_y_dx": 0.01 * Symbol("sdf"),
"momentum_y_dy": 0.01 * Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# integral continuity
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"normal_dot_vel": 2},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.11},
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "../openfoam/bend_finerInternal0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_length[0] # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=1024,
)
domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add monitors
# metric for mass and momentum imbalance
global_monitor = PointwiseMonitor(
geo.sample_interior(1024),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
    # metric for force on inner cylinder
force_monitor = PointwiseMonitor(
inner_circle.sample_boundary(1024),
output_names=["p"],
metrics={
"force_x": lambda var: torch.sum(var["normal_x"] * var["area"] * var["p"]),
"force_y": lambda var: torch.sum(var["normal_y"] * var["area"] * var["p"]),
},
nodes=nodes,
)
domain.add_monitor(force_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/annular_ring/annular_ring_gradient_enhanced/annular_ring_gradient_enhanced.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class NavierStokes(PDE):
"""
Compressible Navier Stokes equations with third-order derivatives to be used for gradient-enhanced training.
Parameters
==========
nu : float, Sympy Symbol/Expr, str
The kinematic viscosity. If `nu` is a str then it is
converted to Sympy Function of form `nu(x,y,z,t)`.
If `nu` is a Sympy Symbol or Expression then this
is substituted into the equation. This allows for
variable viscosity.
rho : float, Sympy Symbol/Expr, str
The density of the fluid. If `rho` is a str then it is
        converted to Sympy Function of form `rho(x,y,z,t)`.
        If `rho` is a Sympy Symbol or Expression then this
is substituted into the equation to allow for
compressible Navier Stokes. Default is 1.
dim : int
Dimension of the Navier Stokes (2 or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
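
    Example
    =======
    >>> # minimal usage sketch (instantiation mirrors the run() below)
    >>> ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
    >>> nodes = ns.make_nodes()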
"""
name = "NavierStokes"
def __init__(self, nu, rho=1, dim=3, time=True):
# set params
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
if self.dim == 3:
w = Function("w")(*input_variables)
else:
w = Number(0)
# pressure
p = Function("p")(*input_variables)
# kinematic viscosity
if isinstance(nu, str):
nu = Function(nu)(*input_variables)
elif isinstance(nu, (float, int)):
nu = Number(nu)
# density
if isinstance(rho, str):
rho = Function(rho)(*input_variables)
elif isinstance(rho, (float, int)):
rho = Number(rho)
# dynamic viscosity
mu = rho * nu
        # "curl" here is actually the velocity divergence (dilatation);
        # it is identically zero when rho is constant (incompressible flow)
curl = Number(0) if rho.diff() == 0 else u.diff(x) + v.diff(y) + w.diff(z)
# set equations
self.equations = {}
self.equations["continuity"] = (
rho.diff(t) + (rho * u).diff(x) + (rho * v).diff(y) + (rho * w).diff(z)
)
self.equations["continuity_dx"] = (
rho.diff(t) + (rho * u).diff(x) + (rho * v).diff(y) + (rho * w).diff(z)
).diff(x)
self.equations["continuity_dy"] = (
rho.diff(t) + (rho * u).diff(x) + (rho * v).diff(y) + (rho * w).diff(z)
).diff(y)
self.equations["continuity_dz"] = (
rho.diff(t) + (rho * u).diff(x) + (rho * v).diff(y) + (rho * w).diff(z)
).diff(z)
self.equations["momentum_x"] = (
(rho * u).diff(t)
+ (
u * ((rho * u).diff(x))
+ v * ((rho * u).diff(y))
+ w * ((rho * u).diff(z))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u.diff(x)).diff(x)
- (mu * u.diff(y)).diff(y)
- (mu * u.diff(z)).diff(z)
- (mu * (curl).diff(x))
)
self.equations["momentum_y"] = (
(rho * v).diff(t)
+ (
u * ((rho * v).diff(x))
+ v * ((rho * v).diff(y))
+ w * ((rho * v).diff(z))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v.diff(x)).diff(x)
- (mu * v.diff(y)).diff(y)
- (mu * v.diff(z)).diff(z)
- (mu * (curl).diff(y))
)
self.equations["momentum_z"] = (
(rho * w).diff(t)
+ (
u * ((rho * w).diff(x))
+ v * ((rho * w).diff(y))
+ w * ((rho * w).diff(z))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w.diff(x)).diff(x)
- (mu * w.diff(y)).diff(y)
- (mu * w.diff(z)).diff(z)
- (mu * (curl).diff(z))
)
self.equations["momentum_x_dx"] = (
(
(rho * u).diff(t)
+ (
u * ((rho * u).diff(x))
+ v * ((rho * u).diff(y))
+ w * ((rho * u).diff(z))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u.diff(x)).diff(x)
- (mu * u.diff(y)).diff(y)
- (mu * u.diff(z)).diff(z)
- (mu * (curl).diff(x))
)
).diff(x)
self.equations["momentum_x_dy"] = (
(
(rho * u).diff(t)
+ (
u * ((rho * u).diff(x))
+ v * ((rho * u).diff(y))
+ w * ((rho * u).diff(z))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u.diff(x)).diff(x)
- (mu * u.diff(y)).diff(y)
- (mu * u.diff(z)).diff(z)
- (mu * (curl).diff(x))
)
).diff(y)
self.equations["momentum_x_dz"] = (
(
(rho * u).diff(t)
+ (
u * ((rho * u).diff(x))
+ v * ((rho * u).diff(y))
+ w * ((rho * u).diff(z))
+ rho * u * (curl)
)
+ p.diff(x)
- (-2 / 3 * mu * (curl)).diff(x)
- (mu * u.diff(x)).diff(x)
- (mu * u.diff(y)).diff(y)
- (mu * u.diff(z)).diff(z)
- (mu * (curl).diff(x))
)
).diff(z)
self.equations["momentum_y_dx"] = (
(
(rho * v).diff(t)
+ (
u * ((rho * v).diff(x))
+ v * ((rho * v).diff(y))
+ w * ((rho * v).diff(z))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v.diff(x)).diff(x)
- (mu * v.diff(y)).diff(y)
- (mu * v.diff(z)).diff(z)
- (mu * (curl).diff(y))
)
).diff(x)
self.equations["momentum_y_dy"] = (
(
(rho * v).diff(t)
+ (
u * ((rho * v).diff(x))
+ v * ((rho * v).diff(y))
+ w * ((rho * v).diff(z))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v.diff(x)).diff(x)
- (mu * v.diff(y)).diff(y)
- (mu * v.diff(z)).diff(z)
- (mu * (curl).diff(y))
)
).diff(y)
self.equations["momentum_y_dz"] = (
(
(rho * v).diff(t)
+ (
u * ((rho * v).diff(x))
+ v * ((rho * v).diff(y))
+ w * ((rho * v).diff(z))
+ rho * v * (curl)
)
+ p.diff(y)
- (-2 / 3 * mu * (curl)).diff(y)
- (mu * v.diff(x)).diff(x)
- (mu * v.diff(y)).diff(y)
- (mu * v.diff(z)).diff(z)
- (mu * (curl).diff(y))
)
).diff(z)
self.equations["momentum_z_dx"] = (
(
(rho * w).diff(t)
+ (
u * ((rho * w).diff(x))
+ v * ((rho * w).diff(y))
+ w * ((rho * w).diff(z))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w.diff(x)).diff(x)
- (mu * w.diff(y)).diff(y)
- (mu * w.diff(z)).diff(z)
- (mu * (curl).diff(z))
)
).diff(x)
self.equations["momentum_z_dy"] = (
(
(rho * w).diff(t)
+ (
u * ((rho * w).diff(x))
+ v * ((rho * w).diff(y))
+ w * ((rho * w).diff(z))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w.diff(x)).diff(x)
- (mu * w.diff(y)).diff(y)
- (mu * w.diff(z)).diff(z)
- (mu * (curl).diff(z))
)
).diff(y)
self.equations["momentum_z_dz"] = (
(
(rho * w).diff(t)
+ (
u * ((rho * w).diff(x))
+ v * ((rho * w).diff(y))
+ w * ((rho * w).diff(z))
+ rho * w * (curl)
)
+ p.diff(z)
- (-2 / 3 * mu * (curl)).diff(z)
- (mu * w.diff(x)).diff(x)
- (mu * w.diff(y)).diff(y)
- (mu * w.diff(z)).diff(z)
- (mu * (curl).diff(z))
)
).diff(z)
if self.dim == 2:
self.equations.pop("momentum_z")
self.equations.pop("continuity_dz")
self.equations.pop("momentum_x_dz")
self.equations.pop("momentum_y_dz")
self.equations.pop("momentum_z_dx")
self.equations.pop("momentum_z_dy")
self.equations.pop("momentum_z_dz")
| modulus-sym-main | examples/annular_ring/annular_ring_gradient_enhanced/pdes/navier_stokes.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
import numpy as np
from sympy import Symbol, Eq, Or, And
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.eq.pdes.navier_stokes import GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.models.layers import Activation
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.models.modified_fourier_net import ModifiedFourierNetArch
@modulus.sym.main(config_path="conf_2d_solid_solid", config_name="config")
def run(cfg: ModulusConfig) -> None:
# add constraints to solver
# simulation params
channel_origin = (-2.5, -0.5)
channel_dim = (5.0, 1.0)
heat_sink_base_origin = (-1.0, -0.5)
heat_sink_base_dim = (1.0, 0.2)
fin_origin = heat_sink_base_origin
fin_dim = (1.0, 0.6)
total_fins = 1
box_origin = (-1.1, -0.5)
box_dim = (1.2, 1.0)
source_origin = (-0.7, -0.5)
source_dim = (0.4, 0.0)
source_length = 0.4
inlet_temp = 25.0
conductivity_I = 0.01
conductivity_II = 100.0
source_grad = 0.025
# make list of nodes to unroll graph on
d_solid_I = Diffusion(T="theta_I", D=1.0, dim=2, time=False)
d_solid_II = Diffusion(T="theta_II", D=1.0, dim=2, time=False)
interface = DiffusionInterface(
"theta_I", "theta_II", conductivity_I, conductivity_II, dim=2, time=False
)
gn_solid_I = GradNormal("theta_I", dim=2, time=False)
gn_solid_II = GradNormal("theta_II", dim=2, time=False)
solid_I_net = ModifiedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("theta_I_star")],
layer_size=128,
frequencies=("gaussian", 0.2, 64),
activation_fn=Activation.TANH,
)
solid_II_net = ModifiedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("theta_II_star")],
layer_size=128,
frequencies=("gaussian", 0.2, 64),
activation_fn=Activation.TANH,
)
nodes = (
d_solid_I.make_nodes()
+ d_solid_II.make_nodes()
+ interface.make_nodes()
+ gn_solid_I.make_nodes()
+ gn_solid_II.make_nodes()
+ [
Node.from_sympy(100 * Symbol("theta_I_star") + 25.0, "theta_I")
] # Normalize the outputs
+ [
Node.from_sympy(Symbol("theta_II_star") + 200.0, "theta_II")
] # Normalize the outputs
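        # the affine transforms above shift/scale the raw network outputs so
        # that the quantities each network actually learns remain O(1)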
+ [solid_I_net.make_node(name="solid_I_network")]
+ [solid_II_net.make_node(name="solid_II_network")]
)
# define sympy variables to parametrize domain curves
x, y = Symbol("x"), Symbol("y")
# define geometry
# channel
channel = Channel2D(
channel_origin,
(channel_origin[0] + channel_dim[0], channel_origin[1] + channel_dim[1]),
)
# heat sink
heat_sink_base = Rectangle(
heat_sink_base_origin,
(
heat_sink_base_origin[0] + heat_sink_base_dim[0], # base of heat sink
heat_sink_base_origin[1] + heat_sink_base_dim[1],
),
)
fin_center = (fin_origin[0] + fin_dim[0] / 2, fin_origin[1] + fin_dim[1] / 2)
fin = Rectangle(
fin_origin, (fin_origin[0] + fin_dim[0], fin_origin[1] + fin_dim[1])
)
chip2d = heat_sink_base + fin
# entire geometry
geo = channel - chip2d
    # low- and high-resolution geometry away from and near the heat sink
box = Rectangle(
box_origin,
        (box_origin[0] + box_dim[0], box_origin[1] + box_dim[1]),  # refinement box
)
lr_geo = geo - box
hr_geo = geo & box
lr_bounds_x = (channel_origin[0], channel_origin[0] + channel_dim[0])
lr_bounds_y = (channel_origin[1], channel_origin[1] + channel_dim[1])
hr_bounds_x = (box_origin[0], box_origin[0] + box_dim[0])
hr_bounds_y = (box_origin[1], box_origin[1] + box_dim[1])
# inlet and outlet
inlet = Line(
channel_origin, (channel_origin[0], channel_origin[1] + channel_dim[1]), -1
)
outlet = Line(
(channel_origin[0] + channel_dim[0], channel_origin[1]),
(channel_origin[0] + channel_dim[0], channel_origin[1] + channel_dim[1]),
1,
)
# make domain
domain = Domain()
# inlet
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"theta_I": inlet_temp},
lambda_weighting={"theta_I": 10.0},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"normal_gradient_theta_I": 0},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "outlet")
# channel walls insulating
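    # keep only wall points outside the heat sink: the chip2d SDF is
    # positive inside the sink, so sdf < -1e-5 drops the wall segment
    # it covers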
def walls_criteria(invar, params):
sdf = chip2d.sdf(invar, params)
return np.less(sdf["sdf"], -1e-5)
walls = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=channel,
outvar={"normal_gradient_theta_I": 0},
batch_size=cfg.batch_size.walls,
criteria=walls_criteria,
)
domain.add_constraint(walls, "channel_walls")
# solid I interior lr
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=lr_geo,
outvar={"diffusion_theta_I": 0},
batch_size=cfg.batch_size.interior_lr,
lambda_weighting={"diffusion_theta_I": 1.0},
)
domain.add_constraint(interior, "solid_I_interior_lr")
# solid I interior hr
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=hr_geo,
outvar={"diffusion_theta_I": 0},
batch_size=cfg.batch_size.interior_hr,
lambda_weighting={"diffusion_theta_I": 1.0},
)
domain.add_constraint(interior, "solid_I_interior_hr")
# solid II interior
interiorS = PointwiseInteriorConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"diffusion_theta_II": 0},
batch_size=cfg.batch_size.interiorS,
lambda_weighting={"diffusion_theta_II": 1.0},
)
domain.add_constraint(interiorS, "solid_II_interior")
# solid-solid interface
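    # the interface is the part of the chip boundary strictly inside the
    # channel (positive channel SDF); the chip base lying on the channel
    # wall is excluded and handled by the heat-source / chip-wall
    # constraints below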
def interface_criteria(invar, params):
sdf = channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
interface = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={
"diffusion_interface_dirichlet_theta_I_theta_II": 0,
"diffusion_interface_neumann_theta_I_theta_II": 0,
},
batch_size=cfg.batch_size.interface,
lambda_weighting={
"diffusion_interface_dirichlet_theta_I_theta_II": 10,
"diffusion_interface_neumann_theta_I_theta_II": 1,
},
criteria=interface_criteria,
)
domain.add_constraint(interface, name="interface")
# heat source
heat_source = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"normal_gradient_theta_II": source_grad},
batch_size=cfg.batch_size.heat_source,
lambda_weighting={"normal_gradient_theta_II": 1000},
criteria=(
Eq(y, source_origin[1])
& (x >= source_origin[0])
& (x <= (source_origin[0] + source_dim[0]))
),
)
domain.add_constraint(heat_source, name="heat_source")
# chip walls
chip_walls = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"normal_gradient_theta_II": 0},
batch_size=cfg.batch_size.chip_walls,
# lambda_weighting={"normal_gradient_theta_II": 1000},
criteria=(
Eq(y, source_origin[1])
& ((x < source_origin[0]) | (x > (source_origin[0] + source_dim[0])))
),
)
domain.add_constraint(chip_walls, name="chip_walls")
# add monitor
monitor = PointwiseMonitor(
chip2d.sample_boundary(10000, criteria=Eq(y, source_origin[1])),
output_names=["theta_II"],
metrics={
"peak_temp": lambda var: torch.max(var["theta_II"]),
},
nodes=nodes,
)
domain.add_monitor(monitor)
# add validation data
file_path = "openfoam/2d_solid_solid_D1.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "Temperature": "theta_I"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_origin[0] # normalize pos
openfoam_var["y"] += channel_origin[1]
openfoam_invar_solid_I_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_solid_I_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_I"]
}
openfoam_validator_solid_I = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_solid_I_numpy,
true_outvar=openfoam_outvar_solid_I_numpy,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_validator_solid_I)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
file_path = "openfoam/2d_solid_solid_D2.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "Temperature": "theta_II"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += channel_origin[0] # normalize pos
openfoam_var["y"] += channel_origin[1]
openfoam_invar_solid_II_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_solid_II_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_II"]
}
openfoam_validator_solid_II = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_solid_II_numpy,
true_outvar=openfoam_outvar_solid_II_numpy,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_validator_solid_II)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/chip_2d/chip_2d_solid_solid_heat_transfer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
import numpy as np
from sympy import Symbol, Eq, tanh, Or, And
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.eq.pdes.navier_stokes import GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.modified_fourier_net import ModifiedFourierNetArch
@modulus.sym.main(config_path="conf_2d_solid_fluid", config_name="config_heat")
def run(cfg: ModulusConfig) -> None:
#############
# Real Params
#############
fluid_kinematic_viscosity = 0.004195088 # m**2/s
fluid_density = 1.1614 # kg/m**3
fluid_specific_heat = 1005 # J/(kg K)
fluid_conductivity = 0.0261 # W/(m K)
# copper params
copper_density = 8930 # kg/m3
copper_specific_heat = 385 # J/(kg K)
copper_conductivity = 385 # W/(m K)
# boundary params
inlet_velocity = 5.24386 # m/s
inlet_temp = 25.0 # C
copper_heat_flux = 51.948051948 # W / m2
################
# Non dim params
################
length_scale = 0.04 # m
time_scale = 0.007627968710072352 # s
mass_scale = 7.43296e-05 # kg
temp_scale = 1.0 # K
velocity_scale = length_scale / time_scale # m/s
pressure_scale = mass_scale / (length_scale * time_scale**2) # kg / (m s**2)
density_scale = mass_scale / length_scale**3 # kg/m3
watt_scale = (mass_scale * length_scale**2) / (time_scale**3) # kg m**2 / s**3
joule_scale = (mass_scale * length_scale**2) / (
time_scale**2
) # kg * m**2 / s**2
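    # sanity check on the chosen scales: mass_scale = fluid_density *
    # length_scale**3, so nd_fluid_density works out to exactly 1.0, and
    # time_scale = length_scale / inlet_velocity, so nd_inlet_velocity
    # works out to exactly 1.0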
##############################
# Nondimensionalization Params
##############################
# fluid params
nd_fluid_kinematic_viscosity = fluid_kinematic_viscosity / (
length_scale**2 / time_scale
)
nd_fluid_density = fluid_density / density_scale
nd_fluid_specific_heat = fluid_specific_heat / (
joule_scale / (mass_scale * temp_scale)
)
nd_fluid_conductivity = fluid_conductivity / (
watt_scale / (length_scale * temp_scale)
)
nd_fluid_diffusivity = nd_fluid_conductivity / (
nd_fluid_specific_heat * nd_fluid_density
)
# copper params
nd_copper_density = copper_density / (mass_scale / length_scale**3)
nd_copper_specific_heat = copper_specific_heat / (
joule_scale / (mass_scale * temp_scale)
)
nd_copper_conductivity = copper_conductivity / (
watt_scale / (length_scale * temp_scale)
)
nd_copper_diffusivity = nd_copper_conductivity / (
nd_copper_specific_heat * nd_copper_density
)
print("nd_copper_diffusivity", nd_copper_diffusivity)
# boundary params
nd_inlet_velocity = inlet_velocity / (length_scale / time_scale)
nd_inlet_temp = inlet_temp / temp_scale
nd_copper_source_grad = copper_heat_flux * length_scale / temp_scale
# make list of nodes to unroll graph on
ad = AdvectionDiffusion(
T="theta_f", rho=nd_fluid_density, D=nd_fluid_diffusivity, dim=2, time=False
)
diff = Diffusion(T="theta_s", D=1.0, dim=2, time=False)
interface = DiffusionInterface(
"theta_f",
"theta_s",
nd_fluid_conductivity,
nd_copper_conductivity,
dim=2,
time=False,
)
gn_theta_f = GradNormal("theta_f", dim=2, time=False)
gn_theta_s = GradNormal("theta_s", dim=2, time=False)
flow_net = FourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
frequencies=("axis", [i / 5.0 for i in range(25)]),
)
solid_heat_net = ModifiedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("theta_s_star")],
layer_size=256,
frequencies=("gaussian", 2, 128),
)
fluid_heat_net = ModifiedFourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("theta_f_star")],
layer_size=256,
frequencies=("gaussian", 2, 128),
)
nodes = (
ad.make_nodes(detach_names=["u", "v"])
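        # u, v are detached so the heat residual does not backpropagate
        # into the frozen flow network (one-way coupling from flow to heat;
        # the flow node below is created with optimize=False)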
+ diff.make_nodes()
+ interface.make_nodes()
+ gn_theta_f.make_nodes()
+ gn_theta_s.make_nodes()
+ [
Node.from_sympy(Symbol("theta_s_star") + 170.0, "theta_s")
] # Normalize the outputs
+ [
Node.from_sympy(Symbol("theta_f_star") + 70.0, "theta_f")
] # Normalize the outputs
+ [flow_net.make_node(name="flow_network", optimize=False)]
+ [solid_heat_net.make_node(name="solid_heat_network")]
+ [fluid_heat_net.make_node(name="fluid_heat_network")]
)
# add constraints to solver
# simulation params
channel_length = (-2.5, 5.0)
channel_width = (-0.5, 0.5)
chip_pos = -1.0
chip_height = 0.6
chip_width = 1.0
source_origin = (-0.7, -0.5)
source_dim = (0.4, 0.0)
source_length = 0.4
# define sympy variables to parametrize domain curves
x, y = Symbol("x"), Symbol("y")
# define geometry
channel = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
rec = Rectangle(
(chip_pos, channel_width[0]),
(chip_pos + chip_width, channel_width[0] + chip_height),
)
chip2d = rec
geo = channel - rec
x_pos = Symbol("x_pos")
integral_line = Line((x_pos, channel_width[0]), (x_pos, channel_width[1]), 1)
x_pos_range = {
x_pos: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(channel_length[0], channel_length[1])
)
}
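    # note: integral_line / x_pos_range are defined but not used by any
    # constraint in this heat stage (no integral continuity is imposed here)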
# make domain
domain = Domain()
# inlet
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"theta_f": nd_inlet_temp},
batch_size=cfg.batch_size.inlet,
lambda_weighting={"theta_f": 100.0},
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# channel walls insulating
def walls_criteria(invar, params):
sdf = chip2d.sdf(invar, params)
return np.less(sdf["sdf"], -1e-5)
walls = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=channel,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.walls,
criteria=walls_criteria,
)
domain.add_constraint(walls, "channel_walls")
# fluid interior lr
interior_lr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.interior_lr,
criteria=Or(x < (chip_pos - 0.25), x > (chip_pos + chip_width + 0.25)),
lambda_weighting={"advection_diffusion_theta_f": 1.0},
)
domain.add_constraint(interior_lr, "fluid_interior_lr")
# fluid interior hr
interior_hr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.interior_hr,
criteria=And(x > (chip_pos - 0.25), x < (chip_pos + chip_width + 0.25)),
lambda_weighting={"advection_diffusion_theta_f": 1.0},
)
domain.add_constraint(interior_hr, "fluid_interior_hr")
# solid interior
interiorS = PointwiseInteriorConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"diffusion_theta_s": 0},
batch_size=cfg.batch_size.interiorS,
lambda_weighting={"diffusion_theta_s": 1.0},
)
domain.add_constraint(interiorS, "solid_interior")
# fluid-solid interface
def interface_criteria(invar, params):
sdf = channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
interface = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={
"diffusion_interface_dirichlet_theta_f_theta_s": 0,
"diffusion_interface_neumann_theta_f_theta_s": 0,
},
batch_size=cfg.batch_size.interface,
lambda_weighting={
"diffusion_interface_dirichlet_theta_f_theta_s": 1,
"diffusion_interface_neumann_theta_f_theta_s": 1e-4,
},
criteria=interface_criteria,
)
domain.add_constraint(interface, name="interface")
heat_source = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"normal_gradient_theta_s": nd_copper_source_grad},
batch_size=cfg.batch_size.heat_source,
lambda_weighting={"normal_gradient_theta_s": 100},
criteria=(
Eq(y, source_origin[1])
& (x >= source_origin[0])
& (x <= (source_origin[0] + source_dim[0]))
),
)
domain.add_constraint(heat_source, name="heat_source")
# chip walls
chip_walls = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=chip2d,
outvar={"normal_gradient_theta_s": 0},
batch_size=cfg.batch_size.chip_walls,
criteria=(
Eq(y, source_origin[1])
& ((x < source_origin[0]) | (x > (source_origin[0] + source_dim[0])))
),
)
domain.add_constraint(chip_walls, name="chip_walls")
# add monitor
monitor = PointwiseMonitor(
chip2d.sample_boundary(10000, criteria=Eq(y, source_origin[1])),
output_names=["theta_s"],
metrics={
"peak_temp": lambda var: torch.max(var["theta_s"]),
},
nodes=nodes,
)
domain.add_monitor(monitor)
# add validation data
file_path = "openfoam/2d_real_cht_fluid.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"x-coordinate": "x",
"y-coordinate": "y",
"x-velocity": "u",
"y-velocity": "v",
"pressure": "p",
"temperature": "theta_f",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] = openfoam_var["x"] / length_scale - 2.5 # normalize pos
openfoam_var["y"] = openfoam_var["y"] / length_scale - 0.5
openfoam_var["p"] = (openfoam_var["p"] + 400.0) / pressure_scale
openfoam_var["u"] = openfoam_var["u"] / velocity_scale
openfoam_var["v"] = openfoam_var["v"] / velocity_scale
openfoam_var["theta_f"] = openfoam_var["theta_f"] - 273.15
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "p", "theta_f"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add solid validation data
file_path = "openfoam/2d_real_cht_solid.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"x-coordinate": "x", "y-coordinate": "y", "temperature": "theta_s"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] = openfoam_var["x"] / length_scale - 2.5 # normalize pos
openfoam_var["y"] = openfoam_var["y"] / length_scale - 0.5
openfoam_var["theta_s"] = openfoam_var["theta_s"] - 273.15
openfoam_solid_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_solid_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_s"]
}
openfoam_solid_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_solid_invar_numpy,
true_outvar=openfoam_solid_outvar_numpy,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_solid_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/chip_2d/chip_2d_solid_fluid_heat_transfer_heat.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import numpy as np
from sympy import Symbol, Eq, And, Or
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.02, rho=1.0, dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# simulation params
channel_length = (-2.5, 2.5)
channel_width = (-0.5, 0.5)
chip_pos = -1.0
chip_height = 0.6
chip_width = 1.0
inlet_vel = 1.5
# define sympy variables to parametrize domain curves
x, y = Symbol("x"), Symbol("y")
# define geometry
channel = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
rec = Rectangle(
(chip_pos, channel_width[0]),
(chip_pos + chip_width, channel_width[0] + chip_height),
)
geo = channel - rec
x_pos = Symbol("x_pos")
integral_line = Line((x_pos, channel_width[0]), (x_pos, channel_width[1]), 1)
x_pos_range = {
x_pos: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(channel_length[0], channel_length[1])
)
}
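    # each batch fills x_pos with a single random draw, so every
    # integral-continuity batch evaluates the line integral at one random
    # x-location in the channel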
# make domain
domain = Domain()
# inlet
inlet_parabola = parabola(y, channel_width[0], channel_width[1], inlet_vel)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"u": inlet_parabola, "v": 0},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.no_slip,
)
domain.add_constraint(no_slip, "no_slip")
# interior lr
interior_lr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior_lr,
criteria=Or(x < (chip_pos - 0.25), x > (chip_pos + chip_width + 0.25)),
lambda_weighting={
"continuity": 2 * Symbol("sdf"),
"momentum_x": 2 * Symbol("sdf"),
"momentum_y": 2 * Symbol("sdf"),
},
)
domain.add_constraint(interior_lr, "interior_lr")
# interior hr
interior_hr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior_hr,
criteria=And(x > (chip_pos - 0.25), x < (chip_pos + chip_width + 0.25)),
lambda_weighting={
"continuity": 2 * Symbol("sdf"),
"momentum_x": 2 * Symbol("sdf"),
"momentum_y": 2 * Symbol("sdf"),
},
)
domain.add_constraint(interior_hr, "interior_hr")
# integral continuity
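    # sample points on the integral line are kept only where they fall
    # inside the fluid (positive SDF w.r.t. geo), so the portion of the
    # line passing through the chip is excluded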
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=integral_line,
outvar={"normal_dot_vel": 1},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 1},
criteria=integral_criteria,
parameterization=x_pos_range,
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "openfoam/2D_chip_fluid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] -= 2.5 # normalize pos
openfoam_var["y"] -= 0.5
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/chip_2d/chip_2d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import numpy as np
from sympy import Symbol, Eq, And, Or
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.models.fourier_net import FourierNetArch
@modulus.sym.main(config_path="conf_2d_solid_fluid", config_name="config_flow")
def run(cfg: ModulusConfig) -> None:
#############
# Real Params
#############
fluid_kinematic_viscosity = 0.004195088 # m**2/s
fluid_density = 1.1614 # kg/m**3
fluid_specific_heat = 1005 # J/(kg K)
fluid_conductivity = 0.0261 # W/(m K)
# copper params
copper_density = 8930 # kg/m3
copper_specific_heat = 385 # J/(kg K)
copper_conductivity = 385 # W/(m K)
# boundary params
inlet_velocity = 5.24386 # m/s
inlet_temp = 25.0 # C
copper_heat_flux = 51.948051948 # W / m2
################
# Non dim params
################
length_scale = 0.04 # m
time_scale = 0.007627968710072352 # s
mass_scale = 7.43296e-05 # kg
temp_scale = 1.0 # K
velocity_scale = length_scale / time_scale # m/s
pressure_scale = mass_scale / (length_scale * time_scale**2) # kg / (m s**2)
density_scale = mass_scale / length_scale**3 # kg/m3
watt_scale = (mass_scale * length_scale**2) / (time_scale**3) # kg m**2 / s**3
joule_scale = (mass_scale * length_scale**2) / (
time_scale**2
) # kg * m**2 / s**2
##############################
# Nondimensionalization Params
##############################
# fluid params
nd_fluid_kinematic_viscosity = fluid_kinematic_viscosity / (
length_scale**2 / time_scale
)
nd_fluid_density = fluid_density / density_scale
nd_fluid_specific_heat = fluid_specific_heat / (
joule_scale / (mass_scale * temp_scale)
)
nd_fluid_conductivity = fluid_conductivity / (
watt_scale / (length_scale * temp_scale)
)
nd_fluid_diffusivity = nd_fluid_conductivity / (
nd_fluid_specific_heat * nd_fluid_density
)
# copper params
nd_copper_density = copper_density / (mass_scale / length_scale**3)
nd_copper_specific_heat = copper_specific_heat / (
joule_scale / (mass_scale * temp_scale)
)
nd_copper_conductivity = copper_conductivity / (
watt_scale / (length_scale * temp_scale)
)
nd_copper_diffusivity = nd_copper_conductivity / (
nd_copper_specific_heat * nd_copper_density
)
# boundary params
nd_inlet_velocity = inlet_velocity / (length_scale / time_scale)
nd_inlet_temp = inlet_temp / temp_scale
nd_copper_source_grad = copper_heat_flux * length_scale / temp_scale
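    # sanity check: with these scales nd_inlet_velocity = 1.0 and
    # nd_fluid_kinematic_viscosity ~ 0.02, so Re ~ 1.0 * 1.0 / 0.02 = 50
    # (assuming the unit channel width as the characteristic length)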
# make list of nodes to unroll graph on
ns = NavierStokes(
nu=nd_fluid_kinematic_viscosity, rho=nd_fluid_density, dim=2, time=False
)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = FourierNetArch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
frequencies=("axis", [i / 5.0 for i in range(25)]),
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# simulation params
channel_length = (-2.5, 5.0)
channel_width = (-0.5, 0.5)
chip_pos = -1.0
chip_height = 0.6
chip_width = 1.0
# define sympy variables to parametrize domain curves
x, y = Symbol("x"), Symbol("y")
# define geometry
channel = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
rec = Rectangle(
(chip_pos, channel_width[0]),
(chip_pos + chip_width, channel_width[0] + chip_height),
)
geo = channel - rec
x_pos = Symbol("x_pos")
integral_line = Line((x_pos, channel_width[0]), (x_pos, channel_width[1]), 1)
x_pos_range = {
x_pos: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(channel_length[0], channel_length[1])
)
}
# make domain
domain = Domain()
# inlet
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"u": nd_inlet_velocity, "v": 0},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_length[1]),
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.no_slip,
)
domain.add_constraint(no_slip, "no_slip")
# interior lr
interior_lr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior_lr,
criteria=Or(x < (chip_pos - 0.25), x > (chip_pos + chip_width + 0.25)),
lambda_weighting={
"continuity": 2 * Symbol("sdf"),
"momentum_x": 2 * Symbol("sdf"),
"momentum_y": 2 * Symbol("sdf"),
},
)
domain.add_constraint(interior_lr, "interior_lr")
# interior hr
interior_hr = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior_hr,
criteria=And(x > (chip_pos - 0.25), x < (chip_pos + chip_width + 0.25)),
lambda_weighting={
"continuity": 2 * Symbol("sdf"),
"momentum_x": 2 * Symbol("sdf"),
"momentum_y": 2 * Symbol("sdf"),
},
)
domain.add_constraint(interior_hr, "interior_hr")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=integral_line,
outvar={"normal_dot_vel": 1},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 1},
criteria=integral_criteria,
parameterization=x_pos_range,
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "openfoam/2d_real_cht_fluid.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"x-coordinate": "x",
"y-coordinate": "y",
"x-velocity": "u",
"y-velocity": "v",
"pressure": "p",
"temperature": "theta_f",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] = openfoam_var["x"] / length_scale - 2.5 # normalize pos
openfoam_var["y"] = openfoam_var["y"] / length_scale - 0.5
openfoam_var["p"] = (openfoam_var["p"] + 400.0) / pressure_scale
openfoam_var["u"] = openfoam_var["u"] / velocity_scale
openfoam_var["v"] = openfoam_var["v"] / velocity_scale
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/chip_2d/chip_2d_solid_fluid_heat_transfer_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, pi, sin
import os
import warnings
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.wave_equation import HelmholtzEquation
from modulus.sym.utils.io.plotter import ValidatorPlotter
@modulus.sym.main(config_path="conf", config_name="config_ntk")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
wave = HelmholtzEquation(u="u", k=1.0, dim=2)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = wave.make_nodes() + [wave_net.make_node(name="wave_network")]
# add constraints to solver
# make geometry
x, y = Symbol("x"), Symbol("y")
height = 2
width = 2
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make domain
domain = Domain()
# walls
wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0},
batch_size=cfg.batch_size.wall,
lambda_weighting={"u": 1.0},
)
domain.add_constraint(wall, "wall")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"helmholtz": -(
-((pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
- ((4 * pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
+ 1 * sin(pi * x) * sin(4 * pi * y)
)
},
batch_size=cfg.batch_size.interior,
bounds={x: (-width / 2, width / 2), y: (-height / 2, height / 2)},
lambda_weighting={
"helmholtz": 1.0,
},
)
domain.add_constraint(interior, "interior")
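    # Editorial sanity check on the forcing term: assuming the residual convention
    # helmholtz = -(k**2 * u + u_xx + u_yy), the target above corresponds to the
    # manufactured solution u = sin(pi*x) * sin(4*pi*y), since
    #   u_xx + u_yy = -(pi**2 + (4*pi)**2) * u  =>  residual = (17*pi**2 - 1) * u,
    # and this u also satisfies the homogeneous wall condition u = 0.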
# validation data
file_path = "validation/helmholtz.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"x": "x", "y": "y", "z": "u"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/helmholtz/helmholtz_ntk.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
import numpy as np
from sympy import Symbol, pi, sin
from typing import List, Tuple, Dict
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.geometry.adf import ADF
from modulus.sym.eq.pdes.wave_equation import HelmholtzEquation
class HardBC(ADF):
def __init__(self):
super().__init__()
# domain measures
self.domain_height: float = 2.0
self.domain_width: float = 2.0
# boundary conditions (bottom, right, top, left)
self.g: List[float] = [0.0, 0.0, 0.0, 0.0]
# parameters
self.eps: float = 1e-9
self.mu: float = 2.0
self.m: float = 2.0
def forward(self, invar: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
        Forms the solution ansatz for the Helmholtz example
"""
outvar = {}
x, y = invar["x"], invar["y"]
omega_0 = ADF.line_segment_adf(
(x, y),
(-self.domain_width / 2, -self.domain_height / 2),
(self.domain_width / 2, -self.domain_height / 2),
)
omega_1 = ADF.line_segment_adf(
(x, y),
(self.domain_width / 2, -self.domain_height / 2),
(self.domain_width / 2, self.domain_height / 2),
)
omega_2 = ADF.line_segment_adf(
(x, y),
(self.domain_width / 2, self.domain_height / 2),
(-self.domain_width / 2, self.domain_height / 2),
)
omega_3 = ADF.line_segment_adf(
(x, y),
(-self.domain_width / 2, self.domain_height / 2),
(-self.domain_width / 2, -self.domain_height / 2),
)
omega_E_u = ADF.r_equivalence([omega_0, omega_1, omega_2, omega_3], self.m)
bases = [
omega_0**self.mu,
omega_1**self.mu,
omega_2**self.mu,
omega_3**self.mu,
]
w = [
ADF.transfinite_interpolation(bases, idx, self.eps)
for idx in range(len(self.g))
]
g = w[0] * self.g[0] + w[1] * self.g[1] + w[2] * self.g[2] + w[3] * self.g[3]
outvar["u"] = g + omega_E_u * invar["u_star"]
return outvar
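# Editorial note on the ansatz above: each omega_i is an approximate distance
# function that vanishes on one wall of the rectangle, so omega_E_u vanishes on
# the entire boundary, while the transfinite weights w blend the per-wall values
# g_i into a function that matches them on the walls. Hence
# u = g + omega_E_u * u_star satisfies the Dirichlet data exactly for any
# network output u_star -- which is what makes the boundary condition "hard".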
@modulus.sym.main(config_path="conf", config_name="config_hardBC")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
wave = HelmholtzEquation(u="u", k=1.0, dim=2, mixed_form=True)
hard_bc = HardBC()
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u_star"), Key("u_x"), Key("u_y")],
cfg=cfg.arch.fully_connected,
)
nodes = (
wave.make_nodes()
+ [Node(inputs=["x", "y", "u_star"], outputs=["u"], evaluate=hard_bc)]
+ [wave_net.make_node(name="wave_network")]
)
# add constraints to solver
# make geometry
x, y = Symbol("x"), Symbol("y")
height = hard_bc.domain_height
width = hard_bc.domain_width
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make domain
domain = Domain()
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"helmholtz": -(
-((pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
- ((4 * pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
+ 1 * sin(pi * x) * sin(4 * pi * y)
),
"compatibility_u_x": 0,
"compatibility_u_y": 0,
},
batch_size=cfg.batch_size.interior,
bounds={x: (-width / 2, width / 2), y: (-height / 2, height / 2)},
lambda_weighting={
"helmholtz": Symbol("sdf"),
"compatibility_u_x": 0.5,
"compatibility_u_y": 0.5,
},
)
domain.add_constraint(interior, "interior")
# validation data
file_path = "validation/helmholtz.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"x": "x", "y": "y", "z": "u"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/helmholtz/helmholtz_hardBC.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, pi, sin
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.wave_equation import HelmholtzEquation
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
wave = HelmholtzEquation(u="u", k=1.0, dim=2)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = wave.make_nodes() + [wave_net.make_node(name="wave_network")]
# add constraints to solver
# make geometry
x, y = Symbol("x"), Symbol("y")
height = 2
width = 2
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make domain
domain = Domain()
# walls
wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0},
batch_size=cfg.batch_size.wall,
)
domain.add_constraint(wall, "wall")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"helmholtz": -(
-((pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
- ((4 * pi) ** 2) * sin(pi * x) * sin(4 * pi * y)
+ 1 * sin(pi * x) * sin(4 * pi * y)
)
},
batch_size=cfg.batch_size.interior,
bounds={x: (-width / 2, width / 2), y: (-height / 2, height / 2)},
lambda_weighting={
"helmholtz": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior")
# validation data
file_path = "validation/helmholtz.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"x": "x", "y": "y", "z": "u"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/helmholtz/helmholtz.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: run three_fin_flow and three_fin_thermal in "eval" mode
after training to get the monitor values for different designs.
"""
# import Modulus library
from modulus.sym.utils.io.csv_rw import dict_to_csv
from modulus.sym.hydra import to_absolute_path
# import other libraries
import numpy as np
import os, sys
import csv
# specify the design optimization requirements
max_pressure_drop = 2.5
num_design = 10
path_flow = to_absolute_path("outputs/run_mode=eval/three_fin_flow")
path_thermal = to_absolute_path("outputs/run_mode=eval/three_fin_thermal")
invar_mapping = [
"fin_height_middle",
"fin_height_sides",
"fin_length_middle",
"fin_length_sides",
"fin_thickness_middle",
"fin_thickness_sides",
]
outvar_mapping = ["pressure_drop", "peak_temp"]
# read the monitor files, and perform a design space search
def DesignOpt(
path_flow,
path_thermal,
num_design,
max_pressure_drop,
invar_mapping,
outvar_mapping,
):
path_flow += "/monitors"
path_thermal += "/monitors"
directory = os.path.join(os.getcwd(), path_flow)
sys.path.append(path_flow)
values, configs = [], []
for _, _, files in os.walk(directory):
for file in files:
if file.startswith("back_pressure") & file.endswith(".csv"):
value = []
configs.append(file[13:-4])
# read back pressure
with open(os.path.join(path_flow, file), "r") as datafile:
data = []
reader = csv.reader(datafile, delimiter=",")
for row in reader:
columns = [row[1]]
data.append(columns)
last_row = float(data[-1][0])
value.append(last_row)
# read front pressure
with open(
os.path.join(path_flow, "front_pressure" + file[13:]), "r"
) as datafile:
reader = csv.reader(datafile, delimiter=",")
data = []
for row in reader:
columns = [row[1]]
data.append(columns)
last_row = float(data[-1][0])
value.append(last_row)
# read temperature
with open(
os.path.join(path_thermal, "peak_temp" + file[13:]), "r"
) as datafile:
data = []
reader = csv.reader(datafile, delimiter=",")
for row in reader:
columns = [row[1]]
data.append(columns)
last_row = float(data[-1][0])
value.append(last_row)
values.append(value)
# perform the design optimization
values = np.array(
[
[values[i][1] - values[i][0], values[i][2] * 273.15]
for i in range(len(values))
]
)
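    # Column 0 is the pressure drop (front minus back pressure). Column 1 rescales
    # the peak nondimensional temperature theta_s = T/273.15 - 1 back to physical
    # units: theta_s * 273.15 = T - 273.15, i.e. degrees Celsius.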
indices = np.where(values[:, 0] < max_pressure_drop)[0]
values = values[indices]
configs = [configs[i] for i in indices]
opt_design_index = values[:, 1].argsort()[0:num_design]
opt_design_values = values[opt_design_index]
opt_design_configs = [configs[i] for i in opt_design_index]
# Save to a csv file
opt_design_configs = np.array(
[
np.array(opt_design_configs[i][1:].split("_")).astype(float)
for i in range(num_design)
]
)
opt_design_configs_dict = {
key: value.reshape(-1, 1)
for (key, value) in zip(invar_mapping, opt_design_configs.T)
}
opt_design_values_dict = {
key: value.reshape(-1, 1)
for (key, value) in zip(outvar_mapping, opt_design_values.T)
}
opt_design = {**opt_design_configs_dict, **opt_design_values_dict}
dict_to_csv(opt_design, "optimal_design")
print("Finished design optimization!")
if __name__ == "__main__":
DesignOpt(
path_flow,
path_thermal,
num_design,
max_pressure_drop,
invar_mapping,
outvar_mapping,
)
| modulus-sym-main | examples/three_fin_3d/three_fin_design.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
from sympy import Symbol, Eq, Abs, tanh
import numpy as np
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.geometry import Parameterization
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.key import Key
from modulus.sym.node import Node
# define sympy variables to parameterize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
x_pos = Symbol("x_pos")
# parametric variation
fin_height_m, fin_height_s = Symbol("fin_height_m"), Symbol("fin_height_s")
fin_length_m, fin_length_s = Symbol("fin_length_m"), Symbol("fin_length_s")
fin_thickness_m, fin_thickness_s = Symbol("fin_thickness_m"), Symbol("fin_thickness_s")
height_m_range = (0.0, 0.6)
height_s_range = (0.0, 0.6)
length_m_range = (0.5, 1.0)
length_s_range = (0.5, 1.0)
thickness_m_range = (0.05, 0.15)
thickness_s_range = (0.05, 0.15)
param_ranges = {
fin_height_m: height_m_range,
fin_height_s: height_s_range,
fin_length_m: length_m_range,
fin_length_s: length_s_range,
fin_thickness_m: thickness_m_range,
fin_thickness_s: thickness_s_range,
}
fixed_param_ranges = {
fin_height_m: 0.4,
fin_height_s: 0.4,
fin_length_m: 1.0,
fin_length_s: 1.0,
fin_thickness_m: 0.1,
fin_thickness_s: 0.1,
}
# geometry params for domain
channel_origin = (-2.5, -0.5, -0.5)
channel_dim = (5.0, 1.0, 1.0)
heat_sink_base_origin = (-1.0, -0.5, -0.3)
heat_sink_base_dim = (1.0, 0.2, 0.6)
fin_origin = (heat_sink_base_origin[0] + 0.5 - fin_length_s / 2, -0.3, -0.3)
fin_dim = (fin_length_s, fin_height_s, fin_thickness_s) # two side fins
total_fins = 2 # two side fins
flow_box_origin = (-1.1, -0.5, -0.5)
flow_box_dim = (1.6, 1.0, 1.0)
source_origin = (-0.7, -0.5, -0.1)
source_dim = (0.4, 0.0, 0.2)
source_area = 0.08
# define geometry
class ThreeFin(object):
def __init__(self, parameterized: bool = False):
# set param ranges
if parameterized:
pr = Parameterization(param_ranges)
self.pr = param_ranges
else:
pr = Parameterization(fixed_param_ranges)
self.pr = fixed_param_ranges
# channel
self.channel = Channel(
channel_origin,
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
parameterization=pr,
)
# three fin heat sink
heat_sink_base = Box(
heat_sink_base_origin,
(
heat_sink_base_origin[0] + heat_sink_base_dim[0], # base of heat sink
heat_sink_base_origin[1] + heat_sink_base_dim[1],
heat_sink_base_origin[2] + heat_sink_base_dim[2],
),
parameterization=pr,
)
fin_center = (
fin_origin[0] + fin_dim[0] / 2,
fin_origin[1] + fin_dim[1] / 2,
fin_origin[2] + fin_dim[2] / 2,
)
fin = Box(
fin_origin,
(
fin_origin[0] + fin_dim[0],
fin_origin[1] + fin_dim[1],
fin_origin[2] + fin_dim[2],
),
parameterization=pr,
)
gap = (heat_sink_base_dim[2] - fin_dim[2]) / (
total_fins - 1
) # gap between fins
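        # With total_fins = 2, translating by the full gap places the second side
        # fin flush with the far face of the heat-sink base, mirroring the first.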
fin_2 = fin.translate([0, 0, gap])
fin = fin + fin_2
three_fin = heat_sink_base + fin
# parameterized center fin
center_fin_origin = (
heat_sink_base_origin[0] + 0.5 - fin_length_m / 2,
fin_origin[1],
-fin_thickness_m / 2,
)
center_fin_dim = (fin_length_m, fin_height_m, fin_thickness_m)
center_fin = Box(
center_fin_origin,
(
center_fin_origin[0] + center_fin_dim[0],
center_fin_origin[1] + center_fin_dim[1],
center_fin_origin[2] + center_fin_dim[2],
),
parameterization=pr,
)
self.three_fin = three_fin + center_fin
# entire geometry
self.geo = self.channel - self.three_fin
        # low and high resolution geometry away from and near the heat sink
flow_box = Box(
flow_box_origin,
(
                flow_box_origin[0] + flow_box_dim[0],
flow_box_origin[1] + flow_box_dim[1],
flow_box_origin[2] + flow_box_dim[2],
),
)
self.lr_geo = self.geo - flow_box
self.hr_geo = self.geo & flow_box
lr_bounds_x = (channel_origin[0], channel_origin[0] + channel_dim[0])
lr_bounds_y = (channel_origin[1], channel_origin[1] + channel_dim[1])
lr_bounds_z = (channel_origin[2], channel_origin[2] + channel_dim[2])
self.lr_bounds = {x: lr_bounds_x, y: lr_bounds_y, z: lr_bounds_z}
hr_bounds_x = (flow_box_origin[0], flow_box_origin[0] + flow_box_dim[0])
hr_bounds_y = (flow_box_origin[1], flow_box_origin[1] + flow_box_dim[1])
hr_bounds_z = (flow_box_origin[2], flow_box_origin[2] + flow_box_dim[2])
self.hr_bounds = {x: hr_bounds_x, y: hr_bounds_y, z: hr_bounds_z}
# inlet and outlet
self.inlet = Plane(
channel_origin,
(
channel_origin[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
-1,
parameterization=pr,
)
self.outlet = Plane(
(channel_origin[0] + channel_dim[0], channel_origin[1], channel_origin[2]),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
1,
parameterization=pr,
)
# planes for integral continuity
self.integral_plane = Plane(
(x_pos, channel_origin[1], channel_origin[2]),
(
x_pos,
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
1,
)
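# Minimal usage sketch (hypothetical, for illustration only -- not executed here):
#   geo = ThreeFin(parameterized=True)
#   boundary_pts = geo.geo.sample_boundary(1000, parameterization=geo.pr)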
| modulus-sym-main | examples/three_fin_3d/three_fin_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
from torch.utils.data import DataLoader, Dataset
from sympy import Symbol, Eq, Abs, tanh, Or, And
import numpy as np
import itertools
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.models.fully_connected import FullyConnectedArch
from three_fin_geometry import *
@modulus.sym.main(config_path="conf", config_name="conf_flow")
def run(cfg: ModulusConfig) -> None:
# make navier stokes equations
if cfg.custom.turbulent:
ze = ZeroEquation(nu=0.002, dim=3, time=False, max_distance=0.5)
ns = NavierStokes(nu=ze.equations["nu"], rho=1.0, dim=3, time=False)
navier_stokes_nodes = ns.make_nodes() + ze.make_nodes()
else:
ns = NavierStokes(nu=0.01, rho=1.0, dim=3, time=False)
navier_stokes_nodes = ns.make_nodes()
normal_dot_vel = NormalDotVec()
# make network arch
if cfg.custom.parameterized:
input_keys = [
Key("x"),
Key("y"),
Key("z"),
Key("fin_height_m"),
Key("fin_height_s"),
Key("fin_length_m"),
Key("fin_length_s"),
Key("fin_thickness_m"),
Key("fin_thickness_s"),
]
else:
input_keys = [Key("x"), Key("y"), Key("z")]
flow_net = FullyConnectedArch(
input_keys=input_keys, output_keys=[Key("u"), Key("v"), Key("w"), Key("p")]
)
# make list of nodes to unroll graph on
flow_nodes = (
navier_stokes_nodes
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
geo = ThreeFin(parameterized=cfg.custom.parameterized)
# params for simulation
# fluid params
inlet_vel = 1.0
volumetric_flow = 1.0
# make flow domain
flow_domain = Domain()
# inlet
u_profile = inlet_vel * tanh((0.5 - Abs(y)) / 0.02) * tanh((0.5 - Abs(z)) / 0.02)
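    # The tanh factors smoothly ramp the inlet velocity from zero at the channel
    # walls (|y| = 0.5 or |z| = 0.5) to inlet_vel in the core over a layer of
    # width ~0.02, keeping the prescribed profile consistent with no-slip walls.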
constraint_inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo.inlet,
outvar={"u": u_profile, "v": 0, "w": 0},
batch_size=cfg.batch_size.Inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={
"u": 1.0,
"v": 1.0,
"w": 1.0,
        },
parameterization=geo.pr,
batch_per_epoch=5000,
)
flow_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo.outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.Outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
lambda_weighting={"p": 1.0},
parameterization=geo.pr,
batch_per_epoch=5000,
)
flow_domain.add_constraint(constraint_outlet, "outlet")
# no slip for channel walls
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo.geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.NoSlip,
lambda_weighting={
"u": 1.0,
"v": 1.0,
"w": 1.0,
        },
parameterization=geo.pr,
batch_per_epoch=5000,
)
flow_domain.add_constraint(no_slip, "no_slip")
# flow interior low res away from three fin
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.InteriorLR,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
compute_sdf_derivatives=True,
parameterization=geo.pr,
batch_per_epoch=5000,
criteria=Or(x < -1.1, x > 0.5),
)
flow_domain.add_constraint(lr_interior, "lr_interior")
    # flow interior high res near the three fin heat sink
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_z": 0, "momentum_y": 0},
        batch_size=cfg.batch_size.InteriorHR,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
compute_sdf_derivatives=True,
parameterization=geo.pr,
batch_per_epoch=5000,
criteria=And(x > -1.1, x < 0.5),
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
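    # (Intent: keep only sample points with positive SDF, i.e. in the fluid and
    # outside the heat sink, so planes cutting through the solid do not skew the
    # integral mass-flow target.)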
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=geo.integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=5,
integral_batch_size=cfg.batch_size.IntegralContinuity,
criteria=integral_criteria,
lambda_weighting={"normal_dot_vel": 1.0},
parameterization={**geo.pr, **{x_pos: (-1.1, 0.1)}},
fixed_dataset=False,
num_workers=4,
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
# flow data
file_path = "../openfoam/"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
}
if cfg.custom.turbulent:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_zeroEq_re500_fluid.csv"),
mapping,
)
else:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_fluid0.csv"), mapping
)
openfoam_var = {key: value[0::4] for key, value in openfoam_var.items()}
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_var.update({"fin_height_m": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_height_s": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_thickness_m": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_thickness_s": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_length_m": np.full_like(openfoam_var["x"], 1.0)})
openfoam_var.update({"fin_length_s": np.full_like(openfoam_var["x"], 1.0)})
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key
in [
"x",
"y",
"z",
"fin_height_m",
"fin_height_s",
"fin_thickness_m",
"fin_thickness_s",
"fin_length_m",
"fin_length_s",
]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=flow_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
flow_domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add pressure monitor
invar_inlet_pressure = geo.integral_plane.sample_boundary(
1024, parameterization={**fixed_param_ranges, **{x_pos: -2}}
)
pressure_monitor = PointwiseMonitor(
invar_inlet_pressure,
output_names=["p"],
metrics={"inlet_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
# add pressure drop for design optimization
# run only for parameterized cases and in eval mode
if cfg.custom.parameterized and cfg.run_mode == "eval":
# define candidate designs
num_samples = cfg.custom.num_samples
inference_param_tuple = itertools.product(
np.linspace(*height_m_range, num_samples),
np.linspace(*height_s_range, num_samples),
np.linspace(*length_m_range, num_samples),
np.linspace(*length_s_range, num_samples),
np.linspace(*thickness_m_range, num_samples),
np.linspace(*thickness_s_range, num_samples),
)
for (
HS_height_m_,
HS_height_s_,
HS_length_m_,
HS_length_s_,
HS_thickness_m_,
HS_thickness_s_,
) in inference_param_tuple:
HS_height_m = float(HS_height_m_)
HS_height_s = float(HS_height_s_)
HS_length_m = float(HS_length_m_)
HS_length_s = float(HS_length_s_)
HS_thickness_m = float(HS_thickness_m_)
HS_thickness_s = float(HS_thickness_s_)
specific_param_ranges = {
fin_height_m: HS_height_m,
fin_height_s: HS_height_s,
fin_length_m: HS_length_m,
fin_length_s: HS_length_s,
fin_thickness_m: HS_thickness_m,
fin_thickness_s: HS_thickness_s,
}
# add metrics for front pressure
plane_param_ranges = {
**specific_param_ranges,
**{x_pos: heat_sink_base_origin[0] - heat_sink_base_dim[0]},
}
metric = (
"front_pressure"
+ str(HS_height_m)
+ "_"
+ str(HS_height_s)
+ "_"
+ str(HS_length_m)
+ "_"
+ str(HS_length_s)
+ "_"
+ str(HS_thickness_m)
+ "_"
+ str(HS_thickness_s)
)
invar_pressure = geo.integral_plane.sample_boundary(
1024,
parameterization=plane_param_ranges,
)
front_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={metric: lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(front_pressure_monitor)
# add metrics for back pressure
plane_param_ranges = {
**specific_param_ranges,
**{x_pos: heat_sink_base_origin[0] + 2 * heat_sink_base_dim[0]},
}
metric = (
"back_pressure"
+ str(HS_height_m)
+ "_"
+ str(HS_height_s)
+ "_"
+ str(HS_length_m)
+ "_"
+ str(HS_length_s)
+ "_"
+ str(HS_thickness_m)
+ "_"
+ str(HS_thickness_s)
)
invar_pressure = geo.integral_plane.sample_boundary(
1024,
parameterization=plane_param_ranges,
)
back_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={metric: lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(back_pressure_monitor)
# make solver
flow_slv = Solver(cfg, flow_domain)
# start flow solver
flow_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/three_fin_3d/three_fin_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
from sympy import Symbol, Eq, Abs, tanh, Or, And
import itertools
import numpy as np
import modulus.sym
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.hydra import to_absolute_path, instantiate_arch
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from three_fin_geometry import *
@modulus.sym.main(config_path="conf", config_name="conf_thermal")
def run(cfg: ModulusConfig) -> None:
# make thermal equations
ad = AdvectionDiffusion(T="theta_f", rho=1.0, D=0.02, dim=3, time=False)
dif = Diffusion(T="theta_s", D=0.0625, dim=3, time=False)
    dif_interface = DiffusionInterface("theta_f", "theta_s", 1.0, 5.0, dim=3, time=False)
f_grad = GradNormal("theta_f", dim=3, time=False)
s_grad = GradNormal("theta_s", dim=3, time=False)
# make network arch
if cfg.custom.parameterized:
input_keys = [
Key("x"),
Key("y"),
Key("z"),
Key("fin_height_m"),
Key("fin_height_s"),
Key("fin_length_m"),
Key("fin_length_s"),
Key("fin_thickness_m"),
Key("fin_thickness_s"),
]
else:
input_keys = [Key("x"), Key("y"), Key("z")]
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
)
thermal_f_net = FullyConnectedArch(
input_keys=input_keys, output_keys=[Key("theta_f")]
)
thermal_s_net = FullyConnectedArch(
input_keys=input_keys, output_keys=[Key("theta_s")]
)
# make list of nodes to unroll graph on
thermal_nodes = (
ad.make_nodes()
+ dif.make_nodes()
        + dif_interface.make_nodes()
+ f_grad.make_nodes()
+ s_grad.make_nodes()
+ [flow_net.make_node(name="flow_network", optimize=False)]
+ [thermal_f_net.make_node(name="thermal_f_network")]
+ [thermal_s_net.make_node(name="thermal_s_network")]
)
geo = ThreeFin(parameterized=cfg.custom.parameterized)
# params for simulation
# heat params
inlet_t = 293.15 / 273.15 - 1.0
grad_t = 360 / 273.15
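    # Temperatures are nondimensionalized as theta = T/273.15 - 1, so inlet_t
    # corresponds to 293.15 K (20 C); grad_t scales the imposed source gradient
    # by the same 273.15 K reference.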
# make flow domain
thermal_domain = Domain()
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo.inlet,
outvar={"theta_f": inlet_t},
batch_size=cfg.batch_size.Inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={"theta_f": 1.0}, # weight zero on edges
parameterization=geo.pr,
)
thermal_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo.outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.Outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
lambda_weighting={"normal_gradient_theta_f": 1.0}, # weight zero on edges
parameterization=geo.pr,
)
thermal_domain.add_constraint(constraint_outlet, "outlet")
# channel walls insulating
def wall_criteria(invar, params):
sdf = geo.three_fin.sdf(invar, params)
return np.less(sdf["sdf"], -1e-5)
channel_walls = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo.channel,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.ChannelWalls,
criteria=wall_criteria,
lambda_weighting={"normal_gradient_theta_f": 1.0},
parameterization=geo.pr,
)
thermal_domain.add_constraint(channel_walls, "channel_walls")
# fluid solid interface
def interface_criteria(invar, params):
sdf = geo.channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
fluid_solid_interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo.three_fin,
outvar={
"diffusion_interface_dirichlet_theta_f_theta_s": 0,
"diffusion_interface_neumann_theta_f_theta_s": 0,
},
batch_size=cfg.batch_size.SolidInterface,
criteria=interface_criteria,
parameterization=geo.pr,
)
thermal_domain.add_constraint(fluid_solid_interface, "fluid_solid_interface")
# heat source
sharpen_tanh = 60.0
source_func_xl = (tanh(sharpen_tanh * (x - source_origin[0])) + 1.0) / 2.0
source_func_xh = (
tanh(sharpen_tanh * ((source_origin[0] + source_dim[0]) - x)) + 1.0
) / 2.0
source_func_zl = (tanh(sharpen_tanh * (z - source_origin[2])) + 1.0) / 2.0
source_func_zh = (
tanh(sharpen_tanh * ((source_origin[2] + source_dim[2]) - z)) + 1.0
) / 2.0
gradient_normal = (
grad_t * source_func_xl * source_func_xh * source_func_zl * source_func_zh
)
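    # Each shifted tanh is a smooth step, so the product of the four factors is
    # ~1 inside the rectangular source footprint and ~0 outside; gradient_normal
    # therefore ramps the wall heat flux up to grad_t over a thin transition
    # band controlled by sharpen_tanh.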
heat_source = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo.three_fin,
outvar={"normal_gradient_theta_s": gradient_normal},
batch_size=cfg.batch_size.HeatSource,
criteria=Eq(y, source_origin[1]),
)
thermal_domain.add_constraint(heat_source, "heat_source")
# flow interior low res away from three fin
lr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo.geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.InteriorLR,
criteria=Or(x < -1.1, x > 0.5),
)
thermal_domain.add_constraint(lr_flow_interior, "lr_flow_interior")
    # flow interior high res near the three fin heat sink
hr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo.geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.InteriorHR,
criteria=And(x > -1.1, x < 0.5),
)
thermal_domain.add_constraint(hr_flow_interior, "hr_flow_interior")
# solid interior
solid_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo.three_fin,
outvar={"diffusion_theta_s": 0},
batch_size=cfg.batch_size.SolidInterior,
lambda_weighting={"diffusion_theta_s": 100.0},
)
thermal_domain.add_constraint(solid_interior, "solid_interior")
# flow validation data
file_path = "../openfoam/"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
"T": "theta_f",
}
if cfg.custom.turbulent:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_zeroEq_re500_fluid.csv"),
mapping,
)
else:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_fluid0.csv"), mapping
)
openfoam_var["theta_f"] = (
openfoam_var["theta_f"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_var.update({"fin_height_m": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_height_s": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_thickness_m": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_thickness_s": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_length_m": np.full_like(openfoam_var["x"], 1.0)})
openfoam_var.update({"fin_length_s": np.full_like(openfoam_var["x"], 1.0)})
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key
in [
"x",
"y",
"z",
"fin_height_m",
"fin_height_s",
"fin_thickness_m",
"fin_thickness_s",
"fin_length_m",
"fin_length_s",
]
}
openfoam_flow_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_thermal_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p", "theta_f"]
}
openfoam_flow_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_thermal_outvar_numpy,
)
thermal_domain.add_validator(
openfoam_flow_validator,
"thermal_flow_data",
)
# solid data
mapping = {"Points:0": "x", "Points:1": "y", "Points:2": "z", "T": "theta_s"}
if cfg.custom.turbulent:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_zeroEq_re500_solid.csv"),
mapping,
)
else:
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/threeFin_extend_solid0.csv"), mapping
)
openfoam_var["theta_s"] = (
openfoam_var["theta_s"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_var.update({"fin_height_m": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_height_s": np.full_like(openfoam_var["x"], 0.4)})
openfoam_var.update({"fin_thickness_m": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_thickness_s": np.full_like(openfoam_var["x"], 0.1)})
openfoam_var.update({"fin_length_m": np.full_like(openfoam_var["x"], 1.0)})
openfoam_var.update({"fin_length_s": np.full_like(openfoam_var["x"], 1.0)})
openfoam_invar_solid_numpy = {
key: value
for key, value in openfoam_var.items()
if key
in [
"x",
"y",
"z",
"fin_height_m",
"fin_height_s",
"fin_thickness_m",
"fin_thickness_s",
"fin_length_m",
"fin_length_s",
]
}
openfoam_outvar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_s"]
}
openfoam_solid_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_solid_numpy,
            true_outvar=openfoam_outvar_solid_numpy,
)
thermal_domain.add_validator(
openfoam_solid_validator,
"thermal_solid_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add peak temp monitors for design optimization
# run only for parameterized cases and in eval mode
if cfg.custom.parameterized and cfg.run_mode == "eval":
# define candidate designs
num_samples = cfg.custom.num_samples
inference_param_tuple = itertools.product(
np.linspace(*height_m_range, num_samples),
np.linspace(*height_s_range, num_samples),
np.linspace(*length_m_range, num_samples),
np.linspace(*length_s_range, num_samples),
np.linspace(*thickness_m_range, num_samples),
np.linspace(*thickness_s_range, num_samples),
)
for (
HS_height_m_,
HS_height_s_,
HS_length_m_,
HS_length_s_,
HS_thickness_m_,
HS_thickness_s_,
) in inference_param_tuple:
HS_height_m = float(HS_height_m_)
HS_height_s = float(HS_height_s_)
HS_length_m = float(HS_length_m_)
HS_length_s = float(HS_length_s_)
HS_thickness_m = float(HS_thickness_m_)
HS_thickness_s = float(HS_thickness_s_)
specific_param_ranges = {
fin_height_m: HS_height_m,
fin_height_s: HS_height_s,
fin_length_m: HS_length_m,
fin_length_s: HS_length_s,
fin_thickness_m: HS_thickness_m,
fin_thickness_s: HS_thickness_s,
}
# add metrics for peak temperature
plane_param_ranges = {**specific_param_ranges}
metric = (
"peak_temp"
+ str(HS_height_m)
+ "_"
+ str(HS_height_s)
+ "_"
+ str(HS_length_m)
+ "_"
+ str(HS_length_s)
+ "_"
+ str(HS_thickness_m)
+ "_"
+ str(HS_thickness_s)
)
invar_temp = geo.three_fin.sample_boundary(
5000,
criteria=Eq(y, source_origin[1]),
parameterization=plane_param_ranges,
)
peak_temp_monitor = PointwiseMonitor(
invar_temp,
output_names=["theta_s"],
metrics={metric: lambda var: torch.max(var["theta_s"])},
nodes=thermal_nodes,
)
thermal_domain.add_monitor(peak_temp_monitor)
# make solver
thermal_slv = Solver(cfg, thermal_domain)
# start thermal solver
thermal_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/three_fin_3d/three_fin_thermal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import numpy as np
from tensorboard.backend.event_processing import event_accumulator
import matplotlib.pyplot as plt
def read_tensorboard_eventfiles(path):
assert len(glob.glob(path + "*events*")) != 0, "No event files found"
if len(glob.glob(path + "*events*")) > 1:
# print("Found more than one event files")
events_data = []
run_time = []
for i in glob.glob(path + "*events*"):
ea = event_accumulator.EventAccumulator(
i, size_guidance={event_accumulator.TENSORS: 0}
)
ea.Reload()
tensors_list = ea.Tags()["tensors"]
tensors_list.remove("config/text_summary")
event_data = {}
for t in tensors_list:
event_data[t] = tf_log_to_np(ea.Tensors(t))
if (
len(event_data["Train/learning_rate"]["w_time"]) != 1
            ):  # skip event files with only one entry when accumulating run time
run_time.append(
event_data["Train/learning_rate"]["w_time"][-1]
- event_data["Train/learning_rate"]["w_time"][0]
)
events_data.append(event_data)
run_time = sum(run_time)
data = {}
for t in events_data[0].keys():
combined_data = {}
for k in events_data[0][t].keys():
combined_data[k] = np.concatenate(
[events_data[i][t][k] for i in range(len(events_data))]
)
data[t] = combined_data
# sort the data
idx = np.argsort(data["Train/learning_rate"]["step"])
for t in data.keys():
for k in data[t].keys():
data[t][k] = data[t][k][idx]
else:
data = {}
# print("Found only one event file")
ea = event_accumulator.EventAccumulator(
glob.glob(path + "*events*")[0],
size_guidance={event_accumulator.TENSORS: 0},
)
ea.Reload()
tensors_list = ea.Tags()["tensors"]
tensors_list.remove("config/text_summary")
for t in tensors_list:
data[t] = tf_log_to_np(ea.Tensors(t))
        if len(data["Train/learning_rate"]["w_time"]) != 1:
            run_time = (
                data["Train/learning_rate"]["w_time"][-1]
                - data["Train/learning_rate"]["w_time"][0]
            )
        else:
            run_time = 0.0  # guard: a single-entry log would otherwise leave run_time unbound
return data, run_time
def tf_log_to_np(tensor_list):
w_time, step, val = zip(*tensor_list)
val_floats = []
for i in range(len(val)):
val_floats.append(val[i].float_val)
np_data = {
"w_time": np.array(w_time),
"step": np.array(step),
"value": np.array(val_floats).flatten(),
}
return np_data
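# Shape of the record returned above (illustrative):
#   {"w_time": array([t0, t1, ...]),   # wall-clock timestamps
#    "step":   array([0, 100, ...]),   # global training steps
#    "value":  array([v0, v1, ...])}   # scalar values, flattened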
def print_final_results(np_log_data, run_time):
for key in np_log_data.keys():
print(
str(key)
+ " at step "
+ str(np_log_data[key]["step"][-1])
+ " is "
+ str(np_log_data[key]["value"][-1])
)
print("Total runtime minutes: " + str(run_time / 60))
def save_final_results(np_log_data, path):
for key in np_log_data.keys():
np.savetxt(
path + key[key.rfind("/") + 1 :] + ".csv",
np.concatenate(
(
np.reshape(np_log_data[key]["w_time"], (1, -1)),
np.reshape(np_log_data[key]["step"], (1, -1)),
np.reshape(np_log_data[key]["value"], (1, -1)),
),
axis=0,
).T,
delimiter=",",
header="w_time, step, value",
comments="",
)
def plot_results(np_log_data, save_path):
keys_list = list(np_log_data.keys())
train_keys = [key for key in keys_list if "Train" in key]
validator_keys = [key for key in keys_list if "Validators" in key]
train_keys.insert(0, train_keys.pop(train_keys.index("Train/learning_rate")))
train_keys.insert(1, train_keys.pop(train_keys.index("Train/loss_aggregated")))
ordered_keys_list = train_keys + validator_keys
fig, axs = plt.subplots(
len(ordered_keys_list), figsize=(4, 3 * len(ordered_keys_list))
)
for i in range(len(ordered_keys_list)):
axs[i].plot(
np_log_data[ordered_keys_list[i]]["step"],
np_log_data[ordered_keys_list[i]]["value"],
)
axs[i].set_yscale("log")
axs[i].set_title(ordered_keys_list[i])
plt.tight_layout()
plt.savefig(save_path + "/train_plots")
def check_validation_error(path, threshold, save_path):
os.makedirs(save_path, exist_ok=True)
np_log_data, run_time = read_tensorboard_eventfiles(path)
for key in np_log_data.keys():
if "Validators" in key:
assert (
np_log_data[key]["value"][-1] < threshold
), "Validation error for {} is not below the specified threshold of {}".format(
key, threshold
)
plot_results(np_log_data, save_path)
print_final_results(np_log_data, run_time)
save_final_results(np_log_data, save_path)
| modulus-sym-main | examples/test/tflogs_reader.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
from termcolor import colored
from collections import Counter
import itertools
from process import Process
class Que:
def __init__(
self,
available_gpus=(0, 1),
print_type="que",
exit_on_fail=True,
loop_pause=10.0,
clear_prints=False,
):
self.pl = []
self.pl_errors = [] # Holds failed processes
self.running_pl = []
self.available_gpus = list(available_gpus)
self.start_time = 0
self.print_type = print_type
self.exit_on_fail = exit_on_fail
self.loop_pause = loop_pause
self.clear_prints = clear_prints
def enque_experiments(self, cmd, params, cwd="./"):
cross_params = itertools.product(
*[
["--" + key + "=" + str(value) for value in params[key]]
for key in params.keys()
]
)
cmds = []
cwds = []
for param in cross_params:
cmds.append(cmd + " " + " ".join(param))
cwds.append(cwd)
        self.enque_cmds(cmd, cmds, cwds)  # enque_cmds expects a job name first; reuse the base command
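    # Hypothetical usage sketch (parameter names are illustrative only):
    #   q = Que(available_gpus=(0, 1))
    #   q.enque_experiments("python train.py", {"lr": [1e-3, 1e-4], "seed": [0, 1]})
    #   q.start_que_runner()
    # This enqueues the cross product of flags: --lr=0.001 --seed=0, and so on.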
def enque_cmds(self, name, cmds, cwds):
        if isinstance(cmds, str):
cmds = [cmds]
cwds = [cwds]
        pairs = list(zip(cmds, cwds))
        random.shuffle(pairs)  # shuffle jobs while keeping each cmd paired with its cwd
        for cmd, cwd in pairs:
self.pl.append(Process(name, cmd, cwd=cwd))
def start_next(self, gpu):
for i in range(len(self.pl)):
if self.pl[i].get_status() == "Not Started":
print(colored(f"Starting job: {self.pl[i].name}", "yellow"))
self.pl[i].start(gpu)
break
def find_free_gpu(self):
used_gpus = []
for i in range(len(self.pl)):
if self.pl[i].get_status() == "Running":
used_gpus.append(self.pl[i].get_gpu())
free_gpus = list(Counter(self.available_gpus) - Counter(used_gpus))
return free_gpus
def num_finished_processes(self):
proc = 0
for i in range(len(self.pl)):
if (
self.pl[i].get_status() == "Finished"
and self.pl[i].get_return_status() == "SUCCESS"
):
proc += 1
return proc
def num_failed_processes(self):
proc = 0
for i in range(len(self.pl)):
if (
self.pl[i].get_status() == "Finished"
and self.pl[i].get_return_status() == "FAIL"
):
proc += 1
return proc
def num_running_processes(self):
proc = 0
for i in range(len(self.pl)):
if self.pl[i].get_status() == "Running":
proc += 1
return proc
def num_unstarted_processes(self):
proc = 0
for i in range(len(self.pl)):
if self.pl[i].get_status() == "Not Started":
proc += 1
return proc
def percent_complete(self):
rc = 0.0
if self.num_finished_processes() > 0:
rc = self.num_finished_processes() / float(len(self.pl))
return rc * 100.0
def run_time(self):
return time.time() - self.start_time
def time_left(self):
tl = -1
pc = self.percent_complete()
if pc > 0:
tl = (time.time() - self.start_time) * (
1.0 / (pc / 100.0)
) - self.run_time()
return tl
def time_string(self, tl):
tl = max([0, tl])
seconds = int(tl % 60)
tl = (tl - seconds) / 60
mins = int(tl % 60)
tl = (tl - mins) / 60
hours = int(tl % 24)
days = int((tl - hours) / 24)
return (
"("
+ str(days).zfill(3)
+ ":"
+ str(hours).zfill(2)
+ ":"
+ str(mins).zfill(2)
+ ":"
+ str(seconds).zfill(2)
+ ")"
)
def update_pl_status(self):
for i in range(len(self.pl)):
self.pl[i].update_status()
def print_que_status(self):
if self.clear_prints:
os.system("clear")
print("QUE STATUS")
print(
colored(
"Num Finished Success: " + str(self.num_finished_processes()), "green"
)
)
print(
colored("Num Finished Fail: " + str(self.num_failed_processes()), "red")
)
print(
colored(
"Num Running: " + str(self.num_running_processes()), "yellow"
)
)
print(
colored(
"Num Left: " + str(self.num_unstarted_processes()), "blue"
)
)
print(
colored(
"Percent Complete: {0:.1f}%".format(self.percent_complete()), "blue"
)
)
print(
colored(
"Time Left (D:H:M:S): " + self.time_string(self.time_left()), "blue"
)
)
print(
colored(
"Run Time (D:H:M:S): " + self.time_string(self.run_time()), "blue"
)
)
for p in self.pl:
if p.return_status == "FAIL" and p not in self.pl_errors:
p.print_info()
self.pl_errors.append(p)
def start_que_runner(self):
self.start_time = time.time()
while True:
            # launch queued jobs on any free GPUs
free_gpus = self.find_free_gpu()
for gpu in free_gpus:
self.start_next(gpu)
# print status
self.update_pl_status()
if self.print_type == "que":
self.print_que_status()
elif self.print_type == "process":
if self.clear_prints:
os.system("clear")
for p in self.pl:
p.print_info()
else:
raise ValueError("print type not defined: " + self.print_type)
# check if finished
finished = True
failed = False
for p in self.pl:
if p.status != "Finished":
finished = False
if p.return_status == "FAIL":
failed = True
if finished:
break
if self.exit_on_fail and failed:
                raise RuntimeError("One or more experiments have failed")
time.sleep(self.loop_pause)
| modulus-sym-main | examples/test/que.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import time
import glob
import subprocess
from termcolor import colored
class Process:
def __init__(self, name, cmd, cwd="./"):
self.name = name
self.cmd = cmd
self.cwd = cwd
self.status = "Not Started"
self.gpu = -1
self.process = None
self.return_status = "NONE"
self.run_time = 0
def start(self, gpu=0):
with open(os.devnull, "w") as devnull:
self.process = subprocess.Popen(
self.cmd.split(" "),
cwd=self.cwd,
stdout=devnull,
stderr=subprocess.PIPE,
env=dict(os.environ, CUDA_VISIBLE_DEVICES=str(gpu)),
)
self.pid = self.process.pid
self.status = "Running"
self.start_time = time.time()
self.gpu = gpu
def update_status(self):
if self.status == "Running":
self.run_time = time.time() - self.start_time
if self.process.poll() is not None:
self.status = "Finished"
if self.process.poll() == 0:
self.return_status = "SUCCESS"
else:
self.return_status = "FAIL"
self.return_code = self.process.returncode
output, error = self.process.communicate()
                if error is not None:
self.error = error.decode("utf-8")
else:
self.error = "No error message. :("
def get_pid(self):
return self.pid
def get_status(self):
return self.status
def get_gpu(self):
return self.gpu
def get_return_status(self):
return self.return_status
def print_info(self):
print_string = "\n" + colored(f"Process info for: {self.name}", "blue") + "\n"
print_string = print_string + colored("cmd is ", "blue") + self.cmd + "\n"
print_string = print_string + (colored("cwd is ", "blue") + self.cwd + "\n")
print_string = print_string + (colored("status ", "blue") + self.status + "\n")
if self.return_status == "SUCCESS":
print_string = print_string + (
colored("return status ", "blue")
+ colored(self.return_status, "green")
+ "\n"
)
elif self.return_status == "FAIL":
print_string = print_string + (
colored("return status ", "blue")
+ colored(self.return_status, "red")
+ "\n"
+ colored("START OF ERROR MESSAGE", "red")
+ "\n"
+ self.error
+ colored("END OF ERROR MESSAGE", "red")
+ "\n"
)
else:
print_string = print_string + (
colored("return status ", "blue")
+ colored(self.return_status, "yellow")
+ "\n"
)
print_string = print_string + (
colored("run time ", "blue") + str(self.run_time)
)
print(print_string)
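# Minimal life-cycle sketch (illustrative; "sleep 1" stands in for a real
# training command):
#
#   p = Process(name="demo", cmd="sleep 1", cwd="./")
#   p.start(gpu=0)
#   while p.get_status() != "Finished":
#       p.update_status()
#       time.sleep(0.5)
#   p.print_info()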
| modulus-sym-main | examples/test/process.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tflogs_reader import check_validation_error, plot_results
if __name__ == "__main__":
check_validation_error(
"../helmholtz/outputs/helmholtz/",
threshold=0.3,
save_path="./checks/helmholtz/",
)
check_validation_error(
"../discontinuous_galerkin/dg/outputs/dg/",
threshold=0.3,
save_path="./checks/dg/",
)
check_validation_error(
"../anti_derivative/outputs/physics_informed/",
threshold=0.3,
save_path="./checks/physics_informed/",
)
| modulus-sym-main | examples/test/run_ci_tests.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import GPUtil
import json
from pathlib import Path
from que import Que
def get_experiments(
tag: str, run_opt: str = "full", json_file: Path = Path("./experiments.json")
):
"""Gets dictionary of experiments to run from JSON file
Parameters
----------
tag : str
Tag of experiments to get
run_opt : str, optional
Run option, by default "full"
json_file : Path, optional
Filename/path to JSON file, by default Path("./experiments.json")
"""
assert json_file.is_file(), f"Invalid experiment JSON path {json_file}"
with open("experiments.json") as json_file:
data = json.load(json_file)
# Run option must be present in run options field
assert run_opt in data["run_opts"], f"Invalid experiment run option {run_opt}"
experiments = {}
for key, value in data["experiments"].items():
if tag in value["tags"]:
experiments[f"{key}"] = {
"path": value["path"],
"run_cmd": value["run_cmd"]
+ "".join([f" {cmd}" for cmd in data["run_opts"][run_opt]]),
}
return experiments
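# A minimal experiments.json consistent with the parsing above (illustrative;
# the field names are inferred from this function, the real file may define more):
#
# {
#     "run_opts": {"full": [], "single": ["training.max_steps=1"]},
#     "experiments": {
#         "helmholtz": {
#             "path": "../helmholtz/",
#             "run_cmd": "python helmholtz.py",
#             "tags": ["first"]
#         }
#     }
# }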
if __name__ == "__main__":
# get inputs
parser = argparse.ArgumentParser()
parser.add_argument(
"--mode", default="single_step", choices=["full", "single_step", "unit_tests"]
)
parser.add_argument("--gpus", default=None)
args = parser.parse_args()
# get gpus
if args.gpus is None:
available_gpus = GPUtil.getAvailable(limit=8)
else:
available_gpus = [int(x) for x in args.gpus.split(",")]
if not available_gpus:
raise ValueError("At least 1 GPU is required to run this script")
# set experiments
if args.mode == "full":
tags = ["first", "second"]
run_opt = "full"
elif args.mode == "single_step":
tags = ["first", "second"]
run_opt = "single"
elif args.mode == "unit_tests":
tags = ["unit"]
run_opt = "full"
for tag in tags:
print(f"Collecting experiments with tag: {tag}")
experiments = get_experiments(tag, run_opt)
q = Que(available_gpus=available_gpus, print_type="que", exit_on_fail=False)
for key, value in experiments.items():
q.enque_cmds(name=key, cmds=value["run_cmd"], cwds=value["path"])
# Run experiment queue
q.start_que_runner()
| modulus-sym-main | examples/test/run_experiments.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import sympy as sp
import numpy as np
from typing import List, Dict, Union
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.distributed import DistributedManager
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import Constraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.loss import PointwiseLossNorm
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.utils.io import grid_to_vtk
from jhtdb_utils import make_jhtdb_dataset
from ops import FlowOps
class SuperResolutionConstraint(Constraint):
def __init__(
self,
nodes: List[Node],
invar: Dict[str, np.array],
outvar: Dict[str, np.array],
batch_size: int,
loss_weighting: Dict[str, int],
dx: float = 1.0,
lambda_weighting: Dict[str, Union[np.array, sp.Basic]] = None,
num_workers: int = 0,
):
dataset = DictGridDataset(
invar=invar, outvar=outvar, lambda_weighting=lambda_weighting
)
super().__init__(
nodes=nodes,
dataset=dataset,
loss=PointwiseLossNorm(),
batch_size=batch_size,
shuffle=True,
drop_last=True,
num_workers=num_workers,
)
self.dx = dx
self.ops = FlowOps().to(self.device)
self.loss_weighting = {}
self.fields = set("U")
for key, value in loss_weighting.items():
if float(value) > 0:
                self.fields = {key}.union(self.fields)
self.loss_weighting[key] = value
def calc_flow_stats(self, data_var):
output = {"U": data_var["U"]}
vel_output = {}
cont_output = {}
vort_output = {}
enst_output = {}
strain_output = {}
# compute derivatives
if len(self.fields) > 1:
grad_output = self.ops.get_velocity_grad(
data_var["U"], dx=self.dx, dy=self.dx, dz=self.dx
)
# compute continuity
if "continuity" in self.fields:
cont_output = self.ops.get_continuity_residual(grad_output)
# compute vorticity
if "omega" in self.fields or "enstrophy" in self.fields:
vort_output = self.ops.get_vorticity(grad_output)
# compute enstrophy
if "enstrophy" in self.fields:
enst_output = self.ops.get_enstrophy(vort_output)
# compute strain rate
if "strain" in self.fields:
strain_output = self.ops.get_strain_rate_mag(grad_output)
if "dU" in self.fields:
# Add to output dictionary
grad_output = torch.cat(
[
grad_output[key]
for key in [
"u__x",
"u__y",
"u__z",
"v__x",
"v__y",
"v__z",
"w__x",
"w__y",
"w__z",
]
],
dim=1,
)
vel_output = {"dU": grad_output}
if "omega" in self.fields:
vort_output = torch.cat(
[vort_output[key] for key in ["omega_x", "omega_y", "omega_z"]], dim=1
)
vort_output = {"omega": vort_output}
output.update(vel_output)
output.update(cont_output)
output.update(vort_output)
output.update(enst_output)
output.update(strain_output)
return output
def save_batch(self, filename):
# sample batch
invar, true_outvar, lambda_weighting = next(self.dataloader)
invar0 = {key: value for key, value in invar.items()}
invar = Constraint._set_device(invar, device=self.device, requires_grad=True)
true_outvar = Constraint._set_device(true_outvar, device=self.device)
# compute pred outvar
if hasattr(self.model, "module"):
modl = self.model.module
else:
modl = self.model
pred_outvar = modl(invar)
# Calc flow related stats
pred_outvar = self.calc_flow_stats(pred_outvar)
true_outvar = self.calc_flow_stats(true_outvar)
named_true_outvar = {"true_" + key: value for key, value in true_outvar.items()}
named_pred_outvar = {"pred_" + key: value for key, value in pred_outvar.items()}
save_var = {**named_true_outvar, **named_pred_outvar}
out_save_var = {
key: value.cpu().detach().numpy() for key, value in save_var.items()
}
in_save_var = {
key: value.cpu().detach().numpy() for key, value in invar0.items()
}
# Output both the high-res and low-res fields
for b in range(min(4, next(iter(invar.values())).shape[0])):
grid_to_vtk(out_save_var, filename + f"_{b}_hr", batch_index=b)
grid_to_vtk(in_save_var, filename + f"_{b}_lr", batch_index=b)
def load_data(self):
# get lr and high resolution data from dataloader
invar, target_var, _ = next(self.dataloader)
self._input_vars = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
self._target_vars = Constraint._set_device(target_var, device=self.device)
def load_data_static(self):
if self._input_vars is None:
# Default loading if vars not allocated
self.load_data()
else:
# get train points from dataloader
invar, target_vars, _ = next(self.dataloader)
# Set grads to false here for inputs, static var has allocation already
invar = Constraint._set_device(
invar, device=self.device, requires_grad=False
)
target_vars = Constraint._set_device(target_vars, device=self.device)
for key in invar.keys():
self._input_vars[key].data.copy_(invar[key])
for key in target_vars.keys():
self._target_vars[key].copy_(target_vars[key])
def forward(self):
# compute forward pass of conv net
self._pred_outvar = self.model(self._input_vars)
def loss(self, step: int) -> Dict[str, torch.Tensor]:
# Calc flow related stats
pred_outvar = self.calc_flow_stats(self._pred_outvar)
target_vars = self.calc_flow_stats(self._target_vars)
# compute losses
losses = {}
for key in target_vars.keys():
mean = (target_vars[key] ** 2).mean()
losses[key] = (
self.loss_weighting[key]
* (((pred_outvar[key] - target_vars[key]) ** 2) / mean).mean()
)
return losses
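# Note on the constraint loss above: each requested field contributes a
# weighted relative mean-squared error,
#     L_k = w_k * mean((pred_k - target_k)^2) / mean(target_k^2),
# so fields with very different magnitudes (velocity vs. enstrophy) are put
# on a comparable scale before the weights w_k from the config apply.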
class SuperResolutionValidator(GridValidator):
def __init__(self, *args, log_iter: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self.log_iter = log_iter
self.device = DistributedManager().device
def save_results(self, name, results_dir, writer, save_filetypes, step):
invar_cpu = {key: [] for key in self.dataset.invar_keys}
true_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
pred_outvar_cpu = {key: [] for key in self.dataset.outvar_keys}
# Loop through mini-batches
for i, (invar0, true_outvar0, lambda_weighting) in enumerate(self.dataloader):
# Move data to device (may need gradients in future, if so requires_grad=True)
invar = Constraint._set_device(
invar0, device=self.device, requires_grad=self.requires_grad
)
true_outvar = Constraint._set_device(
true_outvar0, device=self.device, requires_grad=self.requires_grad
)
pred_outvar = self.forward(invar)
# Collect minibatch info into cpu dictionaries
invar_cpu = {
key: value + [invar[key].cpu().detach()]
for key, value in invar_cpu.items()
}
true_outvar_cpu = {
key: value + [true_outvar[key].cpu().detach()]
for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: value + [pred_outvar[key].cpu().detach()]
for key, value in pred_outvar_cpu.items()
}
# Concat mini-batch tensors
invar_cpu = {key: torch.cat(value) for key, value in invar_cpu.items()}
true_outvar_cpu = {
key: torch.cat(value) for key, value in true_outvar_cpu.items()
}
pred_outvar_cpu = {
key: torch.cat(value) for key, value in pred_outvar_cpu.items()
}
# compute losses on cpu
losses = GridValidator._l2_relative_error(true_outvar_cpu, pred_outvar_cpu)
# convert to numpy arrays
invar = {k: v.numpy() for k, v in invar_cpu.items()}
true_outvar = {k: v.numpy() for k, v in true_outvar_cpu.items()}
pred_outvar = {k: v.numpy() for k, v in pred_outvar_cpu.items()}
# save batch to vtk file
named_target_outvar = {"true_" + k: v for k, v in true_outvar.items()}
named_pred_outvar = {"pred_" + k: v for k, v in pred_outvar.items()}
for b in range(min(4, next(iter(invar.values())).shape[0])):
if self.log_iter:
grid_to_vtk(
{**named_target_outvar, **named_pred_outvar},
results_dir + name + f"_{b}_hr" + f"{step:06}",
batch_index=b,
)
else:
grid_to_vtk(
{**named_target_outvar, **named_pred_outvar},
results_dir + name + f"_{b}_hr",
batch_index=b,
)
grid_to_vtk(invar, results_dir + name + f"_{b}_lr", batch_index=b)
# add tensorboard plots
if self.plotter is not None:
self.plotter._add_figures(
name,
results_dir,
writer,
step,
invar,
true_outvar,
pred_outvar,
)
# add tensorboard scalars
for k, loss in losses.items():
writer.add_scalar("val/" + name + "/" + k, loss, step, new_style=True)
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# load jhtdb datasets
invar, outvar = make_jhtdb_dataset(
nr_samples=cfg.custom.jhtdb.n_train,
domain_size=cfg.custom.jhtdb.domain_size,
lr_factor=cfg.arch.super_res.scaling_factor,
token=cfg.custom.jhtdb.access_token,
data_dir=to_absolute_path("datasets/jhtdb_training"),
time_range=[1, 768],
dataset_seed=123,
)
invar_valid, outvar_valid = make_jhtdb_dataset(
nr_samples=cfg.custom.jhtdb.n_valid,
domain_size=cfg.custom.jhtdb.domain_size,
lr_factor=cfg.arch.super_res.scaling_factor,
token=cfg.custom.jhtdb.access_token,
data_dir=to_absolute_path("datasets/jhtdb_valid"),
time_range=[768, 1024],
dataset_seed=124,
)
model = instantiate_arch(
input_keys=[Key("U_lr", size=3)],
output_keys=[Key("U", size=3)],
cfg=cfg.arch.super_res,
)
nodes = [model.make_node(name="super_res")]
# make super resolution domain
jhtdb_domain = Domain()
# make data driven constraint
jhtdb_constraint = SuperResolutionConstraint(
nodes=nodes,
invar=invar,
outvar=outvar,
batch_size=cfg.batch_size.train,
loss_weighting=cfg.custom.loss_weights,
lambda_weighting=None,
dx=2 * np.pi / 1024.0,
)
jhtdb_domain.add_constraint(jhtdb_constraint, "constraint")
# make validator
dataset = DictGridDataset(invar_valid, outvar_valid)
jhtdb_validator = SuperResolutionValidator(
dataset=dataset,
nodes=nodes,
batch_size=cfg.batch_size.valid,
log_iter=False,
)
jhtdb_domain.add_validator(jhtdb_validator, "validator")
# make solver
slv = Solver(
cfg,
domain=jhtdb_domain,
)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/super_resolution/super_resolution.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
import torch
try:
import pyJHTDB
import pyJHTDB.dbinfo
except ImportError:
raise ModuleNotFoundError(
"This example requires the pyJHTDB python package for access to the JHT database.\n"
+ "Find out information here: https://github.com/idies/pyJHTDB"
)
from tqdm import *
from typing import List, Dict, Union
from pathlib import Path
from modulus.sym.hydra import to_absolute_path
from modulus.sym.distributed.manager import DistributedManager
def _pos_to_name(dataset, field, time_step, start, end, step, filter_width):
return (
"jhtdb_field_"
+ str(field)
+ "_time_step_"
+ str(time_step)
+ "_start_"
+ str(start[0])
+ "_"
+ str(start[1])
+ "_"
+ str(start[2])
+ "_end_"
+ str(end[0])
+ "_"
+ str(end[1])
+ "_"
+ str(end[2])
+ "_step_"
+ str(step[0])
+ "_"
+ str(step[1])
+ "_"
+ str(step[2])
+ "_filter_width_"
+ str(filter_width)
)
def _name_to_pos(name):
scrapted_name = name[:4].split("_")
field = str(scrapted_name[3])
time_step = int(scrapted_name[6])
start = [int(x) for x in scrapted_name[7:10]]
end = [int(x) for x in scrapted_name[11:14]]
step = [int(x) for x in scrapted_name[15:18]]
filter_width = int(scrapted_name[-1])
return field, time_step, start, end, step, filter_width
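# Naming example: _pos_to_name("isotropic1024coarse", "u", 5, (1, 2, 3),
# (64, 65, 66), (1, 1, 1), 1) returns
# "jhtdb_field_u_time_step_5_start_1_2_3_end_64_65_66_step_1_1_1_filter_width_1",
# which _name_to_pos parses back (note the dataset name itself is not encoded).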
def get_jhtdb(
loader, data_dir: Path, dataset, field, time_step, start, end, step, filter_width
):
# get filename
file_name = (
_pos_to_name(dataset, field, time_step, start, end, step, filter_width) + ".npy"
)
file_dir = data_dir / Path(file_name)
# check if file exists and if not download it
try:
results = np.load(file_dir)
    except FileNotFoundError:
# Only MPI process 0 can download data
if DistributedManager().rank == 0:
results = loader.getCutout(
data_set=dataset,
field=field,
time_step=time_step,
start=start,
end=end,
step=step,
filter_width=filter_width,
)
np.save(file_dir, results)
# Wait for all processes to get here
if DistributedManager().distributed:
torch.distributed.barrier()
results = np.load(file_dir)
return results
def make_jhtdb_dataset(
nr_samples: int = 128,
domain_size: int = 64,
lr_factor: int = 4,
token: str = "edu.jhu.pha.turbulence.testing-201311",
data_dir: str = to_absolute_path("datasets/jhtdb_training"),
time_range: List[int] = [1, 1024],
dataset_seed: int = 123,
debug: bool = False,
):
# make data dir
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
# initialize runner
lJHTDB = pyJHTDB.libJHTDB()
lJHTDB.initialize()
lJHTDB.add_token(token)
# loop to get dataset
np.random.seed(dataset_seed)
list_low_res_u = []
list_high_res_u = []
for i in tqdm(range(nr_samples)):
# set download params
dataset = "isotropic1024coarse"
field = "u"
time_step = int(np.random.randint(time_range[0], time_range[1]))
start = np.array(
[np.random.randint(1, 1024 - domain_size) for _ in range(3)], dtype=int
)
end = np.array([x + domain_size - 1 for x in start], dtype=int)
step = np.array(3 * [1], dtype=int)
# get high res data
high_res_u = get_jhtdb(
lJHTDB,
data_dir,
dataset,
field,
time_step,
start,
end,
np.array(3 * [1], dtype=int),
1,
)
# get low res data
low_res_u = get_jhtdb(
lJHTDB,
data_dir,
dataset,
field,
time_step,
start,
end,
np.array(3 * [lr_factor], dtype=int),
lr_factor,
)
# plot
if debug:
fig = plt.figure(figsize=(10, 5))
a = fig.add_subplot(121)
a.set_axis_off()
a.imshow(low_res_u[:, :, 0, 0], interpolation="none")
a = fig.add_subplot(122)
a.imshow(high_res_u[:, :, 0, 0], interpolation="none")
plt.savefig("debug_plot_" + str(i))
plt.close()
# append to list
list_low_res_u.append(np.rollaxis(low_res_u, -1, 0))
list_high_res_u.append(np.rollaxis(high_res_u, -1, 0))
# concatenate to tensor
dataset_low_res_u = np.stack(list_low_res_u, axis=0)
dataset_high_res_u = np.stack(list_high_res_u, axis=0)
return {"U_lr": dataset_low_res_u}, {"U": dataset_high_res_u}
| modulus-sym-main | examples/super_resolution/jhtdb_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code modified from https://github.com/akshaysubr/TEGAN
The following license is provided from their source,
Copyright 2020 Akshay Subramaniam, Man-Long Wong, Raunak Borker, Sravya Nimmagadda
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.modules.utils import _triple
class FlowOps(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer(
"ddx1D",
torch.Tensor(
[
-1.0 / 60.0,
3.0 / 20.0,
-3.0 / 4.0,
0.0,
3.0 / 4.0,
-3.0 / 20.0,
1.0 / 60.0,
]
),
)
def ddx(self, inpt, dx, channel, dim, padding_mode="replicate"):
var = inpt[:, channel : channel + 1, :, :, :]
ddx3D = torch.reshape(
self.ddx1D, shape=[1, 1] + dim * [1] + [-1] + (2 - dim) * [1]
)
padding = _triple(3) + _triple(3)
output = F.conv3d(
F.pad(var, padding, mode=padding_mode),
ddx3D,
stride=1,
padding=0,
bias=None,
)
output = (1.0 / dx) * output
if dim == 0:
output = output[
:,
:,
:,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
]
elif dim == 1:
output = output[
:,
:,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
:,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
]
elif dim == 2:
output = output[
:,
:,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
(self.ddx1D.shape[0] - 1) // 2 : -(self.ddx1D.shape[0] - 1) // 2,
:,
]
return output
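    # Note: self.ddx1D holds the 7-point, 6th-order central-difference
    # weights for a first derivative; ddx convolves it along a single axis
    # of a [N, C, D, H, W] field and then crops the replicate padding that
    # the convolution leaves on the other two spatial axes.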
def get_velocity_grad(self, inpt, dx, dy, dz, xdim=0):
output = {}
output["u__x"] = self.ddx(inpt, dx, channel=0, dim=xdim)
output["u__y"] = self.ddx(inpt, dy, channel=0, dim=1)
output["u__z"] = self.ddx(inpt, dz, channel=0, dim=(2 - xdim))
output["v__x"] = self.ddx(inpt, dx, channel=1, dim=xdim)
output["v__y"] = self.ddx(inpt, dy, channel=1, dim=1)
output["v__z"] = self.ddx(inpt, dz, channel=1, dim=(2 - xdim))
output["w__x"] = self.ddx(inpt, dx, channel=2, dim=xdim)
output["w__y"] = self.ddx(inpt, dy, channel=2, dim=1)
output["w__z"] = self.ddx(inpt, dz, channel=2, dim=(2 - xdim))
return output
def get_strain_rate_mag(self, vel_dict):
output = {}
output["strain"] = (
vel_dict["u__x"] ** 2
+ vel_dict["v__y"] ** 2
+ vel_dict["w__z"] ** 2
+ 2
* (
(0.5 * (vel_dict["u__y"] + vel_dict["v__x"])) ** 2
+ (0.5 * (vel_dict["u__z"] + vel_dict["w__x"])) ** 2
+ (0.5 * (vel_dict["w__y"] + vel_dict["v__z"])) ** 2
)
)
return output
def get_vorticity(self, vel_dict):
output = {}
output["omega_x"] = vel_dict["w__y"] - vel_dict["v__z"]
output["omega_y"] = vel_dict["u__z"] - vel_dict["w__x"]
output["omega_z"] = vel_dict["v__x"] - vel_dict["u__y"]
return output
def get_enstrophy(self, vort_dict):
output = {}
output["enstrophy"] = (
vort_dict["omega_x"] ** 2
+ vort_dict["omega_y"] ** 2
+ vort_dict["omega_z"] ** 2
)
return output
def get_continuity_residual(self, vel_dict):
output = {}
output["continuity"] = vel_dict["u__x"] + vel_dict["v__y"] + vel_dict["w__z"]
return output
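# Usage sketch (illustrative): derived quantities from a random velocity
# field with the [N, C=3, D, H, W] layout assumed above.
#
#   ops = FlowOps()
#   u = torch.randn(1, 3, 16, 16, 16)
#   grads = ops.get_velocity_grad(u, dx=1.0, dy=1.0, dz=1.0)
#   cont = ops.get_continuity_residual(grads)["continuity"]
#   enst = ops.get_enstrophy(ops.get_vorticity(grads))["enstrophy"]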
# def d2dx2(inpt, channel, dx, name=None):
# var = inpt[:, channel : channel + 1, :, :, :]
# ddx1D = tf.constant(
# [
# 1.0 / 90.0,
# -3.0 / 20.0,
# 3.0 / 2.0,
# -49.0 / 18.0,
# 3.0 / 2.0,
# -3.0 / 20.0,
# 1.0 / 90.0,
# ]
# ).to(inpt.device)
# ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (2 - dim) * [1])
# output = F.conv3d(var, ddx3D, padding="valid")
# output = (1.0 / dx ** 2) * output
# return output
# def get_TKE(inpt):
# TKE = torch.square(inpt[:, 0, :, :, :])
# TKE = TKE + tf.square(inpt[:, 1, :, :, :])
# TKE = TKE + tf.square(inpt[:, 2, :, :, :])
# TKE = 0.5 * TKE
# TKE = tf.expand_dims(TKE, axis=1)
# return TKE
| modulus-sym-main | examples/super_resolution/ops.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wave equation
Reference: https://en.wikipedia.org/wiki/Wave_equation
"""
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
class WaveEquation1D(PDE):
"""
Wave equation 1D
    This equation is included as an example of implementing
    your own PDE. A more general implementation of the
    wave equation is available via
    `from modulus.sym.eq.pdes.wave_equation import WaveEquation`.
Parameters
==========
c : float, string
        Wave speed coefficient. If a string is given, the wave
        speed is treated as a function of the inputs to the equation.
"""
name = "WaveEquation1D"
def __init__(self, c=1.0):
# coordinates
x = Symbol("x")
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "t": t}
# make u function
u = Function("u")(*input_variables)
# wave speed coefficient
if type(c) is str:
c = Function(c)(*input_variables)
elif type(c) in [float, int]:
c = Number(c)
# set equations
self.equations = {}
self.equations["wave_equation"] = u.diff(t, 2) - (c**2 * u.diff(x)).diff(x)
| modulus-sym-main | examples/wave_equation/wave_equation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, sin
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D
from modulus.sym.domain.constraint import (
PointwiseConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from wave_equation import WaveEquation1D
@modulus.sym.main(config_path="conf", config_name="config_inverse")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
we = WaveEquation1D(c="c")
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("t")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
invert_net = instantiate_arch(
input_keys=[Key("x"), Key("t")],
output_keys=[Key("c")],
cfg=cfg.arch.fully_connected,
)
nodes = (
we.make_nodes(detach_names=["u__x", "u__x__x", "u__t__t"])
+ [wave_net.make_node(name="wave_network")]
+ [invert_net.make_node(name="invert_network")]
)
# prepare input data
L = float(np.pi)
deltaT = 0.01
deltaX = 0.01
x = np.arange(0, L, deltaX)
t = np.arange(0, 2 * L, deltaT)
X, T = np.meshgrid(x, t)
X = np.expand_dims(X.flatten(), axis=-1)
T = np.expand_dims(T.flatten(), axis=-1)
u = np.sin(X) * (np.cos(T) + np.sin(T))
invar_numpy = {"x": X, "t": T}
outvar_numpy = {"u": u}
outvar_numpy["wave_equation"] = np.zeros_like(outvar_numpy["u"])
# add constraints to solver
# make domain
domain = Domain()
# data and pde loss
data = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=invar_numpy,
outvar=outvar_numpy,
batch_size=cfg.batch_size.data,
)
domain.add_constraint(data, "interior_data")
# add monitors
monitor = PointwiseMonitor(
invar_numpy,
output_names=["c"],
metrics={"mean_c": lambda var: torch.mean(var["c"])},
nodes=nodes,
)
domain.add_monitor(monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/wave_equation/wave_inverse.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sympy import Symbol, sin
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from wave_equation import WaveEquation1D
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
we = WaveEquation1D(c=1.0)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("t")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = we.make_nodes() + [wave_net.make_node(name="wave_network")]
# add constraints to solver
# make geometry
x, t_symbol = Symbol("x"), Symbol("t")
L = float(np.pi)
geo = Line1D(0, L)
time_range = {t_symbol: (0, 2 * L)}
# make domain
domain = Domain()
# initial condition
IC = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": sin(x), "u__t": sin(x)},
batch_size=cfg.batch_size.IC,
lambda_weighting={"u": 1.0, "u__t": 1.0},
parameterization={t_symbol: 0.0},
)
domain.add_constraint(IC, "IC")
# boundary condition
BC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0},
batch_size=cfg.batch_size.BC,
parameterization=time_range,
)
domain.add_constraint(BC, "BC")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"wave_equation": 0},
batch_size=cfg.batch_size.interior,
parameterization=time_range,
)
domain.add_constraint(interior, "interior")
# add validation data
deltaT = 0.01
deltaX = 0.01
x = np.arange(0, L, deltaX)
t = np.arange(0, 2 * L, deltaT)
X, T = np.meshgrid(x, t)
X = np.expand_dims(X.flatten(), axis=-1)
T = np.expand_dims(T.flatten(), axis=-1)
u = np.sin(X) * (np.cos(T) + np.sin(T))
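    # This closed form satisfies u_tt = u_xx with u(x, 0) = sin(x),
    # u_t(x, 0) = sin(x) and u(0, t) = u(L, t) = 0, i.e. exactly the
    # constraints imposed above.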
invar_numpy = {"x": X, "t": T}
outvar_numpy = {"u": u}
validator = PointwiseValidator(
nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=128
)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/wave_equation/wave_1d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sympy import Symbol, sin
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D
from modulus.sym.geometry.parameterization import OrderedParameterization
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.utils.io import (
ValidatorPlotter,
)
from modulus.sym.loss.loss import CausalLossNorm
from modulus.sym.key import Key
from modulus.sym.node import Node
from wave_equation import WaveEquation1D
@modulus.sym.main(config_path="conf", config_name="config_causal")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
we = WaveEquation1D(c=1.0)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("t")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = we.make_nodes() + [wave_net.make_node(name="wave_network")]
# add constraints to solver
# make geometry
x, t_symbol = Symbol("x"), Symbol("t")
L = float(np.pi)
T = 4 * L
geo = Line1D(
0, L, parameterization=OrderedParameterization({t_symbol: (0, T)}, key=t_symbol)
)
# make domain
domain = Domain()
# initial condition
IC = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": sin(x), "u__t": sin(x)},
batch_size=cfg.batch_size.IC,
lambda_weighting={"u": 100.0, "u__t": 1.0},
parameterization={t_symbol: 0.0},
)
domain.add_constraint(IC, "IC")
# boundary condition
BC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0},
batch_size=cfg.batch_size.BC,
lambda_weighting={"u": 100.0},
)
domain.add_constraint(BC, "BC")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"wave_equation": 0},
batch_size=cfg.batch_size.interior,
loss=CausalLossNorm(eps=1.0),
fixed_dataset=False,
shuffle=False,
)
domain.add_constraint(interior, "interior")
# add validation data
deltaT = 0.01
deltaX = 0.01
x = np.arange(0, L, deltaX)
t = np.arange(0, T, deltaT)
xx, tt = np.meshgrid(x, t)
X_star = np.expand_dims(xx.flatten(), axis=-1)
T_star = np.expand_dims(tt.flatten(), axis=-1)
u = np.sin(X_star) * (np.cos(T_star) + np.sin(T_star))
invar_numpy = {"x": X_star, "t": T_star}
outvar_numpy = {"u": u}
validator = PointwiseValidator(
nodes=nodes,
invar=invar_numpy,
true_outvar=outvar_numpy,
batch_size=128,
plotter=ValidatorPlotter(),
)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/wave_equation/wave_1d_causal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script to train Fourcastnet on ERA5
# Ref: https://arxiv.org/abs/2202.11214
from warnings import warn
warn(
f"This example will be depricated soon! Please switch to the FourCastNet recipe from Modulus Launch repo.",
DeprecationWarning,
)
import logging
import modulus.sym
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.key import Key
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import Constraint, SupervisedGridConstraint
from modulus.sym.domain.constraint.constraint import InfiniteDataLoader
from modulus.sym.domain.validator import GridValidator
from modulus.sym.solver import Solver
from modulus.sym.utils.io import GridValidatorPlotter
from src.dali_dataset import ERA5HDF5GridDaliIterableDataset
from src.dataset import ERA5HDF5GridDataset
from src.fourcastnet import FourcastNetArch
from src.loss import LpLoss
logger = logging.getLogger(__name__)
@modulus.sym.main(config_path="conf", config_name="config_FCN")
def run(cfg: ModulusConfig) -> None:
# load training/ test data
channels = list(range(cfg.custom.n_channels))
train_dataset = _create_dataset(
cfg.custom.train_dataset.kind,
data_dir=cfg.custom.train_dataset.data_path,
chans=channels,
tstep=cfg.custom.tstep,
n_tsteps=cfg.custom.n_tsteps,
patch_size=cfg.arch.afno.patch_size,
batch_size=cfg.batch_size.grid,
num_workers=cfg.custom.num_workers.grid,
shuffle=True,
)
test_dataset = _create_dataset(
cfg.custom.test_dataset.kind,
data_dir=cfg.custom.test_dataset.data_path,
chans=channels,
tstep=cfg.custom.tstep,
n_tsteps=cfg.custom.n_tsteps,
patch_size=cfg.arch.afno.patch_size,
n_samples_per_year=20,
batch_size=cfg.batch_size.validation,
num_workers=cfg.custom.num_workers.validation,
)
# Dataloader factory method needs to be updated before creating any constraints.
update_get_dataloader()
# define input/output keys
input_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.invar_keys]
output_keys = [Key(k, size=train_dataset.nchans) for k in train_dataset.outvar_keys]
# make list of nodes to unroll graph on
model = FourcastNetArch(
input_keys=input_keys,
output_keys=output_keys,
img_shape=test_dataset.img_shape,
patch_size=cfg.arch.afno.patch_size,
embed_dim=cfg.arch.afno.embed_dim,
depth=cfg.arch.afno.depth,
num_blocks=cfg.arch.afno.num_blocks,
)
nodes = [model.make_node(name="FCN")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
loss=LpLoss(),
num_workers=cfg.custom.num_workers.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
num_workers=cfg.custom.num_workers.validation,
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
def _create_dataset(dataset_kind: str, **kwargs):
valid_dsets = {
"default": ERA5HDF5GridDataset,
"dali": ERA5HDF5GridDaliIterableDataset,
}
dset_cls = valid_dsets.get(dataset_kind, None)
if dset_cls is None:
raise ValueError(
f"Expected one of {list(valid_dsets.keys())}, but got {dataset_kind}"
)
logger.info(f"Dataset: {dset_cls.__name__}")
return dset_cls(**kwargs)
def update_get_dataloader():
"""Monkey-patch Constraint.get_dataloader method.
DALI has its own multi-process worker functionality, similar to PyTorch DataLoader.
This function patches Constraint.get_dataloader to avoid wrapping DALI dataset
with another, redundant, layer of DataLoader.
"""
default_get_dataloader = Constraint.get_dataloader
def get_dataloader(
dataset: "Union[Dataset, IterableDataset]",
batch_size: int,
shuffle: bool,
drop_last: bool,
num_workers: int,
distributed: bool = None,
infinite: bool = True,
):
if isinstance(dataset, ERA5HDF5GridDaliIterableDataset):
if infinite:
dataset = InfiniteDataLoader(dataset)
return dataset
return default_get_dataloader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last,
num_workers=num_workers,
distributed=distributed,
infinite=infinite,
)
Constraint.get_dataloader = get_dataloader
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fourcastnet/fcn_era5.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Script to carry out Fourcastnet inference"
import omegaconf
import torch
import logging
import numpy as np
from torch.utils.data import DataLoader, Sampler
from modulus.sym.hydra import to_absolute_path
from modulus.sym.key import Key
from modulus.sym.distributed.manager import DistributedManager
from src.dataset import ERA5HDF5GridDataset
from src.fourcastnet import FourcastNetArch
from src.metrics import Metrics
logging.basicConfig(format="[%(levelname)s] - %(message)s", level=logging.INFO)
var_key_dict = {
0: "u10",
1: "v10",
2: "t2m",
3: "sp",
4: "msl",
5: "t850",
6: "u1000",
7: "v1000",
8: "z1000",
9: "u850",
10: "v850",
11: "z850",
12: "u500",
13: "v500",
14: "z500",
15: "t500",
16: "z50",
17: "r500",
18: "r850",
19: "tcwv",
}
def to_device(tensor_dict):
return {
key: torch.as_tensor(value, dtype=torch.float32, device=device)
for key, value in tensor_dict.items()
}
class SubsetSequentialBatchSampler(Sampler):
"""Custom subset sequential batch sampler for inferencer"""
def __init__(self, subset):
self.subset = subset
def __iter__(self):
for i in self.subset:
yield [i] # batch size of 1
def __len__(self):
return len(self.subset)
# load configuration
cfg = omegaconf.OmegaConf.load("conf/config_FCN.yaml")
model_path = to_absolute_path("fcn_era5.pth")
# get device
device = DistributedManager().device
# load test data
test_dataset = ERA5HDF5GridDataset(
cfg.custom.test_data_path, # Test data location e.g. /era5/20var/test
chans=list(range(cfg.custom.n_channels)),
tstep=cfg.custom.tstep,
n_tsteps=1, # set to one for inference
patch_size=cfg.arch.afno.patch_size,
)
m = Metrics(
test_dataset.img_shape,
clim_mean_path="/era5/stats/time_means.npy", # Path to climate mean
device=device,
)
# define input/output keys
input_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.invar_keys]
output_keys = [Key(k, size=test_dataset.nchans) for k in test_dataset.outvar_keys]
# create model
model = FourcastNetArch(
input_keys=input_keys,
output_keys=output_keys,
img_shape=test_dataset.img_shape,
patch_size=cfg.arch.afno.patch_size,
embed_dim=cfg.arch.afno.embed_dim,
depth=cfg.arch.afno.depth,
num_blocks=cfg.arch.afno.num_blocks,
)
# load parameters
model.load_state_dict(torch.load(model_path))
model.to(device)
logging.info(f"Loaded model {model_path}")
# define subsets of dataset to run inference
nics = 180  # Number of 2-day correlation-time samples
nsteps = 25
last = len(test_dataset) - 1 - nsteps * cfg.custom.tstep
# Variable dictionary
acc_recursive = {key: [] for key in var_key_dict.values()}
rmse_recursive = {key: [] for key in var_key_dict.values()}
# Normalization stats
mu = torch.tensor(test_dataset.mu[0]).to(device) # shape [C, 1, 1]
sd = torch.tensor(test_dataset.sd[0]).to(device) # shape [C, 1, 1]
# run inference
with torch.no_grad():
for ic in range(0, min([8 * nics + 1, last])):
subset = cfg.custom.tstep * np.arange(nsteps) + ic
if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0:
logging.info(f"Running IC at step {ic}")
# get dataloader
dataloader = DataLoader(
dataset=test_dataset,
batch_sampler=SubsetSequentialBatchSampler(subset),
pin_memory=True,
num_workers=1,
worker_init_fn=test_dataset.worker_init_fn,
)
acc_error = torch.zeros(nsteps, test_dataset.nchans)
rmse_error = torch.zeros(nsteps, test_dataset.nchans)
for tstep, (invar, true_outvar, _) in enumerate(dataloader):
if tstep % 10 == 0:
logging.info(f"ic: {ic} tstep: {tstep}/{nsteps}")
# place tensors on device
invar = to_device(invar)
true_outvar = to_device(true_outvar)
# 1. single step inference
pred_outvar_single = model(invar)
pred_single = sd * pred_outvar_single["x_t1"][0]
# 2. recursive inference
if tstep == 0:
pred_outvar_recursive = model(invar)
else:
pred_outvar_recursive = model(
{"x_t0": pred_outvar_recursive["x_t1"]}
)
            # get unnormalised target / prediction
true = sd * true_outvar["x_t1"][0]
pred_recursive = sd * pred_outvar_recursive["x_t1"][0]
# Calc metrics
rmse_error[tstep] = m.weighted_rmse(pred_recursive, true).detach().cpu()
acc_error[tstep] = m.weighted_acc(pred_recursive, true).detach().cpu()
# Save fields into dictionary
if (ic + 1) % 8 == 0 or (ic + 1) % 36 == 0 or ic == 0:
for i, fld in var_key_dict.items():
# Fields with 9 day (36) dc time
if fld == "z500" or fld == "t2m" or fld == "t850":
if (ic + 1) % 36 == 0 or ic == 0:
acc_recursive[fld].append(acc_error[:, i].numpy())
rmse_recursive[fld].append(rmse_error[:, i].numpy())
# Rest have regular 2 day (8) dc time
else:
if (ic + 1) % 8 == 0 or ic == 0:
acc_recursive[fld].append(acc_error[:, i].numpy())
rmse_recursive[fld].append(rmse_error[:, i].numpy())
# Field stacking
for var_dict in [acc_recursive, rmse_recursive]:
for key, value in var_dict.items():
print(f"{len(value)} samples for field {key}")
var_dict[key] = np.stack(value, axis=0)
np.save("rmse_recursive", rmse_recursive)
np.save("acc_recursive", acc_recursive)
| modulus-sym-main | examples/fourcastnet/inferencer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from typing import Tuple
class Metrics:
"""Class used for computing performance related metrics. Expects predictions /
targets to be of shape [C, H, W] where H is latitude dimension and W is longitude
dimension. Metrics are computed for each channel separately.
Parameters
----------
img_shape : Tuple[int]
Shape of input image (resolution for fourcastnet)
clim_mean_path : str, optional
Path to total climate mean data, needed for ACC. By default "/era5/stats/time_means.npy"
device : torch.device, optional
Pytorch device model is on, by default 'cpu'
"""
def __init__(
self,
img_shape: Tuple[int],
clim_mean_path: str = "/era5/stats/time_means.npy",
device: torch.device = "cpu",
):
self.img_shape = tuple(img_shape)
self.device = device
# Load climate mean value
self.clim_mean = torch.as_tensor(np.load(clim_mean_path))
# compute latitude weighting
nlat = img_shape[0]
lat = torch.linspace(90, -90, nlat)
lat_weight = torch.cos(torch.pi * (lat / 180))
lat_weight = nlat * lat_weight / lat_weight.sum()
self.lat_weight = lat_weight.view(1, nlat, 1)
# place on device
if self.device is not None:
self.lat_weight = self.lat_weight.to(self.device)
self.clim_mean = self.clim_mean.to(self.device)
def _check_shape(self, *args):
# checks for shape [C, H, W]
for x in args:
assert x.ndim == 3
assert tuple(x.shape[1:]) == self.img_shape
def weighted_acc(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Computes the anomaly correlation coefficient (ACC). The ACC calculation is
weighted based on the latitude.
Parameters
----------
pred : torch.Tensor
[C, H, W] Predicted tensor
target : torch.Tensor
[C, H, W] Target tensor
Returns
-------
torch.Tensor
[C] ACC values for each channel
"""
self._check_shape(pred, target)
# subtract climate means
(n_chans, img_x, img_y) = pred.shape
clim_mean = self.clim_mean[0, 0:n_chans, 0:img_x]
pred_hat = pred - clim_mean
target_hat = target - clim_mean
# Weighted mean
pred_bar = torch.sum(
self.lat_weight * pred_hat, dim=(1, 2), keepdim=True
) / torch.sum(
self.lat_weight * torch.ones_like(pred_hat), dim=(1, 2), keepdim=True
)
target_bar = torch.sum(
self.lat_weight * target_hat, dim=(1, 2), keepdim=True
) / torch.sum(
self.lat_weight * torch.ones_like(target_hat), dim=(1, 2), keepdim=True
)
pred_diff = pred_hat - pred_bar
target_diff = target_hat - target_bar
# compute weighted acc
# Ref: https://www.atmos.albany.edu/daes/atmclasses/atm401/spring_2016/ppts_pdfs/ECMWF_ACC_definition.pdf
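        # In symbols: ACC = sum(w*a*b) / sqrt(sum(w*a^2) * sum(w*b^2)),
        # where a, b are the weighted-mean-centred anomalies computed above
        # and w is the latitude weight.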
p1 = torch.sum(self.lat_weight * pred_diff * target_diff, dim=(1, 2))
p2 = torch.sum(self.lat_weight * pred_diff * pred_diff, dim=(1, 2))
p3 = torch.sum(self.lat_weight * target_diff * target_diff, dim=(1, 2))
m = p1 / torch.sqrt(p2 * p3)
return m
def weighted_rmse(self, pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Computes RMSE weighted based on latitude
Parameters
----------
pred : torch.Tensor
[C, H, W] Predicted tensor
target : torch.Tensor
[C, H, W] Target tensor
Returns
-------
torch.Tensor
[C] Weighted RSME values for each channel
"""
self._check_shape(pred, target)
# compute weighted rmse
m = torch.sqrt(torch.mean(self.lat_weight * (pred - target) ** 2, dim=(1, 2)))
return m
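# Usage sketch (illustrative; the resolution and channel count are
# placeholders, and the climate-mean file must exist at the default path):
#
#   m = Metrics(img_shape=(720, 1440), device="cpu")
#   pred = torch.randn(20, 720, 1440)
#   target = torch.randn(20, 720, 1440)
#   m.weighted_rmse(pred, target)  # shape [20], one value per channel
#   m.weighted_acc(pred, target)   # shape [20]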
| modulus-sym-main | examples/fourcastnet/src/metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import tempfile
from termcolor import cprint
from modulus.sym.distributed import DistributedManager
from src.test_dali_dataset import (
create_test_data,
test_distributed_dali_loader,
)
if __name__ == "__main__":
DistributedManager.initialize()
m = DistributedManager()
if not m.distributed:
print(
"Please run this test in distributed mode. For example, to run on 2 GPUs:\n\n"
"mpirun -np 2 python ./src/test_dali_dist.py\n"
)
raise SystemExit(1)
with tempfile.TemporaryDirectory("-data") as data_dir:
data_path = create_test_data(Path(data_dir))
test_distributed_dali_loader(data_path)
cprint("Success!", "green")
| modulus-sym-main | examples/fourcastnet/src/test_dali_dist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, List
import h5py
import numpy as np
import torch
from modulus.sym.dataset import IterableDataset
from modulus.sym.distributed import DistributedManager
from src.dataset import ERA5HDF5GridBaseDataset
try:
import nvidia.dali as dali
import nvidia.dali.plugin.pytorch as dali_pth
except ImportError:
print(
"""DALI dataset requires NVIDIA DALI package to be installed.
The package can be installed by running:
pip install nvidia-dali-cuda110
"""
)
raise SystemExit(1)
class ERA5HDF5GridDaliIterableDataset(ERA5HDF5GridBaseDataset, IterableDataset):
"""ERA5 DALI iterable-style dataset."""
def __init__(
self,
data_dir: str,
chans: List[int],
tstep: int = 1,
n_tsteps: int = 1,
patch_size: int = None,
n_samples_per_year: int = None,
stats_dir: str = None,
batch_size: int = 1,
num_workers: int = 1,
shuffle: bool = False,
):
super().__init__(
data_dir, chans, tstep, n_tsteps, patch_size, n_samples_per_year, stats_dir
)
self.batch_size = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pipe = self._create_pipeline()
def worker_init_fn(self, iworker):
super().worker_init_fn(iworker)
def __iter__(self):
# Reset the pipeline before creating an iterator to enable epochs.
self.pipe.reset()
# Create DALI PyTorch iterator.
dali_iter = dali_pth.DALIGenericIterator([self.pipe], ["invar", "outvar"])
# Read batches.
for batch_data in dali_iter:
# Only one source is expected.
assert len(batch_data) == 1
batch = batch_data[0]
invar = {self.invar_keys[0]: batch["invar"]}
invar = self._to_tensor_dict(invar)
outvar = batch["outvar"]
# Should be [N,T,C,H,W] shape.
assert outvar.ndim == 5
outvar = {self.outvar_keys[t]: outvar[:, t] for t in range(self.n_tsteps)}
outvar = self._to_tensor_dict(outvar)
lambda_weighting = {k: torch.ones_like(v) for k, v in outvar.items()}
yield invar, outvar, lambda_weighting
def _create_pipeline(self) -> dali.Pipeline:
# TODO: make num_threads and prefetch queue configurable?
pipe = dali.Pipeline(
batch_size=self.batch_size,
num_threads=2,
prefetch_queue_depth=2,
py_num_workers=self.num_workers,
device_id=DistributedManager().device.index,
py_start_method="spawn",
)
with pipe:
source = ERA5DaliExternalSource(
self.data_paths,
self.length,
self.chans,
self.n_tsteps,
self.tstep,
self.n_samples_per_year,
self.batch_size,
self.shuffle,
)
# Read current batch.
invar, outvar = dali.fn.external_source(
source,
num_outputs=2,
parallel=True,
batch=False,
)
# Move tensors to GPU as external_source won't do that.
invar = invar.gpu()
outvar = outvar.gpu()
# Crop.
h, w = self.img_shape
invar = invar[:, :h, :w]
outvar = outvar[:, :, :h, :w]
# Standardize.
invar = dali.fn.normalize(invar, mean=self.mu[0], stddev=self.sd[0])
outvar = dali.fn.normalize(outvar, mean=self.mu, stddev=self.sd)
# Set outputs.
pipe.set_outputs(invar, outvar)
return pipe
class ERA5DaliExternalSource:
"""ERA5 DALI external callable source.
For more information about DALI external source operator:
https://docs.nvidia.com/deeplearning/dali/archives/dali_1_13_0/user-guide/docs/examples/general/data_loading/parallel_external_source.html
"""
def __init__(
self,
data_paths: Iterable[str],
num_samples: int,
channels: Iterable[int],
n_tsteps: int,
tstep: int,
n_samples_per_year: int,
batch_size: int,
shuffle: bool,
):
self.data_paths = list(data_paths)
# Will be populated later once each worker starts running in its own process.
self.data_files = None
self.num_samples = num_samples
self.chans = list(channels)
self.n_tsteps = n_tsteps
self.tstep = tstep
self.n_samples_per_year = n_samples_per_year
self.batch_size = batch_size
self.shuffle = shuffle
self.last_epoch = None
self.indices = np.arange(num_samples)
# If running in distributed mode, select appropriate shard from indices.
m = DistributedManager()
if m.distributed:
# Each shard will get its own subset of indices (possibly empty).
self.indices = np.array_split(self.indices, m.world_size)[m.rank]
# Get number of full batches, ignore possible last incomplete batch for now.
# Also, DALI external source does not support incomplete batches in parallel mode.
self.num_batches = len(self.indices) // self.batch_size
def __call__(self, sample_info: dali.types.SampleInfo):
if sample_info.iteration >= self.num_batches:
raise StopIteration()
if self.data_files is None:
# This will be called once per worker. Workers are persistent,
# so there is no need to explicitly close the files - this will be done
# when corresponding pipeline/dataset is destroyed.
self.data_files = [h5py.File(path, "r") for path in self.data_paths]
# Shuffle before the next epoch starts.
if self.shuffle and sample_info.epoch_idx != self.last_epoch:
# All workers use the same rng seed so the resulting
# indices are the same across workers.
np.random.default_rng(seed=sample_info.epoch_idx).shuffle(self.indices)
self.last_epoch = sample_info.epoch_idx
# Get local indices from global index.
idx = self.indices[sample_info.idx_in_epoch]
year_idx = idx // self.n_samples_per_year
in_idx = idx % self.n_samples_per_year
#
data = self.data_files[year_idx]["fields"]
# Has [C,H,W] shape.
invar = data[in_idx, self.chans]
# Has [T,C,H,W] shape.
outvar = np.empty((self.n_tsteps,) + invar.shape, dtype=invar.dtype)
for i in range(self.n_tsteps):
out_idx = in_idx + (i + 1) * self.tstep
# If at end of dataset, just learn identity instead.
if out_idx >= data.shape[0]:
out_idx = in_idx
outvar[i] = data[out_idx, self.chans]
return invar, outvar
| modulus-sym-main | examples/fourcastnet/src/dali_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import logging
import numpy as np
from typing import List
from pathlib import Path
from modulus.sym.hydra import to_absolute_path
from modulus.sym.dataset import Dataset
class ERA5HDF5GridBaseDataset:
"""Lazy-loading ERA5 dataset.
Provides common implementation that is used in map- or iterable-style datasets.
Parameters
----------
data_dir : str
Directory where ERA5 data is stored
chans : List[int]
Defines which ERA5 variables to load
tstep : int
Defines the size of the timestep between the input and output variables
n_tsteps : int, optional
Defines how many timesteps are included in the output variables
Default is 1
patch_size : int, optional
If specified, crops input and output variables so image dimensions are
divisible by patch_size
Default is None
n_samples_per_year : int, optional
If specified, randomly selects n_samples_per_year samples from each year
rather than all of the samples per year
Default is None
    stats_dir : str, optional
        Directory containing the global mean and standard deviation numpy files
        used for normalisation. Default is the "stats" directory next to data_dir
"""
def __init__(
self,
data_dir: str,
chans: List[int],
tstep: int = 1,
n_tsteps: int = 1,
patch_size: int = None,
n_samples_per_year: int = None,
stats_dir: str = None,
**kwargs,
):
self.data_dir = Path(to_absolute_path(data_dir))
self.chans = chans
self.nchans = len(self.chans)
self.tstep = tstep
self.n_tsteps = n_tsteps
self.patch_size = patch_size
self.n_samples_per_year = n_samples_per_year
        if stats_dir is None:
            self.stats_dir = self.data_dir.parent / "stats"
        else:
            self.stats_dir = Path(to_absolute_path(stats_dir))
# check root directory exists
assert (
self.data_dir.is_dir()
), f"Error, data directory {self.data_dir} does not exist"
assert (
self.stats_dir.is_dir()
), f"Error, stats directory {self.stats_dir} does not exist"
# get all input data files
self.data_paths = sorted(self.data_dir.glob("????.h5"))
for data_path in self.data_paths:
logging.info(f"ERA5 file found: {data_path}")
self.n_years = len(self.data_paths)
logging.info(f"Number of years: {self.n_years}")
# get total number of examples and image shape from the first file,
# assuming other files have exactly the same format.
logging.info(f"Getting file stats from {self.data_paths[0]}")
with h5py.File(self.data_paths[0], "r") as f:
self.n_samples_per_year_all = f["fields"].shape[0]
self.img_shape = f["fields"].shape[2:]
logging.info(f"Number of channels available: {f['fields'].shape[1]}")
# get example indices to use
if self.n_samples_per_year is None:
self.n_samples_per_year = self.n_samples_per_year_all
self.samples = [
np.arange(self.n_samples_per_year) for _ in range(self.n_years)
]
else:
if self.n_samples_per_year > self.n_samples_per_year_all:
raise ValueError(
f"n_samples_per_year ({self.n_samples_per_year}) > number of samples available ({self.n_samples_per_year_all})!"
)
self.samples = [
np.random.choice(
np.arange(self.n_samples_per_year_all),
self.n_samples_per_year,
replace=False,
)
for _ in range(self.n_years)
]
logging.info(f"Number of samples/year: {self.n_samples_per_year}")
# get total length
self.length = self.n_years * self.n_samples_per_year
# adjust image shape if patch_size defined
if self.patch_size is not None:
self.img_shape = [s - s % self.patch_size for s in self.img_shape]
logging.info(f"Input image shape: {self.img_shape}")
# load normalisation values
# has shape [1, C, 1, 1]
self.mu = np.load(self.stats_dir / "global_means.npy")[:, self.chans]
# has shape [1, C, 1, 1]
self.sd = np.load(self.stats_dir / "global_stds.npy")[:, self.chans]
assert (
self.mu.shape == self.sd.shape == (1, self.nchans, 1, 1)
), "Error, normalisation arrays have wrong shape"
@property
def invar_keys(self):
return ["x_t0"]
@property
def outvar_keys(self):
return [f"x_t{(i+1)*self.tstep}" for i in range(self.n_tsteps)]
class ERA5HDF5GridDataset(ERA5HDF5GridBaseDataset, Dataset):
"""Map-style ERA5 dataset."""
def __getitem__(self, idx):
# get local indices from global index
year_idx = int(idx / self.n_samples_per_year)
local_idx = int(idx % self.n_samples_per_year)
in_idx = self.samples[year_idx][local_idx]
# get output indices
out_idxs = []
for i in range(self.n_tsteps):
out_idx = in_idx + (i + 1) * self.tstep
# if at end of dataset, just learn identity instead
if out_idx > (self.n_samples_per_year_all - 1):
out_idx = in_idx
out_idxs.append(out_idx)
# get data
xs = []
for idx in [in_idx] + out_idxs:
# get array
# has shape [C, H, W]
x = self.data_files[year_idx]["fields"][idx, self.chans]
assert x.ndim == 3, f"Expected 3 dimensions, but got {x.shape}"
# apply input / output normalisation (broadcasted operation)
x = (x - self.mu[0]) / self.sd[0]
# crop data if needed
if self.patch_size is not None:
x = x[..., : self.img_shape[0], : self.img_shape[1]]
xs.append(x)
# convert to tensor dicts
assert len(self.invar_keys) == 1
invar = {self.invar_keys[0]: xs[0]}
assert len(self.outvar_keys) == len(xs) - 1
outvar = {self.outvar_keys[i]: x for i, x in enumerate(xs[1:])}
invar = Dataset._to_tensor_dict(invar)
outvar = Dataset._to_tensor_dict(outvar)
lambda_weighting = Dataset._to_tensor_dict(
{k: np.ones_like(v) for k, v in outvar.items()}
)
return invar, outvar, lambda_weighting
def __len__(self):
return self.length
def worker_init_fn(self, iworker):
super().worker_init_fn(iworker)
# open all year files at once on worker thread
self.data_files = [h5py.File(path, "r") for path in self.data_paths]
| modulus-sym-main | examples/fourcastnet/src/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import Dict
Tensor = torch.Tensor
class LpLoss(torch.nn.Module):
def __init__(
self,
d: float = 2.0,
p: float = 2.0,
):
"""Relative Lp loss normalized seperately in the batch dimension.
Expects inputs of the shape [B, C, ...]
Parameters
----------
p : float, optional
Norm power, by default 2.0
"""
super(LpLoss, self).__init__()
# Dimension and Lp-norm type are postive
assert p > 0.0
self.p = p
    def _rel(self, x: Tensor, y: Tensor) -> Tensor:
num_examples = x.size()[0]
xv = x.reshape(num_examples, -1)
yv = y.reshape(num_examples, -1)
diff_norms = torch.linalg.norm(xv - yv, ord=self.p, dim=1)
y_norms = torch.linalg.norm(yv, ord=self.p, dim=1)
return torch.mean(diff_norms / y_norms)
def forward(
self,
invar: Dict[str, Tensor],
pred_outvar: Dict[str, Tensor],
true_outvar: Dict[str, Tensor],
lambda_weighting: Dict[str, Tensor],
step: int,
) -> Dict[str, float]:
losses = {}
for key, value in pred_outvar.items():
losses[key] = self._rel(pred_outvar[key], true_outvar[key])
return losses
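
if __name__ == "__main__":
    # Minimal self-check (illustrative sketch, not part of the training code):
    # the relative Lp loss of a tensor against itself must be exactly zero.
    _pred = {"x_t1": torch.rand(4, 3, 17, 32)}
    _true = {"x_t1": _pred["x_t1"].clone()}
    _losses = LpLoss()(
        invar={}, pred_outvar=_pred, true_outvar=_true, lambda_weighting={}, step=0
    )
    print(_losses)  # expected: {'x_t1': tensor(0.)}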
| modulus-sym-main | examples/fourcastnet/src/loss.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from pathlib import Path
import pytest
import shutil
from typing import List
import h5py
import numpy as np
import torch
from torch.utils.data import DataLoader
from modulus.sym.distributed import DistributedManager
from modulus.sym.domain.constraint.constraint import Constraint
from src.dali_dataset import ERA5HDF5GridDaliIterableDataset
from src.dataset import ERA5HDF5GridDataset
# TODO: hardcoded for now. Parameterize in the future.
NUM_SAMPLES = 4
NUM_CHANNELS = 3
IMG_HEIGHT = 17
IMG_WIDTH = 32
@pytest.fixture(scope="module")
def test_data(tmp_path_factory):
"""Creates a small data sample in ERA5-like format."""
data_dir = tmp_path_factory.mktemp("data")
train_dir = create_test_data(data_dir)
yield train_dir
# Cleanup.
shutil.rmtree(data_dir)
def create_test_data(data_dir: Path):
"""Creates a test data in ERA5 format."""
train_dir = data_dir / "train"
train_dir.mkdir()
stats_dir = data_dir / "stats"
stats_dir.mkdir()
# Create and write data.
data = (
np.random.default_rng(seed=1)
.normal(0.0, 1.0, (NUM_SAMPLES, NUM_CHANNELS, IMG_HEIGHT, IMG_WIDTH))
.astype(np.float32)
)
with h5py.File(train_dir / "1980.h5", mode="w") as h5file:
h5file["fields"] = data
# Write stats.
np.save(
stats_dir / "global_means.npy", np.mean(data, axis=(0, 2, 3), keepdims=True)
)
np.save(stats_dir / "global_stds.npy", np.std(data, axis=(0, 2, 3), keepdims=True))
return train_dir
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("num_workers", [1, 2])
@pytest.mark.parametrize("n_tsteps", [1, 2])
def test_dali_dataset_basic(
test_data: Path, batch_size: int, num_workers: int, n_tsteps: int
):
"""Basic test to verify DALI dataset functionality."""
data_path = test_data
channels = list(range(NUM_CHANNELS))
tstep = 1
patch_size = 8
base_loader, base_dset = _create_default_dataloader(
data_path,
channels,
tstep,
n_tsteps,
patch_size,
batch_size,
)
dali_loader, dali_dset = _create_dali_dataloader(
data_path,
channels,
tstep,
n_tsteps,
patch_size,
batch_size,
num_workers,
)
assert dali_dset.invar_keys == base_dset.invar_keys
assert dali_dset.outvar_keys == base_dset.outvar_keys
num_epochs = 2
for _ in range(num_epochs):
num_iters = 0
for batch_base, batch_dali in zip(base_loader, dali_loader):
invar_b, outvar_b, lw_b = batch_base
invar_d, outvar_d, lw_d = (
Constraint._set_device(i, "cpu") for i in batch_dali
)
# Check invars.
assert torch.allclose(invar_d["x_t0"], invar_b["x_t0"])
# Check outvars.
assert len(outvar_d) == len(outvar_b)
assert len(lw_d) == len(lw_b)
for k in outvar_d.keys():
assert torch.allclose(outvar_d[k], outvar_b[k])
# Weights are consts, so should be exactly the same.
assert (lw_d[k] == lw_b[k]).all()
num_iters += 1
assert num_iters == NUM_SAMPLES // batch_size
@pytest.mark.parametrize("batch_size", [1, 2])
@pytest.mark.parametrize("num_workers", [1, 2])
def test_dali_shuffle(test_data: Path, batch_size: int, num_workers: int):
"""Checks sample random shuffling functionality."""
data_path = test_data
n_tsteps = 1
channels = list(range(NUM_CHANNELS))
tstep = 1
patch_size = 8
dali_loader = partial(
_create_dali_dataloader,
data_path,
channels,
tstep,
n_tsteps,
patch_size,
batch_size,
num_workers,
)
base_loader, _ = dali_loader(shuffle=False)
shuf_loader, _ = dali_loader(shuffle=True)
num_epochs = 3
# Shuffled indices for each epoch.
epoch_indices = [
[2, 0, 1, 3],
[2, 0, 1, 3],
[3, 1, 2, 0],
]
for epoch in range(num_epochs):
base_batches = list(base_loader)
shuf_batches = list(shuf_loader)
# Check that shuf_batches is a permutation of the original.
x_t0_base = torch.cat([b[0]["x_t0"] for b in base_batches], dim=0)
assert x_t0_base.size(0) == NUM_SAMPLES
x_t0_shuf = torch.cat([b[0]["x_t0"] for b in shuf_batches], dim=0)
assert x_t0_shuf.size(0) == NUM_SAMPLES
for i in range(NUM_SAMPLES):
dst_idx = epoch_indices[epoch][i]
assert (
x_t0_shuf[i] == x_t0_base[dst_idx]
).all(), f"Mismatch at epoch {epoch}, sample {i}."
@pytest.mark.skip(reason="The test should be run using mpirun, not pytest.")
def test_distributed_dali_loader(data_path: Path):
n_tsteps = 1
channels = list(range(NUM_CHANNELS))
tstep = 1
patch_size = 8
batch_size = 1
num_workers = 1
m = DistributedManager()
world_size = m.world_size
# TODO: temporary restriction, remove.
assert (
world_size == 2
), "Only 2-GPU configuration is supported for now. Please run with mpirun -np 2"
base_loader, _ = _create_default_dataloader(
data_path,
channels,
tstep,
n_tsteps,
patch_size,
batch_size,
)
base_batches = list(base_loader)
x_t0_base = torch.cat([b[0]["x_t0"] for b in base_batches], dim=0)
# Make sure baseline contains all samples.
assert x_t0_base.size(0) == NUM_SAMPLES
dali_loader, _ = _create_dali_dataloader(
data_path,
channels,
tstep,
n_tsteps,
patch_size,
batch_size,
num_workers,
)
num_samples_per_rank = NUM_SAMPLES // world_size
dali_batches = list(dali_loader)
x_t0_dali = torch.cat([b[0]["x_t0"] for b in dali_batches], dim=0)
assert x_t0_dali.size(0) == num_samples_per_rank
# Check the samples are distributed across ranks properly.
idx_start = num_samples_per_rank * m.rank
assert torch.allclose(
x_t0_base[idx_start : idx_start + num_samples_per_rank], x_t0_dali.cpu()
)
def _create_default_dataloader(
data_path: Path,
channels: List[int],
tstep: int,
n_tsteps: int,
patch_size: int,
batch_size: int,
num_workers: int = 0,
shuffle: bool = False,
):
dataset = ERA5HDF5GridDataset(
data_path,
chans=channels,
tstep=tstep,
n_tsteps=n_tsteps,
patch_size=patch_size,
)
# Similar to Constraint.get_dataloader.
loader = DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
drop_last=True,
)
if num_workers == 0:
dataset.worker_init_fn(0)
return loader, dataset
def _create_dali_dataloader(
data_path: Path,
channels: List[int],
tstep: int,
n_tsteps: int,
patch_size: int,
batch_size: int,
num_workers: int,
shuffle: bool = False,
):
dataset = ERA5HDF5GridDaliIterableDataset(
data_path,
chans=channels,
tstep=tstep,
n_tsteps=n_tsteps,
patch_size=patch_size,
batch_size=batch_size,
num_workers=num_workers,
shuffle=shuffle,
)
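    # the DALI iterable dataset acts as its own dataloader, so it is returned
    # twice to mirror _create_default_dataloader's (loader, dataset) signature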
return dataset, dataset
| modulus-sym-main | examples/fourcastnet/src/test_dali_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the FCN architecture"""
import logging
import torch
from torch import Tensor
from typing import List, Tuple, Dict
from modulus.sym.models.afno.afno import AFNONet
from modulus.sym.models.arch import Arch
from modulus.sym.key import Key
class FourcastNetArch(Arch):
"Defines the FourcastNet architecture"
def __init__(
self,
input_keys: List[Key],
output_keys: List[Key],
img_shape: Tuple[int, int],
detach_keys: List[Key] = [],
patch_size: int = 16,
embed_dim: int = 256,
depth: int = 4,
num_blocks: int = 4,
) -> None:
"""Fourcastnet model. This is a simple wrapper for Modulus' AFNO model.
The only difference is that FourcastNet needs multi-step training. This class
allows the model to auto-regressively predict multiple timesteps
Parameters (Same as AFNO)
----------
input_keys : List[Key]
Input key list. The key dimension size should equal the variables channel dim.
output_keys : List[Key]
Output key list. The key dimension size should equal the variables channel dim.
img_shape : Tuple[int, int]
Input image dimensions (height, width)
detach_keys : List[Key], optional
List of keys to detach gradients, by default []
patch_size : int, optional
            Size of image patches, by default 16
embed_dim : int, optional
Embedded channel size, by default 256
depth : int, optional
Number of AFNO layers, by default 4
num_blocks : int, optional
Number of blocks in the frequency weight matrices, by default 4
"""
super().__init__(
input_keys=input_keys,
output_keys=output_keys,
detach_keys=detach_keys,
)
    # get number of timesteps to unroll
assert (
len(self.input_keys) == 1
), "Error, FourcastNet only accepts one input variable (x_t0)"
self.n_tsteps = len(self.output_keys)
logging.info(f"Unrolling FourcastNet over {self.n_tsteps} timesteps")
# get number of input/output channels
in_channels = self.input_keys[0].size
out_channels = self.output_keys[0].size
    # initialise AFNO kernel
self._impl = AFNONet(
in_channels=in_channels,
out_channels=out_channels,
patch_size=(patch_size, patch_size),
img_size=img_shape,
embed_dim=embed_dim,
depth=depth,
num_blocks=num_blocks,
)
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
# prepare input tensor
x = self.prepare_input(
input_variables=in_vars,
mask=self.input_key_dict.keys(),
detach_dict=self.detach_key_dict,
dim=1,
input_scales=self.input_scales,
)
# unroll model over multiple timesteps
ys = []
for t in range(self.n_tsteps):
x = self._impl(x)
ys.append(x)
y = torch.cat(ys, dim=1)
# prepare output dict
return self.prepare_output(
output_tensor=y,
output_var=self.output_key_dict,
dim=1,
output_scales=self.output_scales,
)
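
# Illustrative construction (hypothetical channel count and image size, not
# taken from the original file):
#
#   model = FourcastNetArch(
#       input_keys=[Key("x_t0", size=20)],
#       output_keys=[Key("x_t1", size=20), Key("x_t2", size=20)],
#       img_shape=(720, 1440),
#   )
#
# With two output keys the AFNO kernel is applied twice auto-regressively and
# the per-step predictions are concatenated along the channel dimension.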
| modulus-sym-main | examples/fourcastnet/src/fourcastnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sympy import Symbol, Eq, Abs, sin, cos
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import SequentialSolver
from modulus.sym.domain import Domain
from modulus.sym.loss.loss import CausalLossNorm
from modulus.sym.geometry.primitives_3d import Box
from modulus.sym.geometry.parameterization import OrderedParameterization
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.moving_time_window import MovingTimeWindowArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.utils.io import (
VTKUniformGrid,
)
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# time window parameters
time_window_size = 1.0
t_symbol = Symbol("t")
time_range = {t_symbol: (0, time_window_size)}
nr_time_windows = 10
# make navier stokes equations
ns = NavierStokes(nu=0.002, rho=1.0, dim=3, time=True)
# define sympy variables to parametrize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make geometry for problem
channel_length = (0.0, 2 * np.pi)
channel_width = (0.0, 2 * np.pi)
channel_height = (0.0, 2 * np.pi)
box_bounds = {x: channel_length, y: channel_width, z: channel_height}
# define geometry
rec = Box(
(channel_length[0], channel_width[0], channel_height[0]),
(channel_length[1], channel_width[1], channel_height[1]),
parameterization=OrderedParameterization(time_range, key=t_symbol),
)
# make network for current step and previous step
flow_net = FullyConnectedArch(
input_keys=[Key("x"), Key("y"), Key("z"), Key("t")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
periodicity={"x": channel_length, "y": channel_width, "z": channel_height},
layer_size=256,
)
time_window_net = MovingTimeWindowArch(flow_net, time_window_size)
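    # the moving-window arch keeps a copy of the previous window's weights, so
    # the "*_prev_step_diff" outputs can tie each window to its predecessor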
# make nodes to unroll graph on
nodes = ns.make_nodes() + [time_window_net.make_node(name="time_window_network")]
# make initial condition domain
ic_domain = Domain("initial_conditions")
# make moving window domain
window_domain = Domain("window")
# make initial condition
ic = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"u": sin(x) * cos(y) * cos(z),
"v": -cos(x) * sin(y) * cos(z),
"w": 0,
"p": 1.0 / 16 * (cos(2 * x) + cos(2 * y)) * (cos(2 * z) + 2),
},
batch_size=cfg.batch_size.initial_condition,
bounds=box_bounds,
lambda_weighting={"u": 100, "v": 100, "w": 100, "p": 100},
parameterization={t_symbol: 0},
)
ic_domain.add_constraint(ic, name="ic")
# make constraint for matching previous windows initial condition
ic = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"u_prev_step_diff": 0, "v_prev_step_diff": 0, "w_prev_step_diff": 0},
batch_size=cfg.batch_size.interior,
bounds=box_bounds,
lambda_weighting={
"u_prev_step_diff": 100,
"v_prev_step_diff": 100,
"w_prev_step_diff": 100,
},
parameterization={t_symbol: 0},
)
window_domain.add_constraint(ic, name="ic")
# make interior constraint
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
bounds=box_bounds,
batch_size=4000,
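        # causal weighting: residuals at earlier times must be resolved before
        # later times contribute significantly to the loss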
loss=CausalLossNorm(eps=1.0),
fixed_dataset=False,
shuffle=False,
)
ic_domain.add_constraint(interior, name="interior")
window_domain.add_constraint(interior, name="interior")
# add inference data for time slices
for i, specific_time in enumerate(np.linspace(0, time_window_size, 10)):
vtk_obj = VTKUniformGrid(
bounds=[(0, 2 * np.pi), (0, 2 * np.pi), (0, 2 * np.pi)],
npoints=[128, 128, 128],
export_map={"u": ["u", "v", "w"], "p": ["p"]},
)
grid_inference = PointVTKInferencer(
vtk_obj=vtk_obj,
nodes=nodes,
input_vtk_map={"x": "x", "y": "y", "z": "z"},
output_names=["u", "v", "w", "p"],
requires_grad=False,
invar={"t": np.full([128**3, 1], specific_time)},
batch_size=100000,
)
ic_domain.add_inferencer(grid_inference, name="time_slice_" + str(i).zfill(4))
window_domain.add_inferencer(
grid_inference, name="time_slice_" + str(i).zfill(4)
)
# make solver
slv = SequentialSolver(
cfg,
[(1, ic_domain), (nr_time_windows, window_domain)],
custom_update_operation=time_window_net.move_window,
)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/taylor_green/taylor_green_causal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import numpy as np
import matplotlib.pyplot as plt
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.hydra import to_absolute_path
import os
import warnings
# get list of steps
window_dirs = glob.glob("./outputs/taylor_green/network_checkpoint/*")
window_dirs.sort()
window_dirs = [x for x in window_dirs if os.path.isdir(x)]
# read each file in each dir and store tke
index = 0
time_points = []
tke_points = []
for i, d in enumerate(window_dirs):
# get list of slices
slice_files = glob.glob(d + "/inferencers/time_slice_*.npz")
slice_files.sort()
for f in slice_files:
predicted_data = np.load(f, allow_pickle=True)["arr_0"].item()
# shift t
predicted_data["t"] += i
if float(predicted_data["t"][0, 0, 0]) < 10.0:
# store time
time_points.append(float(predicted_data["t"][0, 0, 0]))
# compute tke and store
tke = np.mean(
predicted_data["u"] ** 2 / 2
+ predicted_data["v"] ** 2 / 2
+ predicted_data["w"] ** 2 / 2
)
tke_points.append(tke)
index += 1
tke_points = tke_points / np.max(tke_points)
# load validation tke data
file_path = "validation_tke"
if os.path.exists(to_absolute_path(file_path)):
    validation_tke_128 = csv_to_dict(
        to_absolute_path("validation_tke/tke_mean_Re500_N128.csv")
    )
    validation_tke_256 = csv_to_dict(
        to_absolute_path("validation_tke/tke_mean_Re500_N256.csv")
    )
plt.plot(
validation_tke_128["Time"][:, 0],
validation_tke_128["TKE_mean"][:, 0],
label="Spectral Solver (grid res: 128)",
)
plt.plot(
validation_tke_256["Time"][:, 0],
validation_tke_256["TKE_mean"][:, 0],
label="Spectral Solver (grid res: 256)",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# plot turbulent kinetic energy decay
plt.plot(time_points, tke_points, label="Modulus")
plt.legend()
plt.title("TKE")
plt.ylabel("TKE")
plt.xlabel("time")
plt.savefig("tke_plot.png")
| modulus-sym-main | examples/taylor_green/plot_results.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sympy import Symbol, Eq, Abs, sin, cos
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import SequentialSolver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.moving_time_window import MovingTimeWindowArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.utils.io import (
VTKUniformGrid,
)
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# time window parameters
time_window_size = 1.0
t_symbol = Symbol("t")
time_range = {t_symbol: (0, time_window_size)}
nr_time_windows = 10
# make navier stokes equations
ns = NavierStokes(nu=0.002, rho=1.0, dim=3, time=True)
# define sympy variables to parametrize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make geometry for problem
channel_length = (0.0, 2 * np.pi)
channel_width = (0.0, 2 * np.pi)
channel_height = (0.0, 2 * np.pi)
box_bounds = {x: channel_length, y: channel_width, z: channel_height}
# define geometry
rec = Box(
(channel_length[0], channel_width[0], channel_height[0]),
(channel_length[1], channel_width[1], channel_height[1]),
)
# make network for current step and previous step
flow_net = FullyConnectedArch(
input_keys=[Key("x"), Key("y"), Key("z"), Key("t")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
periodicity={"x": channel_length, "y": channel_width, "z": channel_height},
layer_size=256,
)
time_window_net = MovingTimeWindowArch(flow_net, time_window_size)
# make nodes to unroll graph on
nodes = ns.make_nodes() + [time_window_net.make_node(name="time_window_network")]
# make initial condition domain
ic_domain = Domain("initial_conditions")
# make moving window domain
window_domain = Domain("window")
# make initial condition
ic = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"u": sin(x) * cos(y) * cos(z),
"v": -cos(x) * sin(y) * cos(z),
"w": 0,
"p": 1.0 / 16 * (cos(2 * x) + cos(2 * y)) * (cos(2 * z) + 2),
},
batch_size=cfg.batch_size.initial_condition,
bounds=box_bounds,
lambda_weighting={"u": 100, "v": 100, "w": 100, "p": 100},
parameterization={t_symbol: 0},
)
ic_domain.add_constraint(ic, name="ic")
# make constraint for matching previous windows initial condition
ic = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"u_prev_step_diff": 0, "v_prev_step_diff": 0, "w_prev_step_diff": 0},
batch_size=cfg.batch_size.interior,
bounds=box_bounds,
lambda_weighting={
"u_prev_step_diff": 100,
"v_prev_step_diff": 100,
"w_prev_step_diff": 100,
},
parameterization={t_symbol: 0},
)
window_domain.add_constraint(ic, name="ic")
# make interior constraint
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
bounds=box_bounds,
batch_size=4094,
parameterization=time_range,
)
ic_domain.add_constraint(interior, name="interior")
window_domain.add_constraint(interior, name="interior")
# add inference data for time slices
for i, specific_time in enumerate(np.linspace(0, time_window_size, 10)):
vtk_obj = VTKUniformGrid(
bounds=[(0, 2 * np.pi), (0, 2 * np.pi), (0, 2 * np.pi)],
npoints=[128, 128, 128],
export_map={"u": ["u", "v", "w"], "p": ["p"]},
)
grid_inference = PointVTKInferencer(
vtk_obj=vtk_obj,
nodes=nodes,
input_vtk_map={"x": "x", "y": "y", "z": "z"},
output_names=["u", "v", "w", "p"],
requires_grad=False,
invar={"t": np.full([128**3, 1], specific_time)},
batch_size=100000,
)
ic_domain.add_inferencer(grid_inference, name="time_slice_" + str(i).zfill(4))
window_domain.add_inferencer(
grid_inference, name="time_slice_" + str(i).zfill(4)
)
# make solver
slv = SequentialSolver(
cfg,
[(1, ic_domain), (nr_time_windows, window_domain)],
custom_update_operation=time_window_net.move_window,
)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/taylor_green/taylor_green.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.sparse.linalg import eigsh
from scipy.sparse import diags
def Laplacian_1D_eig(a, b, N, eps=lambda x: np.ones_like(x), k=3):
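    """Solve the generalized eigenproblem -u'' = lambda * eps(x) * u on (a, b)
    with homogeneous Dirichlet boundary conditions, discretized on N grid
    points. Returns the k smallest eigenvalues, the zero-padded normalized
    eigenvectors, and the grid, all as float32 arrays."""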
n = N - 2
h = (b - a) / (N - 1)
L = diags([1, -2, 1], [-1, 0, 1], shape=(n, n))
L = -L / h**2
x = np.linspace(a, b, num=N)
M = diags([eps(x[1:-1])], [0])
eigvals, eigvecs = eigsh(L, k=k, M=M, which="SM")
eigvecs = np.vstack((np.zeros((1, k)), eigvecs, np.zeros((1, k))))
norm_eigvecs = np.linalg.norm(eigvecs, axis=0)
eigvecs /= norm_eigvecs
return eigvals.astype(np.float32), eigvecs.astype(np.float32), x.astype(np.float32)
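
if __name__ == "__main__":
    # Quick self-check (illustrative sketch): with eps = 1 the Dirichlet
    # eigenvalues of -d2/dx2 on (0, pi) are n**2 = 1, 4, 9, ...
    eigvals, eigvecs, x = Laplacian_1D_eig(0.0, np.pi, 500, k=3)
    print(eigvals)  # approximately [1.0, 4.0, 9.0]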
| modulus-sym-main | examples/waveguide/EM_Utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, pi, sin, Number, Eq, And
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.utils.io.plotter import InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.eq.pdes.electromagnetic import PEC, SommerfeldBC, MaxwellFreqReal
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for domain
length = 2
height = 2
width = 2
eigenmode = [1]
wave_number = 16.0 # wave_number = freq/c
waveguide_port = Number(0)
for k in eigenmode:
waveguide_port += sin(k * pi * y / length) * sin(k * pi * z / height)
# define geometry
rec = Box((0, 0, 0), (width, length, height))
# make list of nodes to unroll graph on
hm = MaxwellFreqReal(k=wave_number)
pec = PEC()
pml = SommerfeldBC()
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("ux"), Key("uy"), Key("uz")],
frequencies=("axis,diagonal", [i / 2.0 for i in range(int(wave_number) + 1)]),
frequencies_params=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number) + 1)],
),
cfg=cfg.arch.modified_fourier,
)
nodes = (
hm.make_nodes()
+ pec.make_nodes()
+ pml.make_nodes()
+ [wave_net.make_node(name="wave_network")]
)
waveguide_domain = Domain()
wall_PEC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"PEC_x": 0.0, "PEC_y": 0.0, "PEC_z": 0.0},
batch_size=cfg.batch_size.PEC,
lambda_weighting={"PEC_x": 100.0, "PEC_y": 100.0, "PEC_z": 100.0},
criteria=And(~Eq(x, 0), ~Eq(x, width)),
)
waveguide_domain.add_constraint(wall_PEC, "PEC")
Waveguide_port = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"uz": waveguide_port},
batch_size=cfg.batch_size.Waveguide_port,
lambda_weighting={"uz": 100.0},
criteria=Eq(x, 0),
)
waveguide_domain.add_constraint(Waveguide_port, "Waveguide_port")
ABC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={
"SommerfeldBC_real_x": 0.0,
"SommerfeldBC_real_y": 0.0,
"SommerfeldBC_real_z": 0.0,
},
batch_size=cfg.batch_size.ABC,
lambda_weighting={
"SommerfeldBC_real_x": 10.0,
"SommerfeldBC_real_y": 10.0,
"SommerfeldBC_real_z": 10.0,
},
criteria=Eq(x, width),
)
waveguide_domain.add_constraint(ABC, "ABC")
Interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"Maxwell_Freq_real_x": 0,
"Maxwell_Freq_real_y": 0.0,
"Maxwell_Freq_real_z": 0.0,
},
batch_size=cfg.batch_size.Interior,
bounds={x: (0, width), y: (0, length), z: (0, height)},
lambda_weighting={
"Maxwell_Freq_real_x": 1.0 / wave_number**2,
"Maxwell_Freq_real_y": 1.0 / wave_number**2,
"Maxwell_Freq_real_z": 1.0 / wave_number**2,
},
fixed_dataset=False,
)
waveguide_domain.add_constraint(Interior, "Interior")
# add inferencer data
interior_points = rec.sample_interior(
10000, bounds={x: (0, width), y: (0, length), z: (0, height)}
)
numpy_inference = PointwiseInferencer(
nodes=nodes,
invar=interior_points,
output_names=["ux", "uy", "uz"],
plotter=InferencerPlotter(),
batch_size=2048,
)
waveguide_domain.add_inferencer(numpy_inference, "Inf" + str(wave_number).zfill(4))
# make solver
slv = Solver(cfg, waveguide_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/waveguide/cavity_3D/waveguide3D.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import sys
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from sympy import Symbol, Eq, Heaviside, sqrt
from sympy.logic.boolalg import And
import numpy as np
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.inferencer import VoxelInferencer
from modulus.sym.utils.io.plotter import InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.eq.pdes.electromagnetic import PEC, SommerfeldBC, MaxwellFreqReal
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for domain
length = 1
height = 1
width = 1
len_slab = 0.2
eps0 = 1.0
eps1 = 1.5
eps_sympy = sqrt(
eps0
+ (Heaviside(y + len_slab / 2) - Heaviside(y - len_slab / 2))
* (Heaviside(z + len_slab / 2) - Heaviside(z - len_slab / 2))
* (eps1 - eps0)
)
wave_number = 16.0 # wave_number = freq/c
file_path = "../validation/2Dwaveguideport.csv"
if not os.path.exists(to_absolute_path(file_path)):
warnings.warn(
f"Directory {file_path} does not exist. Cannot continue. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
sys.exit()
mapping = {"x": "x", "y": "y", **{"u" + str(k): "u" + str(k) for k in range(6)}}
    data_var = csv_to_dict(to_absolute_path(file_path), mapping)
waveguide_port_invar_numpy = {
"x": np.zeros_like(data_var["x"]) - 0.5,
"y": data_var["x"],
"z": data_var["y"],
}
waveguide_port_outvar_numpy = {"uz": data_var["u0"]}
# define geometry
rec = Box(
(-width / 2, -length / 2, -height / 2), (width / 2, length / 2, height / 2)
)
# make list of nodes to unroll graph on
hm = MaxwellFreqReal(k=wave_number * eps_sympy)
pec = PEC()
pml = SommerfeldBC()
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("ux"), Key("uy"), Key("uz")],
frequencies=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number * np.sqrt(eps1)) * 2 + 1)],
),
frequencies_params=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number * np.sqrt(eps1)) * 2 + 1)],
),
cfg=cfg.arch.modified_fourier,
)
nodes = (
hm.make_nodes()
+ pec.make_nodes()
+ pml.make_nodes()
+ [wave_net.make_node(name="wave_network")]
)
waveguide_domain = Domain()
wall_PEC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"PEC_x": 0.0, "PEC_y": 0.0, "PEC_z": 0.0},
batch_size=cfg.batch_size.PEC,
lambda_weighting={"PEC_x": 100.0, "PEC_y": 100.0, "PEC_z": 100.0},
criteria=And(~Eq(x, -width / 2), ~Eq(x, width / 2)),
fixed_dataset=False,
)
waveguide_domain.add_constraint(wall_PEC, "PEC")
Waveguide_port = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=waveguide_port_invar_numpy,
outvar=waveguide_port_outvar_numpy,
batch_size=cfg.batch_size.Waveguide_port,
lambda_weighting={"uz": np.full_like(waveguide_port_invar_numpy["x"], 0.5)},
)
waveguide_domain.add_constraint(Waveguide_port, "Waveguide_port")
ABC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={
"SommerfeldBC_real_x": 0.0,
"SommerfeldBC_real_y": 0.0,
"SommerfeldBC_real_z": 0.0,
},
batch_size=cfg.batch_size.ABC,
lambda_weighting={
"SommerfeldBC_real_x": 10.0,
"SommerfeldBC_real_y": 10.0,
"SommerfeldBC_real_z": 10.0,
},
criteria=Eq(x, width / 2),
fixed_dataset=False,
)
waveguide_domain.add_constraint(ABC, "ABC")
Interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={
"Maxwell_Freq_real_x": 0,
"Maxwell_Freq_real_y": 0.0,
"Maxwell_Freq_real_z": 0.0,
},
batch_size=cfg.batch_size.Interior,
lambda_weighting={
"Maxwell_Freq_real_x": 1.0 / wave_number**2,
"Maxwell_Freq_real_y": 1.0 / wave_number**2,
"Maxwell_Freq_real_z": 1.0 / wave_number**2,
},
fixed_dataset=False,
)
waveguide_domain.add_constraint(Interior, "Interior")
# add inferencer data
slab_inference = VoxelInferencer(
bounds=[[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]],
npoints=[128, 128, 128],
nodes=nodes,
output_names=["ux", "uy", "uz"],
)
waveguide_domain.add_inferencer(slab_inference, "Inf" + str(int(wave_number)))
# make solver
slv = Solver(cfg, waveguide_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/waveguide/slab_3D/slab_3D.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq, Heaviside, sqrt
from sympy.logic.boolalg import Or
import numpy as np
from scipy.sparse.linalg import eigsh
from scipy.sparse import diags
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import VoxelInferencer
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.eq.pdes.wave_equation import HelmholtzEquation
from modulus.sym.eq.pdes.navier_stokes import GradNormal
x, y = Symbol("x"), Symbol("y")
# helper function for computing laplacian eigen values
def Laplacian_1D_eig(a, b, N, eps=lambda x: np.ones_like(x), k=3):
n = N - 2
h = (b - a) / (N - 1)
L = diags([1, -2, 1], [-1, 0, 1], shape=(n, n))
L = -L / h**2
x = np.linspace(a, b, num=N)
M = diags([eps(x[1:-1])], [0])
eigvals, eigvecs = eigsh(L, k=k, M=M, which="SM")
eigvecs = np.vstack((np.zeros((1, k)), eigvecs, np.zeros((1, k))))
norm_eigvecs = np.linalg.norm(eigvecs, axis=0)
eigvecs /= norm_eigvecs
return eigvals.astype(np.float32), eigvecs.astype(np.float32), x.astype(np.float32)
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for domain
height = 2
width = 2
len_slab = 0.6
eps0 = 1.0
eps1 = 2.0
eps_numpy = lambda y: np.where(
np.logical_and(y > (height - len_slab) / 2, y < (height + len_slab) / 2),
eps1,
eps0,
)
eps_sympy = sqrt(
eps0
+ (
Heaviside(y - (height - len_slab) / 2)
- Heaviside(y - (height + len_slab) / 2)
)
* (eps1 - eps0)
)
eigvals, eigvecs, yv = Laplacian_1D_eig(0, height, 1000, eps=eps_numpy, k=3)
yv = yv.reshape((-1, 1))
eigenmode = [1]
wave_number = 16.0 # wave_number = freq/c
waveguide_port_invar_numpy = {"x": np.zeros_like(yv), "y": yv}
waveguide_port_outvar_numpy = {"u": 10 * eigvecs[:, 0:1]}
# define geometry
rec = Rectangle((0, 0), (width, height))
# make list of nodes to unroll graph on
hm = HelmholtzEquation(u="u", k=wave_number * eps_sympy, dim=2)
gn = GradNormal(T="u", dim=2, time=False)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number * np.sqrt(eps1)) * 2 + 1)],
),
frequencies_params=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number * np.sqrt(eps1)) * 2 + 1)],
),
cfg=cfg.arch.modified_fourier,
)
nodes = (
hm.make_nodes() + gn.make_nodes() + [wave_net.make_node(name="wave_network")]
)
waveguide_domain = Domain()
PEC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0.0},
batch_size=cfg.batch_size.PEC,
lambda_weighting={"u": 100.0},
criteria=Or(Eq(y, 0), Eq(y, height)),
)
waveguide_domain.add_constraint(PEC, "PEC")
Waveguide_port = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=waveguide_port_invar_numpy,
outvar=waveguide_port_outvar_numpy,
batch_size=cfg.batch_size.Waveguide_port,
lambda_weighting={"u": np.full_like(yv, 0.5)},
)
waveguide_domain.add_constraint(Waveguide_port, "Waveguide_port")
ABC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"normal_gradient_u": 0.0},
batch_size=cfg.batch_size.ABC,
lambda_weighting={"normal_gradient_u": 10.0},
criteria=Eq(x, width),
)
waveguide_domain.add_constraint(ABC, "ABC")
Interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"helmholtz": 0.0},
batch_size=cfg.batch_size.Interior,
lambda_weighting={
"helmholtz": 1.0 / wave_number**2,
},
)
waveguide_domain.add_constraint(Interior, "Interior")
# add inferencer data
slab_inference = VoxelInferencer(
bounds=[[0, 2], [0, 2]],
npoints=[256, 256],
nodes=nodes,
output_names=["u"],
)
waveguide_domain.add_inferencer(slab_inference, "Inf" + str(int(wave_number)))
# make solver
slv = Solver(cfg, waveguide_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/waveguide/slab_2D/slab_2D.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, pi, sin, Number, Eq
from sympy.logic.boolalg import Or
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.key import Key
from modulus.sym.eq.pdes.wave_equation import HelmholtzEquation
from modulus.sym.eq.pdes.navier_stokes import GradNormal
x, y = Symbol("x"), Symbol("y")
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for domain
height = 2
width = 2
eigenmode = [2]
wave_number = 32.0 # wave_number = freq/c
waveguide_port = Number(0)
for k in eigenmode:
waveguide_port += sin(k * pi * y / height)
# define geometry
rec = Rectangle((0, 0), (width, height))
# make list of nodes to unroll graph on
hm = HelmholtzEquation(u="u", k=wave_number, dim=2)
gn = GradNormal(T="u", dim=2, time=False)
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
frequencies=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number) * 2 + 1)],
),
frequencies_params=(
"axis,diagonal",
[i / 2.0 for i in range(int(wave_number) * 2 + 1)],
),
cfg=cfg.arch.modified_fourier,
)
nodes = (
hm.make_nodes() + gn.make_nodes() + [wave_net.make_node(name="wave_network")]
)
waveguide_domain = Domain()
PEC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0.0},
batch_size=cfg.batch_size.PEC,
lambda_weighting={"u": 100.0},
criteria=Or(Eq(y, 0), Eq(y, height)),
)
waveguide_domain.add_constraint(PEC, "PEC")
Waveguide_port = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": waveguide_port},
batch_size=cfg.batch_size.Waveguide_port,
lambda_weighting={"u": 100.0},
criteria=Eq(x, 0),
)
waveguide_domain.add_constraint(Waveguide_port, "Waveguide_port")
ABC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"normal_gradient_u": 0.0},
batch_size=cfg.batch_size.ABC,
lambda_weighting={"normal_gradient_u": 10.0},
criteria=Eq(x, width),
)
waveguide_domain.add_constraint(ABC, "ABC")
Interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"helmholtz": 0.0},
batch_size=cfg.batch_size.Interior,
bounds={x: (0, width), y: (0, height)},
lambda_weighting={
"helmholtz": 1.0 / wave_number**2,
},
)
waveguide_domain.add_constraint(Interior, "Interior")
# add validation data
file_path = "../validation/2Dwaveguide_32_2.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"x": "x", "y": "y", "u": "u"}
validation_var = csv_to_dict(to_absolute_path(file_path), mapping)
validation_invar_numpy = {
key: value for key, value in validation_var.items() if key in ["x", "y"]
}
validation_outvar_numpy = {
key: value for key, value in validation_var.items() if key in ["u"]
}
csv_validator = PointwiseValidator(
nodes=nodes,
invar=validation_invar_numpy,
true_outvar=validation_outvar_numpy,
batch_size=2048,
plotter=ValidatorPlotter(),
)
waveguide_domain.add_validator(csv_validator)
# add inferencer data
csv_inference = PointwiseInferencer(
nodes=nodes,
invar=validation_invar_numpy,
output_names=["u"],
plotter=InferencerPlotter(),
batch_size=2048,
)
waveguide_domain.add_inferencer(csv_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, waveguide_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/waveguide/cavity_2D/waveguide2D_TMz.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Function, Number
from modulus.sym.eq.pde import PDE
class SpringMass(PDE):
name = "SpringMass"
def __init__(self, k=(2, 1, 1, 2), m=(1, 1, 1)):
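        """ODE system for three masses in a line coupled by four springs
        (wall - k1 - m1 - k2 - m2 - k3 - m3 - k4 - wall). Each stiffness in k
        and mass in m may be a number or the name of a function of t."""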
self.k = k
self.m = m
k1 = k[0]
k2 = k[1]
k3 = k[2]
k4 = k[3]
m1 = m[0]
m2 = m[1]
m3 = m[2]
t = Symbol("t")
input_variables = {"t": t}
x1 = Function("x1")(*input_variables)
x2 = Function("x2")(*input_variables)
x3 = Function("x3")(*input_variables)
        # promote strings to sympy Functions of t and numerics to sympy Numbers
        def _to_expr(val):
            if type(val) is str:
                return Function(val)(*input_variables)
            elif type(val) in [float, int]:
                return Number(val)
            return val

        k1, k2, k3, k4 = (_to_expr(v) for v in (k1, k2, k3, k4))
        m1, m2, m3 = (_to_expr(v) for v in (m1, m2, m3))
self.equations = {}
self.equations["ode_x1"] = m1 * (x1.diff(t)).diff(t) + k1 * x1 - k2 * (x2 - x1)
self.equations["ode_x2"] = (
m2 * (x2.diff(t)).diff(t) + k2 * (x2 - x1) - k3 * (x3 - x2)
)
self.equations["ode_x3"] = m3 * (x3.diff(t)).diff(t) + k3 * (x3 - x2) + k4 * x3
| modulus-sym-main | examples/ode_spring_mass/spring_mass_ode.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Point1D
from modulus.sym.domain.constraint import PointwiseBoundaryConstraint
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from spring_mass_ode import SpringMass
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
sm = SpringMass(k=(2, 1, 1, 2), m=(1, 1, 1))
sm_net = instantiate_arch(
input_keys=[Key("t")],
output_keys=[Key("x1"), Key("x2"), Key("x3")],
cfg=cfg.arch.fully_connected,
)
nodes = sm.make_nodes() + [sm_net.make_node(name="spring_mass_network")]
# add constraints to solver
# make geometry
geo = Point1D(0)
t_max = 10.0
t_symbol = Symbol("t")
x = Symbol("x")
time_range = {t_symbol: (0, t_max)}
# make domain
domain = Domain()
# initial conditions
IC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"x1": 1.0, "x2": 0, "x3": 0, "x1__t": 0, "x2__t": 0, "x3__t": 0},
batch_size=cfg.batch_size.IC,
lambda_weighting={
"x1": 1.0,
"x2": 1.0,
"x3": 1.0,
"x1__t": 1.0,
"x2__t": 1.0,
"x3__t": 1.0,
},
parameterization={t_symbol: 0},
)
domain.add_constraint(IC, name="IC")
# solve over given time period
interior = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"ode_x1": 0.0, "ode_x2": 0.0, "ode_x3": 0.0},
batch_size=cfg.batch_size.interior,
parameterization=time_range,
)
domain.add_constraint(interior, "interior")
# add validation data
deltaT = 0.001
t = np.arange(0, t_max, deltaT)
t = np.expand_dims(t, axis=-1)
invar_numpy = {"t": t}
outvar_numpy = {
"x1": (1 / 6) * np.cos(t)
+ (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
"x2": (2 / 6) * np.cos(t)
+ (0 / 2) * np.cos(np.sqrt(3) * t)
- (1 / 3) * np.cos(2 * t),
"x3": (1 / 6) * np.cos(t)
- (1 / 2) * np.cos(np.sqrt(3) * t)
+ (1 / 3) * np.cos(2 * t),
}
validator = PointwiseValidator(
nodes=nodes, invar=invar_numpy, true_outvar=outvar_numpy, batch_size=1024
)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/ode_spring_mass/spring_mass_solver.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import matplotlib.pyplot as plt
base_dir = "outputs/spring_mass_solver/validators/"
# plot in 1d
data = np.load(base_dir + "validator.npz", allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
plt.plot(data["t"], data["true_x1"], label="True x1")
plt.plot(data["t"], data["true_x2"], label="True x2")
plt.plot(data["t"], data["true_x3"], label="True x3")
plt.plot(data["t"], data["pred_x1"], label="Pred x1")
plt.plot(data["t"], data["pred_x2"], label="Pred x2")
plt.plot(data["t"], data["pred_x3"], label="Pred x3")
plt.legend()
plt.savefig("comparison.png")
| modulus-sym-main | examples/ode_spring_mass/plot_results_spring.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import warnings
import torch
import numpy as np
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.deeponet import DeepONetArch
from modulus.sym.domain.constraint.continuous import DeepONetConstraint
from modulus.sym.domain.validator.discrete import GridValidator
from modulus.sym.dataset.discrete import DictGridDataset
from modulus.sym.key import Key
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# [init-model]
trunk_net = FourierNetArch(
input_keys=[Key("x")],
output_keys=[Key("trunk", 128)],
)
branch_net = FullyConnectedArch(
input_keys=[Key("a", 100)],
output_keys=[Key("branch", 128)],
)
deeponet = DeepONetArch(
output_keys=[Key("u")],
branch_net=branch_net,
trunk_net=trunk_net,
)
nodes = [deeponet.make_node("deepo")]
# [init-model]
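    # Rough mental model of the DeepONet above (hedged; the exact combination
    # is internal to DeepONetArch): the 128-dim branch output b(a) and trunk
    # output t(x) are combined roughly as u(x; a) ~ sum_k b_k(a) * t_k(x),
    # i.e. the branch encodes the sampled input function a and the trunk the
    # query coordinate x.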
# [datasets]
# load training data
file_path = "data/anti_derivative.npy"
if not os.path.exists(to_absolute_path(file_path)):
warnings.warn(
f"Directory {file_path} does not exist. Cannot continue. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
sys.exit()
data = np.load(to_absolute_path(file_path), allow_pickle=True).item()
x_train = data["x_train"]
a_train = data["a_train"]
u_train = data["u_train"]
# load test data
x_test = data["x_test"]
a_test = data["a_test"]
u_test = data["u_test"]
# [datasets]
# [constraint]
# make domain
domain = Domain()
data = DeepONetConstraint.from_numpy(
nodes=nodes,
invar={"a": a_train, "x": x_train},
outvar={"u": u_train},
batch_size=cfg.batch_size.train,
)
domain.add_constraint(data, "data")
# [constraint]
# [validator]
# add validators
for k in range(10):
invar_valid = {
"a": a_test[k * 100 : (k + 1) * 100],
"x": x_test[k * 100 : (k + 1) * 100],
}
outvar_valid = {"u": u_test[k * 100 : (k + 1) * 100]}
dataset = DictGridDataset(invar_valid, outvar_valid)
validator = GridValidator(nodes=nodes, dataset=dataset, plotter=None)
domain.add_validator(validator, "validator_{}".format(k))
# [validator]
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/anti_derivative/data_informed.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import warnings
import torch
import numpy as np
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.deeponet import DeepONetArch
from modulus.sym.domain.constraint.continuous import DeepONetConstraint
from modulus.sym.domain.validator.discrete import GridValidator
from modulus.sym.dataset.discrete import DictGridDataset
from modulus.sym.key import Key
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# [init-model]
# make list of nodes to unroll graph on
trunk_net = FourierNetArch(
input_keys=[Key("x")],
output_keys=[Key("trunk", 128)],
)
branch_net = FullyConnectedArch(
input_keys=[Key("a", 100)],
output_keys=[Key("branch", 128)],
)
deeponet = DeepONetArch(
output_keys=[Key("u")],
branch_net=branch_net,
trunk_net=trunk_net,
)
nodes = [deeponet.make_node("deepo")]
# [init-model]
# [datasets]
# load training data
file_path = "data/anti_derivative.npy"
if not os.path.exists(to_absolute_path(file_path)):
warnings.warn(
f"Directory {file_path} does not exist. Cannot continue. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
sys.exit()
data = np.load(to_absolute_path(file_path), allow_pickle=True).item()
x_train = data["x_train"]
a_train = data["a_train"]
u_train = data["u_train"]
x_r_train = data["x_r_train"]
a_r_train = data["a_r_train"]
u_r_train = data["u_r_train"]
# load test data
x_test = data["x_test"]
a_test = data["a_test"]
u_test = data["u_test"]
# [datasets]
# [constraint1]
# make domain
domain = Domain()
# add constraints to domain
IC = DeepONetConstraint.from_numpy(
nodes=nodes,
invar={"a": a_train, "x": np.zeros_like(x_train)},
outvar={"u": np.zeros_like(u_train)},
batch_size=cfg.batch_size.train,
)
domain.add_constraint(IC, "IC")
# [constraint1]
# [constraint2]
interior = DeepONetConstraint.from_numpy(
nodes=nodes,
invar={"a": a_r_train, "x": x_r_train},
outvar={"u__x": u_r_train},
batch_size=cfg.batch_size.train,
)
domain.add_constraint(interior, "Residual")
# [constraint2]
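    # Note: the "u__x" key asks Modulus to form du/dx of the network output by
    # automatic differentiation, so this constraint enforces the ODE residual
    # u'(x) = (supplied derivative data) at the collocation points x_r_train;
    # together with the u(0) = 0 constraint above, this pins u down as the
    # antiderivative of a.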
# [validator]
# add validators
for k in range(10):
invar_valid = {
"a": a_test[k * 100 : (k + 1) * 100],
"x": x_test[k * 100 : (k + 1) * 100],
}
outvar_valid = {"u": u_test[k * 100 : (k + 1) * 100]}
dataset = DictGridDataset(invar_valid, outvar_valid)
validator = GridValidator(nodes=nodes, dataset=dataset, plotter=None)
domain.add_validator(validator, "validator_{}".format(k))
# [validator]
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/anti_derivative/physics_informed.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
import numpy as np
from sympy import Symbol, sqrt, Max
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.geometry.tessellation import Tessellation
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# read stl files to make geometry
point_path = to_absolute_path("./stl_files")
inlet_mesh = Tessellation.from_stl(
point_path + "/aneurysm_inlet.stl", airtight=False
)
outlet_mesh = Tessellation.from_stl(
point_path + "/aneurysm_outlet.stl", airtight=False
)
noslip_mesh = Tessellation.from_stl(
point_path + "/aneurysm_noslip.stl", airtight=False
)
integral_mesh = Tessellation.from_stl(
point_path + "/aneurysm_integral.stl", airtight=False
)
interior_mesh = Tessellation.from_stl(
point_path + "/aneurysm_closed.stl", airtight=True
)
# params
nu = 0.025
inlet_vel = 1.5
# inlet velocity profile
def circular_parabola(x, y, z, center, normal, radius, max_vel):
centered_x = x - center[0]
centered_y = y - center[1]
centered_z = z - center[2]
distance = sqrt(centered_x**2 + centered_y**2 + centered_z**2)
parabola = max_vel * Max((1 - (distance / radius) ** 2), 0)
return normal[0] * parabola, normal[1] * parabola, normal[2] * parabola
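    # Worked example of the profile above (sketch): at the inlet center the
    # distance is 0, so the magnitude equals max_vel; it decays quadratically
    # to 0 at distance == radius, and Max(..., 0) clips points sampled just
    # outside the radius so the inflow never points backwards.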
# normalize meshes
def normalize_mesh(mesh, center, scale):
mesh = mesh.translate([-c for c in center])
mesh = mesh.scale(scale)
return mesh
# normalize invars
def normalize_invar(invar, center, scale, dims=2):
invar["x"] -= center[0]
invar["y"] -= center[1]
invar["z"] -= center[2]
invar["x"] *= scale
invar["y"] *= scale
invar["z"] *= scale
if "area" in invar.keys():
invar["area"] *= scale**dims
return invar
# scale and normalize mesh and openfoam data
center = (-18.40381048596882, -50.285383353981196, 12.848136936899031)
scale = 0.4
inlet_mesh = normalize_mesh(inlet_mesh, center, scale)
outlet_mesh = normalize_mesh(outlet_mesh, center, scale)
noslip_mesh = normalize_mesh(noslip_mesh, center, scale)
integral_mesh = normalize_mesh(integral_mesh, center, scale)
interior_mesh = normalize_mesh(interior_mesh, center, scale)
# geom params
inlet_normal = (0.8526, -0.428, 0.299)
inlet_area = 21.1284 * (scale**2)
inlet_center = (-4.24298030045776, 4.082857101816247, -4.637790193399717)
inlet_radius = np.sqrt(inlet_area / np.pi)
outlet_normal = (0.33179, 0.43424, 0.83747)
outlet_area = 12.0773 * (scale**2)
outlet_radius = np.sqrt(outlet_area / np.pi)
# make aneurysm domain
domain = Domain()
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu * scale, rho=1.0, dim=3, time=False)
normal_dot_vel = NormalDotVec(["u", "v", "w"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# add constraints to solver
# inlet
u, v, w = circular_parabola(
Symbol("x"),
Symbol("y"),
Symbol("z"),
center=inlet_center,
normal=inlet_normal,
radius=inlet_radius,
max_vel=inlet_vel,
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet_mesh,
outvar={"u": u, "v": v, "w": w},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet_mesh,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=noslip_mesh,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
)
domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=interior_mesh,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.interior,
)
domain.add_constraint(interior, "interior")
# Integral Continuity 1
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=outlet_mesh,
outvar={"normal_dot_vel": 2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
)
domain.add_constraint(integral_continuity, "integral_continuity_1")
# Integral Continuity 2
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=integral_mesh,
outvar={"normal_dot_vel": -2.540},
batch_size=1,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
)
    domain.add_constraint(integral_continuity, "integral_continuity_2")
# add validation data
file_path = "./openfoam/aneurysm_parabolicInlet_sol0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p": "p",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_invar = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_invar = normalize_invar(openfoam_invar, center, scale, dims=3)
openfoam_outvar = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar,
true_outvar=openfoam_outvar,
batch_size=4096,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add pressure monitor
pressure_monitor = PointwiseMonitor(
inlet_mesh.sample_boundary(16),
output_names=["p"],
metrics={"pressure_drop": lambda var: torch.mean(var["p"])},
nodes=nodes,
)
domain.add_monitor(pressure_monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/aneurysm/aneurysm.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This PDE problem was taken from,
"A Physics-Informed Neural Network Framework
for PDEs on 3D Surfaces: Time Independent
Problems" by Zhiwei Fang and Justin Zhan.
"""
from sympy import Symbol, Function
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Point1D
from modulus.sym.geometry.primitives_3d import Sphere
from modulus.sym.geometry.parameterization import Parameterization, Parameter
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pde import PDE
# define Poisson equation with sympy
class SurfacePoisson(PDE):
name = "SurfacePoisson"
def __init__(self):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# u
u = Function("u")(x, y, z)
# set equations
self.equations = {}
self.equations["poisson_u"] = u.diff(x, 2) + u.diff(y, 2) + u.diff(z, 2)
self.equations["flux_u"] = (
normal_x * u.diff(x) + normal_y * u.diff(y) + normal_z * u.diff(z)
)
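        # Hedged note: flux_u is the wall-normal derivative n . grad(u).
        # Following the referenced paper, driving this flux to zero on the
        # surface lets the Euclidean Laplacian in poisson_u stand in for the
        # surface (Laplace-Beltrami) operator in this problem setup.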
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
sp = SurfacePoisson()
poisson_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = sp.make_nodes() + [poisson_net.make_node(name="poisson_network")]
# add constraints to solver
# make geometry
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
center = (0, 0, 0)
radius = 1
geo = Sphere(center, radius)
p = Point1D(
1, parameterization=Parameterization({Parameter("y"): 0, Parameter("z"): 0})
)
# make domain
domain = Domain()
# sphere surface
surface = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"poisson_u": -18.0 * x * y * z, "flux_u": 0},
batch_size=cfg.batch_size.surface,
lambda_weighting={"poisson_u": 1.0, "flux_u": 1.0},
)
domain.add_constraint(surface, "surface")
# single point
point = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=p,
outvar={"u": 0.0},
batch_size=2,
lambda_weighting={"u": 1.0},
)
domain.add_constraint(point, "point")
# validation data
surface_points = geo.sample_boundary(10000)
true_solution = {
"u": surface_points["x"] * surface_points["y"] * surface_points["z"]
}
validator = PointwiseValidator(
nodes=nodes, invar=surface_points, true_outvar=true_solution, batch_size=128
)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/surface_pde/sphere/sphere.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq, sin, cos, Min, Max, Abs, log, exp
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from custom_k_ep import kEpsilonInit, kEpsilon, kEpsilonStdWF
@modulus.sym.main(config_path="conf_re590_k_ep", config_name="config")
def run(cfg: ModulusConfig) -> None:
# simulation parameters
Re = 590
nu = 1 / Re
y_plus = 30
karman_constant = 0.4187
resolved_y_start = y_plus * nu
channel_width = (-1, 1)
channel_length = (-np.pi / 2, np.pi / 2)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
geo_sdf = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
# geometry where the equations are solved
geo_resolved = Channel2D(
(channel_length[0], channel_width[0] + resolved_y_start),
(channel_length[1], channel_width[1] - resolved_y_start),
)
# make list of nodes to unroll graph on
init = kEpsilonInit(nu=nu, rho=1.0)
eq = kEpsilon(nu=nu, rho=1.0)
wf = kEpsilonStdWF(nu=nu, rho=1.0)
u_tau_net = instantiate_arch(
input_keys=[Key("u_in"), Key("y_in")],
output_keys=[Key("u_tau_out")],
cfg=cfg.arch.fully_connected,
)
flow_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("u"), Key("v")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
p_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("p")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
k_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("k_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
ep_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("ep_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
nodes = (
init.make_nodes()
+ eq.make_nodes()
+ wf.make_nodes()
+ [
Node.from_sympy(
sin(2 * np.pi * Symbol("x") / (channel_length[1] - channel_length[0])),
"x_sin",
)
]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("k_star"))) + 1e-4, 20), "k")]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("ep_star"))) + 1e-4, 180), "ep")]
+ [flow_net.make_node(name="flow_network")]
+ [p_net.make_node(name="p_network")]
+ [k_net.make_node(name="k_network")]
+ [ep_net.make_node(name="ep_network")]
)
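    # Hedged note on the k/ep output nodes above: log(1 + exp(k_star)) is a
    # softplus, so the networks predict unconstrained k_star/ep_star while k
    # and ep stay strictly positive; the Min(..., 20) and Min(..., 180) caps
    # (values specific to this example) bound them so that
    # nu_t = C_mu * k^2 / ep stays well behaved early in training.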
nodes_u_tau = (
[Node.from_sympy(Symbol("normal_distance"), "y_in")]
+ [
Node.from_sympy(
(
(
Symbol("u")
- (
Symbol("u") * (-Symbol("normal_x"))
+ Symbol("v") * (-Symbol("normal_y"))
)
* (-Symbol("normal_x"))
)
** 2
+ (
Symbol("v")
- (
Symbol("u") * (-Symbol("normal_x"))
+ Symbol("v") * (-Symbol("normal_y"))
)
* (-Symbol("normal_y"))
)
** 2
)
** 0.5,
"u_parallel_to_wall",
)
]
+ [Node.from_sympy(Symbol("u_parallel_to_wall"), "u_in")]
+ [Node.from_sympy(Symbol("u_tau_out"), "u_tau")]
+ [u_tau_net.make_node(name="u_tau_network", optimize=False)]
)
# add constraints to solver
p_grad = 1.0
x, y = Symbol("x"), Symbol("y")
# make domain
domain = Domain()
    # Point where the wall function is applied
wf_pt = PointwiseBoundaryConstraint(
nodes=nodes + nodes_u_tau,
geometry=geo_resolved,
outvar={
"velocity_wall_normal_wf": 0,
"velocity_wall_parallel_wf": 0,
"ep_wf": 0,
"k_wf": 0,
"wall_shear_stress_x_wf": 0,
"wall_shear_stress_y_wf": 0,
},
lambda_weighting={
"velocity_wall_normal_wf": 100,
"velocity_wall_parallel_wf": 100,
"ep_wf": 1,
"k_wf": 1,
"wall_shear_stress_x_wf": 100,
"wall_shear_stress_y_wf": 100,
},
batch_size=cfg.batch_size.wf_pt,
parameterization={"normal_distance": resolved_y_start},
)
domain.add_constraint(wf_pt, "WF")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"k_equation": 0,
"ep_equation": 0,
},
lambda_weighting={
"continuity": 100,
"momentum_x": 1000,
"momentum_y": 1000,
"k_equation": 10,
"ep_equation": 1,
},
batch_size=cfg.batch_size.interior,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "Interior")
# pressure pc
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"p": p_grad * (channel_length[1] - channel_length[0])},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "Inlet")
# pressure pc
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "Outlet")
# flow initialization
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={"u_init": 0, "v_init": 0, "k_init": 0, "p_init": 0, "ep_init": 0},
batch_size=cfg.batch_size.interior_init,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "InteriorInit")
# add inferencing and monitor
invar_wf_pt = geo_resolved.sample_boundary(
1024, parameterization={"normal_distance": resolved_y_start}
)
u_tau_monitor = PointwiseMonitor(
invar_wf_pt,
output_names=["u_tau"],
metrics={"mean_u_tau": lambda var: torch.mean(var["u_tau"])},
nodes=nodes + nodes_u_tau,
)
domain.add_monitor(u_tau_monitor)
# add inferencer data
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_interior(
5000, bounds={x: channel_length, y: channel_width}
),
output_names=["u", "v", "p", "k", "ep"],
)
domain.add_inferencer(inference, "inf_interior")
inference = PointwiseInferencer(
nodes=nodes + nodes_u_tau,
invar=geo_resolved.sample_boundary(
10, parameterization={"normal_distance": resolved_y_start}
),
output_names=["u", "v", "p", "k", "ep", "normal_distance", "u_tau"],
)
domain.add_inferencer(inference, "inf_wf")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/re590_k_ep.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number, log, Abs, simplify
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class kEpsilonInit(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# flow initialization
C_mu = 0.09
u_avg = 21 # Approx average velocity
Re_d = (
u_avg * 1 / nu
) # Reynolds number based on centerline and channel hydraulic dia
l = 0.038 * 2 # Approx turbulent length scale
I = 0.16 * Re_d ** (
-1 / 8
) # Turbulent intensity for a fully developed pipe flow
u_init = u_avg
v_init = 0
p_init = 0
k_init = 1.5 * (u_avg * I) ** 2
ep_init = (C_mu ** (3 / 4)) * (k_init ** (3 / 2)) / l
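        # These match common RANS inlet estimates (a sketch of the reasoning):
        # k = 1.5 * (U * I)^2 from the turbulence intensity I, and
        # ep = C_mu^(3/4) * k^(3/2) / l from the turbulent length scale l.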
# set equations
self.equations = {}
self.equations["u_init"] = u - u_init
self.equations["v_init"] = v - v_init
self.equations["p_init"] = p - p_init
self.equations["k_init"] = k - k_init
self.equations["ep_init"] = ep - ep_init
class kEpsilon(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# Model constants
sig_k = 1.0
sig_ep = 1.3
C_ep1 = 1.44
C_ep2 = 1.92
C_mu = 0.09
E = 9.793
# Turbulent Viscosity
nu_t = C_mu * (k**2) / (ep + 1e-4)
# Turbulent Production Term
P_k = nu_t * (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
# set equations
self.equations = {}
self.equations["continuity"] = simplify(u.diff(x) + v.diff(y))
self.equations["momentum_x"] = simplify(
u * u.diff(x)
+ v * u.diff(y)
+ p.diff(x)
- ((nu + nu_t) * u.diff(x)).diff(x)
- ((nu + nu_t) * u.diff(y)).diff(y)
)
self.equations["momentum_y"] = simplify(
u * v.diff(x)
+ v * v.diff(y)
+ p.diff(y)
- ((nu + nu_t) * v.diff(x)).diff(x)
- ((nu + nu_t) * v.diff(y)).diff(y)
)
self.equations["k_equation"] = simplify(
u * k.diff(x)
+ v * k.diff(y)
- ((nu + nu_t / sig_k) * k.diff(x)).diff(x)
- ((nu + nu_t / sig_k) * k.diff(y)).diff(y)
- P_k
+ ep
)
self.equations["ep_equation"] = simplify(
u * ep.diff(x)
+ v * ep.diff(y)
- ((nu + nu_t / sig_ep) * ep.diff(x)).diff(x)
- ((nu + nu_t / sig_ep) * ep.diff(y)).diff(y)
- (C_ep1 * P_k - C_ep2 * ep) * ep / (k + 1e-3)
)
class kEpsilonStdWF(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# normals
normal_x = -1 * Symbol(
"normal_x"
) # Multiply by -1 to flip the direction of normal
normal_y = -1 * Symbol(
"normal_y"
) # Multiply by -1 to flip the direction of normal
# wall distance
normal_distance = Function("normal_distance")(*input_variables)
# Model constants
C_mu = 0.09
E = 9.793
C_k = -0.36
B_k = 8.15
karman_constant = 0.4187
# Turbulent Viscosity
nu_t = C_mu * (k**2) / (ep + 1e-4)
u_tau = Function("u_tau")(*input_variables)
y_plus = u_tau * normal_distance / nu
u_plus = log(Abs(E * y_plus)) / karman_constant
ep_true = (C_mu ** (3 / 4)) * (k ** (3 / 2)) / karman_constant / normal_distance
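        # Standard wall-function relations used in this class (sketch): the
        # log law u+ = ln(E * y+) / kappa for the tangential velocity, and the
        # equilibrium dissipation ep = C_mu^(3/4) * k^(3/2) / (kappa * y)
        # evaluated at the wall-adjacent point y = normal_distance.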
u_parallel_to_wall = [
u - (u * normal_x + v * normal_y) * normal_x,
v - (u * normal_x + v * normal_y) * normal_y,
]
du_parallel_to_wall_dx = [
u.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_x,
v.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_y,
]
du_parallel_to_wall_dy = [
u.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_x,
v.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_y,
]
du_dsdf = [
du_parallel_to_wall_dx[0] * normal_x + du_parallel_to_wall_dy[0] * normal_y,
du_parallel_to_wall_dx[1] * normal_x + du_parallel_to_wall_dy[1] * normal_y,
]
wall_shear_stress_true_x = (
u_tau * u_parallel_to_wall[0] * karman_constant / log(Abs(E * y_plus))
)
wall_shear_stress_true_y = (
u_tau * u_parallel_to_wall[1] * karman_constant / log(Abs(E * y_plus))
)
wall_shear_stress_x = (nu + nu_t) * du_dsdf[0]
wall_shear_stress_y = (nu + nu_t) * du_dsdf[1]
u_normal_to_wall = u * normal_x + v * normal_y
u_normal_to_wall_true = 0
u_parallel_to_wall_mag = (
u_parallel_to_wall[0] ** 2 + u_parallel_to_wall[1] ** 2
) ** 0.5
u_parallel_to_wall_true = u_plus * u_tau
# k_normal_gradient = normal_x*k.diff(x) + normal_y*k.diff(y)
# k_normal_gradient_true = 0
k_true = u_tau**2 / C_mu**0.5
# set equations
self.equations = {}
self.equations["velocity_wall_normal_wf"] = (
u_normal_to_wall - u_normal_to_wall_true
)
self.equations["velocity_wall_parallel_wf"] = (
u_parallel_to_wall_mag - u_parallel_to_wall_true
)
self.equations["k_wf"] = k - k_true
self.equations["ep_wf"] = ep - ep_true
self.equations["wall_shear_stress_x_wf"] = (
wall_shear_stress_x - wall_shear_stress_true_x
)
self.equations["wall_shear_stress_y_wf"] = (
wall_shear_stress_y - wall_shear_stress_true_y
)
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/custom_k_ep.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries
import torch
from torch import nn
from typing import Dict, List
# Import from Modulus
from modulus.sym.eq.derivatives import gradient
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight = (torch.tanh((20000 - step_tensor) * smoothness) + 1.0) * 0.5
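        # Worked example of the schedule (sketch):
        #   step = 0     -> decay_weight ~ 1.0 (only "*_init" losses active)
        #   step = 20000 -> decay_weight = 0.5 (even blend)
        #   step = 40000 -> decay_weight ~ 0.0 (only physics losses active)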
# Add losses
for key in losses.keys():
if "init" not in key:
loss += (1 - decay_weight) * (losses[key])
else:
loss += decay_weight * (losses[key])
return loss
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/custom_aggregator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq, sin, cos, Min, Max, Abs, log, exp
from scipy import optimize
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
@modulus.sym.main(config_path="conf_u_tau_lookup", config_name="config")
def run(cfg: ModulusConfig) -> None:
u = np.linspace(1e-3, 50, num=100)
y = np.linspace(1e-3, 0.5, num=100)
U, Y = np.meshgrid(u, y)
U = np.reshape(U, (U.size,))
Y = np.reshape(Y, (Y.size,))
Re = 590
nu = 1 / Re
def f(u_tau, y, u):
return u_tau * np.log(9.793 * y * u_tau / nu) - u * 0.4187
def fprime(u_tau, y, u):
return 1 + np.log(9.793 * y * u_tau / nu)
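    # What the Newton iteration below solves (sketch): the log law
    # u / u_tau = ln(9.793 * y * u_tau / nu) / 0.4187, rearranged to
    # f(u_tau) = u_tau * ln(9.793 * y * u_tau / nu) - 0.4187 * u = 0,
    # whose derivative d f / d u_tau = ln(9.793 * y * u_tau / nu) + 1
    # is exactly fprime above.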
u_tau = []
for i in range(len(U)):
u_tau_calc = optimize.newton(
f,
1.0,
fprime=fprime,
args=(Y[i], U[i]),
tol=1.48e-08,
maxiter=200,
fprime2=None,
)
u_tau.append(u_tau_calc)
# save tabulations to a csv file
results = np.concatenate(
(
np.reshape(U, (len(U), 1)),
np.reshape(Y, (len(Y), 1)),
np.reshape(u_tau, (len(u_tau), 1)),
),
axis=1,
)
np.savetxt("u_tau.csv", results, delimiter=",")
invar = {"u_in": np.reshape(U, (len(U), 1)), "y_in": np.reshape(Y, (len(Y), 1))}
outvar = {"u_tau_out": np.reshape(u_tau, (len(u_tau), 1))}
u = np.random.uniform(1e-3, 50, size=100)
y = np.random.uniform(1e-3, 0.5, size=100)
U, Y = np.meshgrid(u, y)
U = np.reshape(U, (U.size,))
Y = np.reshape(Y, (Y.size,))
u_tau_val = []
for i in range(len(U)):
u_tau_calc = optimize.newton(
f,
1.0,
fprime=fprime,
args=(Y[i], U[i]),
tol=1.48e-08,
maxiter=200,
fprime2=None,
)
u_tau_val.append(u_tau_calc)
    # save validation tabulations to a csv file
    results = np.concatenate(
        (
            np.reshape(U, (len(U), 1)),
            np.reshape(Y, (len(Y), 1)),
            np.reshape(u_tau_val, (len(u_tau_val), 1)),
),
axis=1,
)
np.savetxt("u_tau_val.csv", results, delimiter=",")
invar_val = {"u_in": np.reshape(U, (len(U), 1)), "y_in": np.reshape(Y, (len(Y), 1))}
outvar_val = {"u_tau_out": np.reshape(u_tau_val, (len(u_tau_val), 1))}
# make list of nodes to unroll graph on
u_tau_net = instantiate_arch(
input_keys=[Key("u_in"), Key("y_in")],
output_keys=[Key("u_tau_out")],
cfg=cfg.arch.fully_connected,
)
nodes = [u_tau_net.make_node(name="u_tau_network")]
# make domain
domain = Domain()
train = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=invar,
outvar=outvar,
batch_size=10000,
)
domain.add_constraint(train, "LogLawLoss")
# add validation
validator = PointwiseValidator(nodes=nodes, invar=invar_val, true_outvar=outvar_val)
domain.add_validator(validator)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/u_tau_lookup.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number, log, Abs, simplify, pi
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class kOmegaInit(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables)
# flow initialization
C_mu = 0.09
u_avg = 21 # Approx average velocity
Re_d = (
u_avg * 1 / nu
) # Reynolds number based on centerline and channel hydraulic dia
l = 0.038 * 2 # Approx turbulent length scale
I = 0.16 * Re_d ** (
-1 / 8
) # Turbulent intensity for a fully developed pipe flow
u_init = u_avg
v_init = 0
p_init = pi / 2
k_init = 1.5 * (u_avg * I) ** 2
ep_init = (C_mu ** (3 / 4)) * (k_init ** (3 / 2)) / l
om_plus_init = ep_init / C_mu / k_init * nu # Solving for om_plus
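        # Hedged interpretation: om_plus appears to be a rescaled specific
        # dissipation rate, om_plus = omega * nu with omega = ep / (C_mu * k),
        # which keeps the predicted quantity O(1); the kOmega equations below
        # divide by nu where needed to recover the usual omega terms.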
# set equations
self.equations = {}
self.equations["u_init"] = u - u_init
self.equations["v_init"] = v - v_init
self.equations["p_init"] = p - p_init
self.equations["k_init"] = k - k_init
self.equations["om_plus_init"] = om_plus - om_plus_init
class kOmega(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables) # Solving for om_plus
# Model constants
sig = 0.5
sig_star = 0.5
C_mu = 0.09
E = 9.793
beta = 3 / 40
alpha = 5 / 9
beta_star = 9 / 100
# Turbulent Viscosity
nu_t = k * nu / (om_plus + 1e-4)
# Turbulent Production Term
P_k = nu_t * (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
# set equations
self.equations = {}
self.equations["continuity"] = simplify(u.diff(x) + v.diff(y))
self.equations["momentum_x"] = simplify(
u * u.diff(x)
+ v * u.diff(y)
+ p.diff(x)
- ((nu + nu_t) * u.diff(x)).diff(x)
- ((nu + nu_t) * u.diff(y)).diff(y)
)
self.equations["momentum_y"] = simplify(
u * v.diff(x)
+ v * v.diff(y)
+ p.diff(y)
- ((nu + nu_t) * v.diff(x)).diff(x)
- ((nu + nu_t) * v.diff(y)).diff(y)
)
self.equations["k_equation"] = simplify(
u * k.diff(x)
+ v * k.diff(y)
- ((nu + nu_t * sig_star) * k.diff(x)).diff(x)
- ((nu + nu_t * sig_star) * k.diff(y)).diff(y)
- P_k
+ beta_star * k * om_plus / nu
)
self.equations["om_plus_equation"] = simplify(
u * om_plus.diff(x) / nu
+ v * om_plus.diff(y) / nu
- ((nu + nu_t * sig) * om_plus.diff(x)).diff(x) / nu
- ((nu + nu_t * sig) * om_plus.diff(y)).diff(y) / nu
- alpha
* (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
+ beta * om_plus * om_plus / nu / nu
)
class kOmegaStdWF(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables)
# normals
normal_x = -1 * Symbol(
"normal_x"
) # Multiply by -1 to flip the direction of normal
normal_y = -1 * Symbol(
"normal_y"
) # Multiply by -1 to flip the direction of normal
# wall distance
normal_distance = Function("normal_distance")(*input_variables)
# Model constants
C_mu = 0.09
E = 9.793
C_k = -0.36
B_k = 8.15
karman_constant = 0.4187
beta_star = 9 / 100
# Turbulent Viscosity
nu_t = k * nu / (om_plus + 1e-4)
u_tau = Function("u_tau")(*input_variables)
y_plus = u_tau * normal_distance / nu
u_plus = log(Abs(E * y_plus) + 1e-3) / karman_constant
om_plus_true = (
(k**0.5) / (beta_star**0.25) / karman_constant / normal_distance
) * nu
u_parallel_to_wall = [
u - (u * normal_x + v * normal_y) * normal_x,
v - (u * normal_x + v * normal_y) * normal_y,
]
du_parallel_to_wall_dx = [
u.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_x,
v.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_y,
]
du_parallel_to_wall_dy = [
u.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_x,
v.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_y,
]
du_dsdf = [
du_parallel_to_wall_dx[0] * normal_x + du_parallel_to_wall_dy[0] * normal_y,
du_parallel_to_wall_dx[1] * normal_x + du_parallel_to_wall_dy[1] * normal_y,
]
wall_shear_stress_true_x = (
u_tau
* u_parallel_to_wall[0]
* karman_constant
/ log(Abs(E * y_plus) + 1e-3)
)
wall_shear_stress_true_y = (
u_tau
* u_parallel_to_wall[1]
* karman_constant
/ log(Abs(E * y_plus) + 1e-3)
)
wall_shear_stress_x = (nu + nu_t) * du_dsdf[0]
wall_shear_stress_y = (nu + nu_t) * du_dsdf[1]
u_normal_to_wall = u * normal_x + v * normal_y
u_normal_to_wall_true = 0
u_parallel_to_wall_mag = (
u_parallel_to_wall[0] ** 2 + u_parallel_to_wall[1] ** 2
) ** 0.5
u_parallel_to_wall_true = u_plus * u_tau
# k_normal_gradient = normal_x*k.diff(x) + normal_y*k.diff(y)
# k_normal_gradient_true = 0
k_true = u_tau**2 / C_mu**0.5
# set equations
self.equations = {}
self.equations["velocity_wall_normal_wf"] = (
u_normal_to_wall - u_normal_to_wall_true
)
self.equations["velocity_wall_parallel_wf"] = (
u_parallel_to_wall_mag - u_parallel_to_wall_true
)
self.equations["k_wf"] = k - k_true
self.equations["om_plus_wf"] = om_plus - om_plus_true
self.equations["wall_shear_stress_x_wf"] = (
wall_shear_stress_x - wall_shear_stress_true_x
)
self.equations["wall_shear_stress_y_wf"] = (
wall_shear_stress_y - wall_shear_stress_true_y
)
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/custom_k_om.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
from modulus.sym.hydra import to_absolute_path
from modulus.sym.utils.io import csv_to_dict
# path for checkpoint
checkpoint = "./outputs/re590_k_ep/network_checkpoint/"
# read data to compute u_tau
data = np.load(checkpoint + "inferencers/inf_wf.npz", allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
u_tau = np.mean(data["u_tau"])
# read data to plot profiles
interior_data = np.load(checkpoint + "inferencers/inf_interior.npz", allow_pickle=True)
interior_data = np.atleast_1d(interior_data.f.arr_0)[0]
y = interior_data["y"]
u = interior_data["u"]
k = interior_data["k"]
nu = 1 / 590
u_plus = u / u_tau
y_plus = (1 - np.abs(y)) * u_tau / nu
k_plus = k / u_tau / u_tau
y = 1 - np.abs(y)
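# Wall-unit definitions used above (sketch): y+ = y_wall * u_tau / nu,
# u+ = u / u_tau, k+ = k / u_tau^2, where y_wall = 1 - |y| is the distance to
# the nearest wall (the channel half-width is 1).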
fig, ax = plt.subplots(2, figsize=(4.5, 9))
# read validation data
# Fluent data from Turbulence lecture notes: Gianluca Iaccarino: https://web.stanford.edu/class/me469b/handouts/turbulence.pdf
# DNS data from Moser et al.: https://aip.scitation.org/doi/10.1063/1.869966
file_path = "../validation_data"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"u+": "u_plus", "y+": "y_plus"}
u_dns_data = csv_to_dict("../validation_data/re590-moser-dns-u_plus.csv", mapping)
u_fluent_gi_data = csv_to_dict(
"../validation_data/re590-gi-fluent-u_plus.csv", mapping
)
mapping = {"k+": "k_plus", "y/2H": "y"}
k_dns_data = csv_to_dict("../validation_data/re590-moser-dns-k_plus.csv", mapping)
k_fluent_gi_data = csv_to_dict(
"../validation_data/re590-gi-fluent-k_plus.csv", mapping
)
ax[0].scatter(k_dns_data["y"], k_dns_data["k_plus"], label="DNS: Moser")
ax[0].scatter(k_fluent_gi_data["y"], k_fluent_gi_data["k_plus"], label="Fluent: GI")
ax[1].scatter(u_dns_data["y_plus"], u_dns_data["u_plus"], label="DNS: Moser")
ax[1].scatter(
u_fluent_gi_data["y_plus"], u_fluent_gi_data["u_plus"], label="Fluent: GI"
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
ax[0].scatter(y, k_plus, label="Modulus")
ax[0].set(title="TKE: u_tau=" + str(round(u_tau, 3)))
ax[0].set(xlabel="y", ylabel="k+")
ax[0].legend()
ax[1].scatter(y_plus, u_plus, label="Modulus")
ax[1].set_xscale("log")
ax[1].set(title="U+: u_tau=" + str(round(u_tau, 3)))
ax[1].set(xlabel="y+", ylabel="u+")
ax[1].legend()
plt.tight_layout()
plt.savefig("results_k_ep.png")
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/plot_results.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq, sin, cos, Min, Max, Abs, log, exp, tanh
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from custom_k_om import kOmegaInit, kOmega, kOmegaStdWF
@modulus.sym.main(config_path="conf_re590_k_om", config_name="config")
def run(cfg: ModulusConfig) -> None:
# simulation parameters
Re = 590
nu = 1 / Re
y_plus = 30
karman_constant = 0.4187
resolved_y_start = y_plus * nu
channel_width = (-1, 1)
channel_length = (-np.pi / 2, np.pi / 2)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
geo_sdf = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
# geometry where the equations are solved
geo_resolved = Channel2D(
(channel_length[0], channel_width[0] + resolved_y_start),
(channel_length[1], channel_width[1] - resolved_y_start),
)
# make list of nodes to unroll graph on
init = kOmegaInit(nu=nu, rho=1.0)
eq = kOmega(nu=nu, rho=1.0)
wf = kOmegaStdWF(nu=nu, rho=1.0)
u_tau_net = instantiate_arch(
input_keys=[Key("u_in"), Key("y_in")],
output_keys=[Key("u_tau_out")],
cfg=cfg.arch.fully_connected,
)
flow_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("u"), Key("v")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
p_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("p")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
k_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("k_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
om_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("om_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
nodes = (
init.make_nodes()
+ eq.make_nodes()
+ wf.make_nodes()
+ [
Node.from_sympy(
sin(2 * np.pi * Symbol("x") / (channel_length[1] - channel_length[0])),
"x_sin",
)
]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("k_star"))) + 1e-4, 20), "k")]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("om_star"))) + 1e-4, 20), "om_plus")]
+ [flow_net.make_node(name="flow_network")]
+ [p_net.make_node(name="p_network")]
+ [k_net.make_node(name="k_network")]
+ [om_net.make_node(name="om_network")]
)
nodes_u_tau = (
[Node.from_sympy(Symbol("normal_distance"), "y_in")]
+ [
Node.from_sympy(
(
(
Symbol("u")
- (
Symbol("u") * (-Symbol("normal_x"))
+ Symbol("v") * (-Symbol("normal_y"))
)
* (-Symbol("normal_x"))
)
** 2
+ (
Symbol("v")
- (
Symbol("u") * (-Symbol("normal_x"))
+ Symbol("v") * (-Symbol("normal_y"))
)
* (-Symbol("normal_y"))
)
** 2
)
** 0.5,
"u_parallel_to_wall",
)
]
+ [Node.from_sympy(Symbol("u_parallel_to_wall"), "u_in")]
+ [Node.from_sympy(Symbol("u_tau_out"), "u_tau")]
+ [u_tau_net.make_node(name="u_tau_network", optimize=False)]
)
# add constraints to solver
p_grad = 1.0
x, y = Symbol("x"), Symbol("y")
# make domain
domain = Domain()
    # Point where the wall function is applied
wf_pt = PointwiseBoundaryConstraint(
nodes=nodes + nodes_u_tau,
geometry=geo_resolved,
outvar={
"velocity_wall_normal_wf": 0,
"velocity_wall_parallel_wf": 0,
"om_plus_wf": 0,
"k_wf": 0,
"wall_shear_stress_x_wf": 0,
"wall_shear_stress_y_wf": 0,
},
lambda_weighting={
"velocity_wall_normal_wf": 100,
"velocity_wall_parallel_wf": 100,
"om_plus_wf": 10,
"k_wf": 1,
"wall_shear_stress_x_wf": 100,
"wall_shear_stress_y_wf": 100,
},
batch_size=cfg.batch_size.wf_pt,
parameterization={"normal_distance": resolved_y_start},
)
domain.add_constraint(wf_pt, "WF")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"k_equation": 0,
"om_plus_equation": 0,
},
lambda_weighting={
"continuity": 100,
"momentum_x": 1000,
"momentum_y": 1000,
"k_equation": 10,
"om_plus_equation": 0.1,
},
batch_size=cfg.batch_size.interior,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "Interior")
# pressure pc
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"p": p_grad * (channel_length[1] - channel_length[0])},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "Inlet")
# pressure pc
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "Outlet")
# flow initialization
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={"u_init": 0, "v_init": 0, "k_init": 0, "p_init": 0, "om_plus_init": 0},
batch_size=cfg.batch_size.interior_init,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "InteriorInit")
# add inferencing and monitor
invar_wf_pt = geo_resolved.sample_boundary(
1024, parameterization={"normal_distance": resolved_y_start}
)
u_tau_monitor = PointwiseMonitor(
invar_wf_pt,
output_names=["u_tau"],
metrics={"mean_u_tau": lambda var: torch.mean(var["u_tau"])},
nodes=nodes + nodes_u_tau,
)
domain.add_monitor(u_tau_monitor)
# add inferencer data
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_interior(
5000, bounds={x: channel_length, y: channel_width}
),
output_names=["u", "v", "p", "k", "om_plus"],
)
domain.add_inferencer(inference, "inf_interior")
inference = PointwiseInferencer(
nodes=nodes + nodes_u_tau,
invar=geo_resolved.sample_boundary(
10, parameterization={"normal_distance": resolved_y_start}
),
output_names=["u", "v", "p", "k", "om_plus", "normal_distance", "u_tau"],
)
domain.add_inferencer(inference, "inf_wf")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/turbulent_channel/2d_std_wf/re590_k_om.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries
import torch
from torch import nn
from typing import Dict, List
# Import from Modulus
from modulus.sym.eq.derivatives import gradient
from modulus.sym.loss.aggregator import Aggregator
class CustomSum(Aggregator):
"""
Loss aggregation by summation
"""
def __init__(self, params, num_losses, weights=None):
super().__init__(params, num_losses, weights)
def forward(self, losses: Dict[str, torch.Tensor], step: int) -> torch.Tensor:
"""
Aggregates the losses by summation
Parameters
----------
losses : Dict[str, torch.Tensor]
A dictionary of losses
step : int
Optimizer step
Returns
-------
loss : torch.Tensor
Aggregated loss
"""
# weigh losses
losses = self.weigh_losses(losses, self.weights)
# Initialize loss
loss: torch.Tensor = torch.zeros_like(self.init_loss)
smoothness = 0.0005 # use 0.0005 to smoothen the transition over ~10k steps
step_tensor = torch.tensor(step, dtype=torch.float32)
decay_weight = (torch.tanh((20000 - step_tensor) * smoothness) + 1.0) * 0.5
# Add losses
for key in losses.keys():
if "init" not in key:
loss += (1 - decay_weight) * (losses[key])
else:
loss += decay_weight * (losses[key])
return loss
| modulus-sym-main | examples/turbulent_channel/2d/custom_aggregator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq, sin, cos, Min, Max, Abs, log, exp
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.key import Key
from modulus.sym.node import Node
from custom_k_om_ls import kOmegaInit, kOmega, kOmegaLSWF
@modulus.sym.main(config_path="conf_re590_k_om_LS", config_name="config")
def run(cfg: ModulusConfig) -> None:
# simulation parameters
Re = 590
nu = 1 / Re
y_plus = 30
karman_constant = 0.4187
resolved_y_start = y_plus * nu
channel_width = (-1, 1)
channel_length = (-np.pi / 2, np.pi / 2)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
geo_sdf = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
# geometry where the equations are solved
geo_resolved = Channel2D(
(channel_length[0], channel_width[0] + resolved_y_start),
(channel_length[1], channel_width[1] - resolved_y_start),
)
# make list of nodes to unroll graph on
init = kOmegaInit(nu=nu, rho=1.0)
eq = kOmega(nu=nu, rho=1.0)
wf = kOmegaLSWF(nu=nu, rho=1.0)
flow_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("u"), Key("v")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
p_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("p")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
k_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("k_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
om_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("om_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
nodes = (
init.make_nodes()
+ eq.make_nodes()
+ wf.make_nodes()
+ [
Node.from_sympy(
sin(2 * np.pi * Symbol("x") / (channel_length[1] - channel_length[0])),
"x_sin",
)
]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("k_star"))) + 1e-4, 20), "k")]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("om_star"))) + 1e-4, 20), "om_plus")]
+ [flow_net.make_node(name="flow_network")]
+ [p_net.make_node(name="p_network")]
+ [k_net.make_node(name="k_network")]
+ [om_net.make_node(name="om_network")]
)
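    # Note (illustrative, assumed rationale): the softplus log(1 + exp(.)) above
    # maps the raw network outputs k_star/om_star to strictly positive k and
    # om_plus; the 1e-4 floor and the Min(...) cap keep the turbulence quantities
    # bounded while the networks are still far from converged.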
# add constraints to solver
p_grad = 1.0
x, y = Symbol("x"), Symbol("y")
# make domain
domain = Domain()
    # Points where the wall function is applied
wf_pt = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"velocity_wall_normal_wf": 0,
"velocity_wall_parallel_wf": 0,
"om_plus_wf": 0,
"wall_shear_stress_x_wf": 0,
"wall_shear_stress_y_wf": 0,
},
lambda_weighting={
"velocity_wall_normal_wf": 100,
"velocity_wall_parallel_wf": 100,
"om_plus_wf": 10,
"wall_shear_stress_x_wf": 100,
"wall_shear_stress_y_wf": 100,
},
batch_size=cfg.batch_size.wf_pt,
parameterization={"normal_distance": resolved_y_start},
)
domain.add_constraint(wf_pt, "WF")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"k_equation": 0,
"om_plus_equation": 0,
},
lambda_weighting={
"continuity": 100,
"momentum_x": 1000,
"momentum_y": 1000,
"k_equation": 10,
"om_plus_equation": 0.1,
},
batch_size=cfg.batch_size.interior,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "Interior")
# pressure pc
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"p": p_grad * (channel_length[1] - channel_length[0])},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "Inlet")
# pressure pc
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "Outlet")
# flow initialization
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={"u_init": 0, "v_init": 0, "k_init": 0, "p_init": 0, "om_plus_init": 0},
batch_size=cfg.batch_size.interior_init,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "InteriorInit")
# add inferencing and monitor
invar_wf_pt = geo_resolved.sample_boundary(
1024,
)
u_tau_monitor = PointwiseMonitor(
invar_wf_pt,
output_names=["k"],
metrics={
"mean_u_tau": lambda var: torch.mean((0.09**0.25) * torch.sqrt(var["k"]))
},
nodes=nodes,
)
domain.add_monitor(u_tau_monitor)
# add inferencer data
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_interior(
5000, bounds={x: channel_length, y: channel_width}
),
output_names=["u", "v", "p", "k", "om_plus"],
)
domain.add_inferencer(inference, "inf_interior")
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_boundary(
10, parameterization={"normal_distance": resolved_y_start}
),
output_names=["u", "v", "p", "k", "om_plus", "normal_distance"],
)
domain.add_inferencer(inference, "inf_wf")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/turbulent_channel/2d/re590_k_om_LS.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number, log, Abs, simplify, pi
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class kOmegaInit(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables)
# flow initialization
C_mu = 0.09
u_avg = 21 # Approx average velocity
Re_d = (
u_avg * 1 / nu
) # Reynolds number based on centerline and channel hydraulic dia
l = 0.038 * 2 # Approx turbulent length scale
I = 0.16 * Re_d ** (
-1 / 8
) # Turbulent intensity for a fully developed pipe flow
u_init = u_avg
v_init = 0
p_init = pi / 2
k_init = 1.5 * (u_avg * I) ** 2
ep_init = (C_mu ** (3 / 4)) * (k_init ** (3 / 2)) / l
om_plus_init = ep_init / C_mu / k_init * nu # Solving for om_plus
# set equations
self.equations = {}
self.equations["u_init"] = u - u_init
self.equations["v_init"] = v - v_init
self.equations["p_init"] = p - p_init
self.equations["k_init"] = k - k_init
self.equations["om_plus_init"] = om_plus - om_plus_init
class kOmega(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables) # Solving for om_plus
# Model constants
sig = 0.5
sig_star = 0.5
C_mu = 0.09
E = 9.793
beta = 3 / 40
alpha = 5 / 9
beta_star = 9 / 100
# Turbulent Viscosity
nu_t = k * nu / (om_plus + 1e-4)
# Turbulent Production Term
P_k = nu_t * (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
# set equations
self.equations = {}
self.equations["continuity"] = simplify(u.diff(x) + v.diff(y))
self.equations["momentum_x"] = simplify(
u * u.diff(x)
+ v * u.diff(y)
+ p.diff(x)
- ((nu + nu_t) * u.diff(x)).diff(x)
- ((nu + nu_t) * u.diff(y)).diff(y)
)
self.equations["momentum_y"] = simplify(
u * v.diff(x)
+ v * v.diff(y)
+ p.diff(y)
- ((nu + nu_t) * v.diff(x)).diff(x)
- ((nu + nu_t) * v.diff(y)).diff(y)
)
self.equations["k_equation"] = simplify(
u * k.diff(x)
+ v * k.diff(y)
- ((nu + nu_t * sig_star) * k.diff(x)).diff(x)
- ((nu + nu_t * sig_star) * k.diff(y)).diff(y)
- P_k
+ beta_star * k * om_plus / nu
)
self.equations["om_plus_equation"] = simplify(
u * om_plus.diff(x) / nu
+ v * om_plus.diff(y) / nu
- ((nu + nu_t * sig) * om_plus.diff(x)).diff(x) / nu
- ((nu + nu_t * sig) * om_plus.diff(y)).diff(y) / nu
- alpha
* (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
+ beta * om_plus * om_plus / nu / nu
)
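# Note (illustrative, assumed convention): om_plus is the specific dissipation
# rate scaled by the kinematic viscosity, om_plus = nu * om, so that
# nu_t = k * nu / om_plus reduces to the standard k / om and the omega transport
# equation above is the usual one divided through by nu.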
class kOmegaLSWF(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
k = Function("k")(*input_variables)
om_plus = Function("om_plus")(*input_variables)
# normals
normal_x = -1 * Symbol(
"normal_x"
) # Multiply by -1 to flip the direction of normal
normal_y = -1 * Symbol(
"normal_y"
) # Multiply by -1 to flip the direction of normal
# wall distance
normal_distance = Function("normal_distance")(*input_variables)
# Model constants
C_mu = 0.09
E = 9.793
C_k = -0.36
B_k = 8.15
karman_constant = 0.4187
beta_star = 9 / 100
# Turbulent Viscosity
nu_t = k * nu / (om_plus + 1e-4)
u_tau = (C_mu**0.25) * (k**0.5)
y_plus = u_tau * normal_distance / nu
u_plus = log(Abs(E * y_plus) + 1e-3) / karman_constant
om_plus_true = (
(k**0.5) / (beta_star**0.25) / karman_constant / normal_distance
) * nu
u_parallel_to_wall = [
u - (u * normal_x + v * normal_y) * normal_x,
v - (u * normal_x + v * normal_y) * normal_y,
]
du_parallel_to_wall_dx = [
u.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_x,
v.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_y,
]
du_parallel_to_wall_dy = [
u.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_x,
v.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_y,
]
du_dsdf = [
du_parallel_to_wall_dx[0] * normal_x + du_parallel_to_wall_dy[0] * normal_y,
du_parallel_to_wall_dx[1] * normal_x + du_parallel_to_wall_dy[1] * normal_y,
]
wall_shear_stress_true_x = (
u_tau
* u_parallel_to_wall[0]
* karman_constant
/ log(Abs(E * y_plus) + 1e-3)
)
wall_shear_stress_true_y = (
u_tau
* u_parallel_to_wall[1]
* karman_constant
/ log(Abs(E * y_plus) + 1e-3)
)
wall_shear_stress_x = (nu + nu_t) * du_dsdf[0]
wall_shear_stress_y = (nu + nu_t) * du_dsdf[1]
u_normal_to_wall = u * normal_x + v * normal_y
u_normal_to_wall_true = 0
u_parallel_to_wall_mag = (
u_parallel_to_wall[0] ** 2 + u_parallel_to_wall[1] ** 2
) ** 0.5
u_parallel_to_wall_true = u_plus * u_tau
k_normal_gradient = normal_x * k.diff(x) + normal_y * k.diff(y)
k_normal_gradient_true = 0
# set equations
self.equations = {}
self.equations["velocity_wall_normal_wf"] = (
u_normal_to_wall - u_normal_to_wall_true
)
self.equations["velocity_wall_parallel_wf"] = (
u_parallel_to_wall_mag - u_parallel_to_wall_true
)
self.equations["om_plus_wf"] = om_plus - om_plus_true
self.equations["wall_shear_stress_x_wf"] = (
wall_shear_stress_x - wall_shear_stress_true_x
)
self.equations["wall_shear_stress_y_wf"] = (
wall_shear_stress_y - wall_shear_stress_true_y
)
| modulus-sym-main | examples/turbulent_channel/2d/custom_k_om_ls.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Equations related to Navier Stokes Equations
"""
from sympy import Symbol, Function, Number, log, Abs, simplify
from modulus.sym.eq.pde import PDE
from modulus.sym.node import Node
class kEpsilonInit(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# flow initialization
C_mu = 0.09
u_avg = 21 # Approx average velocity
Re_d = (
u_avg * 1 / nu
) # Reynolds number based on centerline and channel hydraulic dia
l = 0.038 * 2 # Approx turbulent length scale
I = 0.16 * Re_d ** (
-1 / 8
) # Turbulent intensity for a fully developed pipe flow
u_init = u_avg
v_init = 0
p_init = 0
k_init = 1.5 * (u_avg * I) ** 2
ep_init = (C_mu ** (3 / 4)) * (k_init ** (3 / 2)) / l
# set equations
self.equations = {}
self.equations["u_init"] = u - u_init
self.equations["v_init"] = v - v_init
self.equations["p_init"] = p - p_init
self.equations["k_init"] = k - k_init
self.equations["ep_init"] = ep - ep_init
class kEpsilon(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
p = Function("p")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# Model constants
sig_k = 1.0
sig_ep = 1.3
C_ep1 = 1.44
C_ep2 = 1.92
C_mu = 0.09
E = 9.793
# Turbulent Viscosity
nu_t = C_mu * (k**2) / (ep + 1e-4)
# Turbulent Production Term
P_k = nu_t * (
2 * (u.diff(x)) ** 2
+ 2 * (v.diff(y)) ** 2
+ (u.diff(y)) ** 2
+ (v.diff(x)) ** 2
+ 2 * u.diff(y) * v.diff(x)
)
# set equations
self.equations = {}
self.equations["continuity"] = simplify(u.diff(x) + v.diff(y))
self.equations["momentum_x"] = simplify(
u * u.diff(x)
+ v * u.diff(y)
+ p.diff(x)
- ((nu + nu_t) * u.diff(x)).diff(x)
- ((nu + nu_t) * u.diff(y)).diff(y)
)
self.equations["momentum_y"] = simplify(
u * v.diff(x)
+ v * v.diff(y)
+ p.diff(y)
- ((nu + nu_t) * v.diff(x)).diff(x)
- ((nu + nu_t) * v.diff(y)).diff(y)
)
self.equations["k_equation"] = simplify(
u * k.diff(x)
+ v * k.diff(y)
- ((nu + nu_t / sig_k) * k.diff(x)).diff(x)
- ((nu + nu_t / sig_k) * k.diff(y)).diff(y)
- P_k
+ ep
)
self.equations["ep_equation"] = simplify(
u * ep.diff(x)
+ v * ep.diff(y)
- ((nu + nu_t / sig_ep) * ep.diff(x)).diff(x)
- ((nu + nu_t / sig_ep) * ep.diff(y)).diff(y)
- (C_ep1 * P_k - C_ep2 * ep) * ep / (k + 1e-3)
)
class kEpsilonLSWF(PDE):
def __init__(self, nu=1, rho=1):
# set params
nu = Number(nu)
rho = Number(rho)
# coordinates
x = Symbol("x")
y = Symbol("y")
# make input variables
input_variables = {"x": x, "y": y}
        # velocity components
u = Function("u")(*input_variables)
v = Function("v")(*input_variables)
k = Function("k")(*input_variables)
ep = Function("ep")(*input_variables)
# normals
normal_x = -1 * Symbol(
"normal_x"
) # Multiply by -1 to flip the direction of normal
normal_y = -1 * Symbol(
"normal_y"
) # Multiply by -1 to flip the direction of normal
# wall distance
normal_distance = Function("normal_distance")(*input_variables)
# Model constants
C_mu = 0.09
E = 9.793
C_k = -0.36
B_k = 8.15
karman_constant = 0.4187
# Turbulent Viscosity
nu_t = C_mu * (k**2) / (ep + 1e-4)
u_tau = (C_mu**0.25) * (k**0.5)
y_plus = u_tau * normal_distance / nu
u_plus = log(Abs(E * y_plus)) / karman_constant
ep_true = (C_mu ** (3 / 4)) * (k ** (3 / 2)) / karman_constant / normal_distance
u_parallel_to_wall = [
u - (u * normal_x + v * normal_y) * normal_x,
v - (u * normal_x + v * normal_y) * normal_y,
]
du_parallel_to_wall_dx = [
u.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_x,
v.diff(x) - (u.diff(x) * normal_x + v.diff(x) * normal_y) * normal_y,
]
du_parallel_to_wall_dy = [
u.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_x,
v.diff(y) - (u.diff(y) * normal_x + v.diff(y) * normal_y) * normal_y,
]
du_dsdf = [
du_parallel_to_wall_dx[0] * normal_x + du_parallel_to_wall_dy[0] * normal_y,
du_parallel_to_wall_dx[1] * normal_x + du_parallel_to_wall_dy[1] * normal_y,
]
wall_shear_stress_true_x = (
u_tau * u_parallel_to_wall[0] * karman_constant / log(Abs(E * y_plus))
)
wall_shear_stress_true_y = (
u_tau * u_parallel_to_wall[1] * karman_constant / log(Abs(E * y_plus))
)
wall_shear_stress_x = (nu + nu_t) * du_dsdf[0]
wall_shear_stress_y = (nu + nu_t) * du_dsdf[1]
u_normal_to_wall = u * normal_x + v * normal_y
u_normal_to_wall_true = 0
u_parallel_to_wall_mag = (
u_parallel_to_wall[0] ** 2 + u_parallel_to_wall[1] ** 2
) ** 0.5
u_parallel_to_wall_true = u_plus * u_tau
k_normal_gradient = normal_x * k.diff(x) + normal_y * k.diff(y)
k_normal_gradient_true = 0
# set equations
self.equations = {}
self.equations["velocity_wall_normal_wf"] = (
u_normal_to_wall - u_normal_to_wall_true
)
self.equations["velocity_wall_parallel_wf"] = (
u_parallel_to_wall_mag - u_parallel_to_wall_true
)
self.equations["ep_wf"] = ep - ep_true
self.equations["wall_shear_stress_x_wf"] = (
wall_shear_stress_x - wall_shear_stress_true_x
)
self.equations["wall_shear_stress_y_wf"] = (
wall_shear_stress_y - wall_shear_stress_true_y
)
| modulus-sym-main | examples/turbulent_channel/2d/custom_k_ep_ls.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
from modulus.sym.hydra import to_absolute_path
from modulus.sym.utils.io import csv_to_dict
# path for checkpoint
checkpoint = "./outputs/re590_k_ep_LS/network_checkpoint/"
# read data to compute u_tau
data = np.load(checkpoint + "inferencers/inf_wf.npz", allow_pickle=True)
data = np.atleast_1d(data.f.arr_0)[0]
k_wf = data["k"]
u_tau = np.mean((0.09**0.25) * (k_wf**0.5))
# read data to plot profiles
interior_data = np.load(checkpoint + "inferencers/inf_interior.npz", allow_pickle=True)
interior_data = np.atleast_1d(interior_data.f.arr_0)[0]
y = interior_data["y"]
u = interior_data["u"]
k = interior_data["k"]
nu = 1 / 590
u_plus = u / u_tau
y_plus = (1 - np.abs(y)) * u_tau / nu
k_plus = k / u_tau / u_tau
y = 1 - np.abs(y)
fig, ax = plt.subplots(2, figsize=(4.5, 9))
# read validation data
# Fluent data from Turbulence lecture notes: Gianluca Iaccarino: https://web.stanford.edu/class/me469b/handouts/turbulence.pdf
# DNS data from Moser et al.: https://aip.scitation.org/doi/10.1063/1.869966
file_path = "../validation_data"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"u+": "u_plus", "y+": "y_plus"}
u_dns_data = csv_to_dict("../validation_data/re590-moser-dns-u_plus.csv", mapping)
u_fluent_gi_data = csv_to_dict(
"../validation_data/re590-gi-fluent-u_plus.csv", mapping
)
mapping = {"k+": "k_plus", "y/2H": "y"}
k_dns_data = csv_to_dict("../validation_data/re590-moser-dns-k_plus.csv", mapping)
k_fluent_gi_data = csv_to_dict(
"../validation_data/re590-gi-fluent-k_plus.csv", mapping
)
ax[0].scatter(k_dns_data["y"], k_dns_data["k_plus"], label="DNS: Moser")
ax[0].scatter(k_fluent_gi_data["y"], k_fluent_gi_data["k_plus"], label="Fluent: GI")
ax[1].scatter(u_dns_data["y_plus"], u_dns_data["u_plus"], label="DNS: Moser")
ax[1].scatter(
u_fluent_gi_data["y_plus"], u_fluent_gi_data["u_plus"], label="Fluent: GI"
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
ax[0].scatter(y, k_plus, label="Modulus")
ax[0].set(title="TKE: u_tau=" + str(round(u_tau, 3)))
ax[0].set(xlabel="y", ylabel="k+")
ax[0].legend()
ax[1].scatter(y_plus, u_plus, label="Modulus")
ax[1].set_xscale("log")
ax[1].set(title="U+: u_tau=" + str(round(u_tau, 3)))
ax[1].set(xlabel="y+", ylabel="u+")
ax[1].legend()
plt.tight_layout()
plt.savefig("results_k_ep_LS.png")
| modulus-sym-main | examples/turbulent_channel/2d/plot_results.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq, sin, cos, Min, Max, Abs, log, exp
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from custom_k_ep_ls import kEpsilonInit, kEpsilon, kEpsilonLSWF
@modulus.sym.main(config_path="conf_re590_k_ep_LS", config_name="config")
def run(cfg: ModulusConfig) -> None:
# simulation parameters
Re = 590
nu = 1 / Re
y_plus = 30
karman_constant = 0.4187
resolved_y_start = y_plus * nu
channel_width = (-1, 1)
channel_length = (-np.pi / 2, np.pi / 2)
inlet = Line(
(channel_length[0], channel_width[0]),
(channel_length[0], channel_width[1]),
normal=1,
)
outlet = Line(
(channel_length[1], channel_width[0]),
(channel_length[1], channel_width[1]),
normal=1,
)
geo_sdf = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
# geometry where the equations are solved
geo_resolved = Channel2D(
(channel_length[0], channel_width[0] + resolved_y_start),
(channel_length[1], channel_width[1] - resolved_y_start),
)
# make list of nodes to unroll graph on
init = kEpsilonInit(nu=nu, rho=1.0)
eq = kEpsilon(nu=nu, rho=1.0)
wf = kEpsilonLSWF(nu=nu, rho=1.0)
flow_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("u"), Key("v")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
p_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("p")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
k_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("k_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
ep_net = instantiate_arch(
input_keys=[Key("x_sin"), Key("y")],
output_keys=[Key("ep_star")],
frequencies=("axis", [i / 2 for i in range(8)]),
frequencies_params=("axis", [i / 2 for i in range(8)]),
cfg=cfg.arch.fourier,
)
nodes = (
init.make_nodes()
+ eq.make_nodes()
+ wf.make_nodes()
+ [
Node.from_sympy(
sin(2 * np.pi * Symbol("x") / (channel_length[1] - channel_length[0])),
"x_sin",
)
]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("k_star"))) + 1e-4, 20), "k")]
+ [Node.from_sympy(Min(log(1 + exp(Symbol("ep_star"))) + 1e-4, 180), "ep")]
+ [flow_net.make_node(name="flow_network")]
+ [p_net.make_node(name="p_network")]
+ [k_net.make_node(name="k_network")]
+ [ep_net.make_node(name="ep_network")]
)
# add constraints to solver
p_grad = 1.0
x, y = Symbol("x"), Symbol("y")
# make domain
domain = Domain()
    # Points where the wall function is applied
wf_pt = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"velocity_wall_normal_wf": 0,
"velocity_wall_parallel_wf": 0,
"ep_wf": 0,
"wall_shear_stress_x_wf": 0,
"wall_shear_stress_y_wf": 0,
},
lambda_weighting={
"velocity_wall_normal_wf": 100,
"velocity_wall_parallel_wf": 100,
"ep_wf": 1,
"wall_shear_stress_x_wf": 100,
"wall_shear_stress_y_wf": 100,
},
batch_size=cfg.batch_size.wf_pt,
parameterization={"normal_distance": resolved_y_start},
)
domain.add_constraint(wf_pt, "WF")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={
"continuity": 0,
"momentum_x": 0,
"momentum_y": 0,
"k_equation": 0,
"ep_equation": 0,
},
lambda_weighting={
"continuity": 100,
"momentum_x": 1000,
"momentum_y": 1000,
"k_equation": 10,
"ep_equation": 1,
},
batch_size=cfg.batch_size.interior,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "Interior")
# pressure pc
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"p": p_grad * (channel_length[1] - channel_length[0])},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "Inlet")
# pressure pc
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
lambda_weighting={"p": 10},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "Outlet")
# flow initialization
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo_resolved,
outvar={"u_init": 0, "v_init": 0, "k_init": 0, "p_init": 0, "ep_init": 0},
batch_size=cfg.batch_size.interior_init,
bounds={x: channel_length, y: channel_width},
)
domain.add_constraint(interior, "InteriorInit")
# add inferencing and monitor
invar_wf_pt = geo_resolved.sample_boundary(
1024,
)
u_tau_monitor = PointwiseMonitor(
invar_wf_pt,
output_names=["k"],
metrics={
"mean_u_tau": lambda var: torch.mean((0.09**0.25) * torch.sqrt(var["k"]))
},
nodes=nodes,
)
domain.add_monitor(u_tau_monitor)
# add inferencer data
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_interior(
5000, bounds={x: channel_length, y: channel_width}
),
output_names=["u", "v", "p", "k", "ep"],
)
domain.add_inferencer(inference, "inf_interior")
inference = PointwiseInferencer(
nodes=nodes,
invar=geo_resolved.sample_boundary(
10, parameterization={"normal_distance": resolved_y_start}
),
output_names=["u", "v", "p", "k", "ep", "normal_distance"],
)
domain.add_inferencer(inference, "inf_wf")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/turbulent_channel/2d/re590_k_ep_LS.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import warnings
import numpy as np
from sympy import Symbol, Function, Number
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.key import Key
from modulus.sym.eq.pdes.wave_equation import WaveEquation
from modulus.sym.eq.pde import PDE
from modulus.sym.utils.io.plotter import ValidatorPlotter
# Read in npz files generated using finite difference simulator Devito
def read_wf_data(time, dLen):
file_path = "Training_data"
if not os.path.exists(to_absolute_path(file_path)):
warnings.warn(
f"Directory {file_path} does not exist. Cannot continue. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
sys.exit()
wf_filename = to_absolute_path(f"Training_data/wf_{int(time):04d}ms.npz")
wave = np.load(wf_filename)["arr_0"].astype(np.float32)
mesh_y, mesh_x = np.meshgrid(
np.linspace(0, dLen, wave.shape[0]),
np.linspace(0, dLen, wave.shape[1]),
indexing="ij",
)
invar = {}
invar["x"] = np.expand_dims(mesh_y.astype(np.float32).flatten(), axis=-1)
invar["y"] = np.expand_dims(mesh_x.astype(np.float32).flatten(), axis=-1)
invar["t"] = np.full_like(invar["x"], time * 0.001)
outvar = {}
outvar["u"] = np.expand_dims(wave.flatten(), axis=-1)
return invar, outvar
# define open boundary conditions
class OpenBoundary(PDE):
"""
Open boundary condition for wave problems
Ref: http://hplgit.github.io/wavebc/doc/pub/._wavebc_cyborg002.html
Parameters
==========
u : str
The dependent variable.
c : float, Sympy Symbol/Expr, str
Wave speed coefficient. If `c` is a str then it is
converted to Sympy Function of form 'c(x,y,z,t)'.
If 'c' is a Sympy Symbol or Expression then this
is substituted into the equation.
dim : int
        Dimension of the wave equation (1, 2, or 3). Default is 3.
time : bool
If time-dependent equations or not. Default is True.
"""
name = "OpenBoundary"
def __init__(self, u="u", c="c", dim=3, time=True):
# set params
self.u = u
self.dim = dim
self.time = time
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# normal
normal_x, normal_y, normal_z = (
Symbol("normal_x"),
Symbol("normal_y"),
Symbol("normal_z"),
)
# time
t = Symbol("t")
# make input variables
input_variables = {"x": x, "y": y, "z": z, "t": t}
if self.dim == 1:
input_variables.pop("y")
input_variables.pop("z")
elif self.dim == 2:
input_variables.pop("z")
if not self.time:
input_variables.pop("t")
# Scalar function
assert type(u) == str, "u needs to be string"
u = Function(u)(*input_variables)
# wave speed coefficient
if type(c) is str:
c = Function(c)(*input_variables)
elif type(c) in [float, int]:
c = Number(c)
# set equations
self.equations = {}
self.equations["open_boundary"] = (
u.diff(t)
+ normal_x * c * u.diff(x)
+ normal_y * c * u.diff(y)
+ normal_z * c * u.diff(z)
)
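# Illustrative note (not from the original file): in 1D with outward normal
# normal_x = +1 the condition reduces to u_t + c * u_x = 0, a one-way wave
# equation that advects outgoing waves through the boundary without reflection.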
class WavePlotter(ValidatorPlotter):
"Define custom validator plotting class"
def __call__(self, invar, true_outvar, pred_outvar):
# only plot x,y dimensions
invar = {k: v for k, v in invar.items() if k in ["x", "y"]}
fs = super().__call__(invar, true_outvar, pred_outvar)
return fs
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
"""
    2D acoustic wave propagation in a 2 km x 2 km domain, with a single Ricker source at the middle of the domain
"""
# override defaults
cfg.arch.fully_connected.layer_size = 128
# define PDEs
we = WaveEquation(u="u", c="c", dim=2, time=True)
ob = OpenBoundary(u="u", c="c", dim=2, time=True)
# define networks and nodes
wave_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("t")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
speed_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("c")],
cfg=cfg.arch.fully_connected,
)
nodes = (
we.make_nodes(detach_names=["c"])
+ ob.make_nodes(detach_names=["c"])
+ [
wave_net.make_node(name="wave_network"),
speed_net.make_node(name="speed_network"),
]
)
# define geometry
dLen = 2 # km
rec = Rectangle((0, 0), (dLen, dLen))
# define sympy domain variables
x, y, t = Symbol("x"), Symbol("y"), Symbol("t")
# define time range
time_length = 1
time_range = {t: (0.15, time_length)}
# define target velocity model
# 2.0 km/s at the bottom and 1.0 km/s at the top using tanh function
mesh_x, mesh_y = np.meshgrid(
np.linspace(0, 2, 512), np.linspace(0, 2, 512), indexing="ij"
)
wave_speed_invar = {}
wave_speed_invar["x"] = np.expand_dims(mesh_x.flatten(), axis=-1)
wave_speed_invar["y"] = np.expand_dims(mesh_y.flatten(), axis=-1)
wave_speed_outvar = {}
wave_speed_outvar["c"] = np.tanh(80 * (wave_speed_invar["y"] - 1.0)) / 2 + 1.5
# make domain
domain = Domain()
# add velocity constraint
velocity = PointwiseConstraint.from_numpy(
nodes=nodes, invar=wave_speed_invar, outvar=wave_speed_outvar, batch_size=1024
)
domain.add_constraint(velocity, "Velocity")
# add initial timesteps constraints
batch_size = 1024
for i, ms in enumerate(np.linspace(150, 300, 4)):
timestep_invar, timestep_outvar = read_wf_data(ms, dLen)
lambda_weighting = {}
lambda_weighting["u"] = np.full_like(timestep_invar["x"], 10.0 / batch_size)
timestep = PointwiseConstraint.from_numpy(
nodes,
timestep_invar,
timestep_outvar,
batch_size,
lambda_weighting=lambda_weighting,
)
domain.add_constraint(timestep, f"BC{i:04d}")
# add interior constraint
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"wave_equation": 0},
batch_size=4096,
bounds={x: (0, dLen), y: (0, dLen)},
lambda_weighting={"wave_equation": 0.0001},
parameterization=time_range,
)
domain.add_constraint(interior, "Interior")
# add open boundary constraint
edges = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"open_boundary": 0},
batch_size=1024,
lambda_weighting={"open_boundary": 0.01 * time_length},
parameterization=time_range,
)
domain.add_constraint(edges, "Edges")
# add validators
for i, ms in enumerate(np.linspace(350, 950, 13)):
val_invar, val_true_outvar = read_wf_data(ms, dLen)
validator = PointwiseValidator(
nodes=nodes,
invar=val_invar,
true_outvar=val_true_outvar,
batch_size=1024,
plotter=WavePlotter(),
)
domain.add_validator(validator, f"VAL_{i:04d}")
validator = PointwiseValidator(
nodes=nodes,
invar=wave_speed_invar,
true_outvar=wave_speed_outvar,
batch_size=1024,
plotter=WavePlotter(),
)
domain.add_validator(validator, "Velocity")
slv = Solver(cfg, domain)
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/seismic_wave/wave_2d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.key import Key
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.dataset import DictVariationalDataset
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
VariationalConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.loss import Loss
# VPINN imports
from modulus.sym.utils.vpinn.test_functions import (
Test_Function,
Legendre_test,
Trig_test,
Vector_Test,
)
from modulus.sym.utils.vpinn.integral import tensor_int
x, y = Symbol("x"), Symbol("y")
# parameters
E = 10.0 # MPa
nu = 0.2
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
domain_origin = (-0.5, -0.5)
domain_dim = (1, 1)
# bounds
bounds_x = (domain_origin[0], domain_origin[0] + domain_dim[0])
bounds_y = (domain_origin[1], domain_origin[1] + domain_dim[1])
class DGLoss(Loss):
def __init__(self):
super().__init__()
test_fn = Test_Function(
name_ord_dict={
Legendre_test: [k for k in range(10)],
Trig_test: [k for k in range(10)],
},
box=[
[domain_origin[0], domain_origin[1]],
[domain_origin[0] + domain_dim[0], domain_origin[1] + domain_dim[1]],
],
diff_list=["grad"],
)
self.v = Vector_Test(test_fn, test_fn, mix=0.02)
def forward(
self,
list_invar,
list_outvar,
step: int,
):
torch.cuda.nvtx.range_push("Make_DGLoss")
torch.cuda.nvtx.range_push("Make_DGLoss_Get_Data")
# self.v.sample_vector_test()
# get points on the interior
x_interior = list_invar[2]["x"]
y_interior = list_invar[2]["y"]
area_interior = list_invar[2]["area"]
# compute solution for the interior
u_x_interior = list_outvar[2]["u__x"]
u_y_interior = list_outvar[2]["u__y"]
v_x_interior = list_outvar[2]["v__x"]
v_y_interior = list_outvar[2]["v__y"]
# get points on the boundary
x_bottom_dir = list_invar[0]["x"]
y_bottom_dir = list_invar[0]["y"]
normal_x_bottom_dir = list_invar[0]["normal_x"]
normal_y_bottom_dir = list_invar[0]["normal_y"]
area_bottom_dir = list_invar[0]["area"]
x_top_dir = list_invar[1]["x"]
y_top_dir = list_invar[1]["y"]
normal_x_top_dir = list_invar[1]["normal_x"]
normal_y_top_dir = list_invar[1]["normal_y"]
area_top_dir = list_invar[1]["area"]
# compute solution for the boundary
u_x_bottom_dir = list_outvar[0]["u__x"]
u_y_bottom_dir = list_outvar[0]["u__y"]
v_x_bottom_dir = list_outvar[0]["v__x"]
v_y_bottom_dir = list_outvar[0]["v__y"]
u_x_top_dir = list_outvar[1]["u__x"]
u_y_top_dir = list_outvar[1]["u__y"]
v_x_top_dir = list_outvar[1]["v__x"]
v_y_top_dir = list_outvar[1]["v__y"]
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("Make_DGLoss_Test_Function")
# test functions
vx_x_interior, vy_x_interior = self.v.eval_test("vx", x_interior, y_interior)
vx_y_interior, vy_y_interior = self.v.eval_test("vy", x_interior, y_interior)
vx_bottom_dir, vy_bottom_dir = self.v.eval_test("v", x_bottom_dir, y_bottom_dir)
vx_top_dir, vy_top_dir = self.v.eval_test("v", x_top_dir, y_top_dir)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("Make_DGLoss_Computation")
w_z_interior = -lambda_ / (lambda_ + 2 * mu) * (u_x_interior + v_y_interior)
sigma_xx_interior = (
lambda_ * (u_x_interior + v_y_interior + w_z_interior)
+ 2 * mu * u_x_interior
)
sigma_yy_interior = (
lambda_ * (u_x_interior + v_y_interior + w_z_interior)
+ 2 * mu * v_y_interior
)
sigma_xy_interior = mu * (u_y_interior + v_x_interior)
w_z_bottom_dir = (
-lambda_ / (lambda_ + 2 * mu) * (u_x_bottom_dir + v_y_bottom_dir)
)
sigma_xx_bottom_dir = (
lambda_ * (u_x_bottom_dir + v_y_bottom_dir + w_z_bottom_dir)
+ 2 * mu * u_x_bottom_dir
)
sigma_yy_bottom_dir = (
lambda_ * (u_x_bottom_dir + v_y_bottom_dir + w_z_bottom_dir)
+ 2 * mu * v_y_bottom_dir
)
sigma_xy_bottom_dir = mu * (u_y_bottom_dir + v_x_bottom_dir)
w_z_top_dir = -lambda_ / (lambda_ + 2 * mu) * (u_x_top_dir + v_y_top_dir)
sigma_xx_top_dir = (
lambda_ * (u_x_top_dir + v_y_top_dir + w_z_top_dir) + 2 * mu * u_x_top_dir
)
sigma_yy_top_dir = (
lambda_ * (u_x_top_dir + v_y_top_dir + w_z_top_dir) + 2 * mu * v_y_top_dir
)
sigma_xy_top_dir = mu * (u_y_top_dir + v_x_top_dir)
traction_x_bottom_dir = (
sigma_xx_bottom_dir * normal_x_bottom_dir
+ sigma_xy_bottom_dir * normal_y_bottom_dir
)
traction_y_bottom_dir = (
sigma_xy_bottom_dir * normal_x_bottom_dir
+ sigma_yy_bottom_dir * normal_y_bottom_dir
)
traction_x_top_dir = (
sigma_xx_top_dir * normal_x_top_dir + sigma_xy_top_dir * normal_y_top_dir
)
traction_y_top_dir = (
sigma_xy_top_dir * normal_x_top_dir + sigma_yy_top_dir * normal_y_top_dir
)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("Make_DGLoss_Integral")
interior_loss = tensor_int(
area_interior,
sigma_xx_interior * vx_x_interior
+ sigma_yy_interior * vy_y_interior
+ sigma_xy_interior * (vx_y_interior + vy_x_interior),
)
boundary_loss1 = tensor_int(
area_bottom_dir,
traction_x_bottom_dir * vx_bottom_dir
+ traction_y_bottom_dir * vy_bottom_dir,
)
boundary_loss2 = tensor_int(
area_top_dir,
traction_x_top_dir * vx_top_dir + traction_y_top_dir * vy_top_dir,
)
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_push("Make_DGLoss_Register_Loss")
losses = {
"variational_plane": torch.abs(
interior_loss - boundary_loss1 - boundary_loss2
)
.pow(2)
.sum()
}
torch.cuda.nvtx.range_pop()
torch.cuda.nvtx.range_pop()
return losses
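# Note (illustrative, assumed derivation): the loss above enforces the weak form
# of linear-elastic equilibrium, int_Omega sigma : grad(v) dA = int_Gamma (sigma n) . v ds,
# for random vector test functions v, so the interior integral should balance the
# traction integrals over the two Dirichlet boundaries when div(sigma) = 0 holds
# (tractions on the remaining, free boundaries vanish).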
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
elasticity_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v")],
cfg=cfg.arch.fully_connected,
)
nodes = [elasticity_net.make_node(name="elasticity_net")]
# domain
square = Rectangle(
domain_origin,
(domain_origin[0] + domain_dim[0], domain_origin[1] + domain_dim[1]),
)
geo = square
# make domain
domain = Domain()
bottomBC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0.0, "v": 0.0},
batch_size=cfg.batch_size.bottom,
batch_per_epoch=5000,
lambda_weighting={"u": 10.0, "v": 10.0},
criteria=Eq(y, domain_origin[1]),
)
domain.add_constraint(bottomBC, "bottomBC_differential")
topBC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0.0, "v": 0.1},
batch_size=cfg.batch_size.top,
batch_per_epoch=5000,
lambda_weighting={"u": 10.0, "v": 10.0},
criteria=Eq(y, domain_origin[1] + domain_dim[1])
& (x <= domain_origin[0] + domain_dim[0] / 2.0),
)
domain.add_constraint(topBC, "topBC_differential")
# register variational data
batch_per_epoch = 1
variational_datasets = {}
batch_sizes = {}
# bottomBC, index : 0
invar = geo.sample_boundary(
batch_per_epoch * cfg.batch_size.bottom,
criteria=Eq(y, domain_origin[1]),
quasirandom=True,
)
invar["area"] *= batch_per_epoch
variational_datasets["bottom_bc"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y", "v__x", "v__y"],
)
batch_sizes["bottom_bc"] = cfg.batch_size.bottom
# topBC, index : 1
invar = geo.sample_boundary(
batch_per_epoch * cfg.batch_size.top,
criteria=Eq(y, domain_origin[1] + domain_dim[1])
& (x <= domain_origin[0] + domain_dim[0] / 2.0),
quasirandom=True,
)
invar["area"] *= batch_per_epoch
variational_datasets["top_bc"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y", "v__x", "v__y"],
)
batch_sizes["top_bc"] = cfg.batch_size.top
# Interior, index : 2
invar = geo.sample_interior(
batch_per_epoch * cfg.batch_size.interior,
bounds={x: bounds_x, y: bounds_y},
quasirandom=True,
)
invar["area"] *= batch_per_epoch
variational_datasets["interior"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y", "v__x", "v__y"],
)
batch_sizes["interior"] = cfg.batch_size.interior
# make variational constraints
variational_constraint = VariationalConstraint(
datasets=variational_datasets,
batch_sizes=batch_sizes,
nodes=nodes,
num_workers=1,
loss=DGLoss(),
)
domain.add_constraint(variational_constraint, "variational")
# add inferencer data
inferencer = PointwiseInferencer(
nodes=nodes,
invar=geo.sample_interior(
2 * cfg.batch_size.interior,
bounds={x: bounds_x, y: bounds_y},
),
output_names=["u", "v"],
batch_size=2048,
plotter=InferencerPlotter(),
)
domain.add_inferencer(inferencer)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/plane_displacement/plane_displacement.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
# load training/ test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
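    # Note (assumed convention): the (shift, scale) pairs passed to Key are dataset
    # statistics that the architecture uses to normalize each variable, roughly as
    # (value - shift) / scale.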
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=100,
)
# make datasets
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
# print out training/ test data shapes
for d in (invar_train, outvar_train, invar_test, outvar_test):
for k in d:
print(f"{k}: {d[k].shape}")
decoder_net = instantiate_arch(
cfg=cfg.arch.decoder,
output_keys=output_keys,
)
fno = instantiate_arch(
cfg=cfg.arch.fno,
input_keys=input_keys,
decoder_net=decoder_net,
)
nodes = [fno.make_node("fno")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_FNO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import HDF5GridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_FNO")
def run(cfg: ModulusConfig) -> None:
# [keys]
# load training/ test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
download_FNO_dataset("Darcy_241", outdir="datasets/")
train_path = to_absolute_path(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5"
)
test_path = to_absolute_path(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5"
)
# [keys]
# [datasets]
# make datasets
train_dataset = HDF5GridDataset(
train_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=1000
)
test_dataset = HDF5GridDataset(
test_path, invar_keys=["coeff"], outvar_keys=["sol"], n_examples=100
)
# [datasets]
# [init-model]
# make list of nodes to unroll graph on
decoder_net = instantiate_arch(
cfg=cfg.arch.decoder,
output_keys=output_keys,
)
fno = instantiate_arch(
cfg=cfg.arch.fno,
input_keys=input_keys,
decoder_net=decoder_net,
)
nodes = [fno.make_node("fno")]
# [init-model]
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
num_workers=4, # number of parallel data loaders
)
domain.add_constraint(supervised, "supervised")
# [constraint]
# [validator]
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# [validator]
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_FNO_lazy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import numpy as np
import torch
import torch.nn.functional as F
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.models.layers.spectral_layers import fourier_derivatives
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from modulus.sym.utils.io.vtk import grid_to_vtk
from utilities import download_FNO_dataset, load_FNO_dataset
from ops import dx, ddx
# [pde-loss]
class Darcy(torch.nn.Module):
"Custom Darcy PDE definition for PINO"
def __init__(self, gradient_method: str = "exact"):
super().__init__()
self.gradient_method = str(gradient_method)
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
# get inputs
u = input_var["sol"]
c = input_var["coeff"]
dcdx = input_var["Kcoeff_y"] # data is reversed
dcdy = input_var["Kcoeff_x"]
dxf = 1.0 / u.shape[-2]
dyf = 1.0 / u.shape[-1]
# Compute gradients based on method
# Exact first order and FDM second order
if self.gradient_method == "exact":
dudx_exact = input_var["sol__x"]
dudy_exact = input_var["sol__y"]
dduddx_exact = input_var["sol__x__x"]
dduddy_exact = input_var["sol__y__y"]
# compute darcy equation
darcy = (
1.0
+ (dcdx * dudx_exact)
+ (c * dduddx_exact)
+ (dcdy * dudy_exact)
+ (c * dduddy_exact)
)
# FDM gradients
elif self.gradient_method == "fdm":
dudx_fdm = dx(u, dx=dxf, channel=0, dim=0, order=1, padding="replication")
dudy_fdm = dx(u, dx=dyf, channel=0, dim=1, order=1, padding="replication")
dduddx_fdm = ddx(
u, dx=dxf, channel=0, dim=0, order=1, padding="replication"
)
dduddy_fdm = ddx(
u, dx=dyf, channel=0, dim=1, order=1, padding="replication"
)
# compute darcy equation
darcy = (
1.0
+ (dcdx * dudx_fdm)
+ (c * dduddx_fdm)
+ (dcdy * dudy_fdm)
+ (c * dduddy_fdm)
)
# Fourier derivative
elif self.gradient_method == "fourier":
dim_u_x = u.shape[2]
dim_u_y = u.shape[3]
            u = F.pad(
                u, (0, dim_u_y - 1, 0, dim_u_x - 1), mode="reflect"
            )  # reflect-pad to a smooth even extension before the Fourier derivative
f_du, f_ddu = fourier_derivatives(u, [2.0, 2.0])
dudx_fourier = f_du[:, 0:1, :dim_u_x, :dim_u_y]
dudy_fourier = f_du[:, 1:2, :dim_u_x, :dim_u_y]
dduddx_fourier = f_ddu[:, 0:1, :dim_u_x, :dim_u_y]
dduddy_fourier = f_ddu[:, 1:2, :dim_u_x, :dim_u_y]
# compute darcy equation
darcy = (
1.0
+ (dcdx * dudx_fourier)
+ (c * dduddx_fourier)
+ (dcdy * dudy_fourier)
+ (c * dduddy_fourier)
)
else:
raise ValueError(f"Derivative method {self.gradient_method} not supported.")
# Zero outer boundary
darcy = F.pad(darcy[:, :, 2:-2, 2:-2], [2, 2, 2, 2], "constant", 0)
# Return darcy
output_var = {
"darcy": dxf * darcy,
} # weight boundary loss higher
return output_var
# [pde-loss]
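# Note (illustrative, assumed formulation): the residual above is the Darcy
# problem -div(c grad u) = 1 expanded with the product rule,
#   1 + c_x * u_x + c * u_xx + c_y * u_y + c * u_yy = 0,
# with the derivatives supplied by autodiff ("exact"), finite differences
# ("fdm"), or spectral differentiation ("fourier").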
@modulus.sym.main(config_path="conf", config_name="config_PINO")
def run(cfg: ModulusConfig) -> None:
# [datasets]
# load training/ test data
input_keys = [
Key("coeff", scale=(7.48360e00, 4.49996e00)),
Key("Kcoeff_x"),
Key("Kcoeff_y"),
]
output_keys = [
Key("sol", scale=(5.74634e-03, 3.88433e-03)),
]
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=cfg.custom.ntrain,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=cfg.custom.ntest,
)
# add additional constraining values for darcy variable
outvar_train["darcy"] = np.zeros_like(outvar_train["sol"])
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
# [datasets]
# [init-model]
# Define FNO model
decoder_net = instantiate_arch(
cfg=cfg.arch.decoder,
output_keys=output_keys,
)
fno = instantiate_arch(
cfg=cfg.arch.fno,
input_keys=[input_keys[0]],
decoder_net=decoder_net,
)
if cfg.custom.gradient_method == "exact":
derivatives = [
Key("sol", derivatives=[Key("x")]),
Key("sol", derivatives=[Key("y")]),
Key("sol", derivatives=[Key("x"), Key("x")]),
Key("sol", derivatives=[Key("y"), Key("y")]),
]
fno.add_pino_gradients(
derivatives=derivatives,
domain_length=[1.0, 1.0],
)
# [init-model]
# [init-node]
# Make custom Darcy residual node for PINO
inputs = [
"sol",
"coeff",
"Kcoeff_x",
"Kcoeff_y",
]
if cfg.custom.gradient_method == "exact":
inputs += [
"sol__x",
"sol__y",
]
darcy_node = Node(
inputs=inputs,
outputs=["darcy"],
evaluate=Darcy(gradient_method=cfg.custom.gradient_method),
name="Darcy Node",
)
nodes = [fno.make_node("fno"), darcy_node]
# [init-node]
# [constraint]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# [constraint]
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
requires_grad=True,
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_PINO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus.sym
from modulus.sym.hydra import instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.key import Key
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.solver import Solver
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_FNO_dataset
@modulus.sym.main(config_path="conf", config_name="config_AFNO")
def run(cfg: ModulusConfig) -> None:
# load training/ test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=100,
)
# get training image shape
img_shape = [
next(iter(invar_train.values())).shape[-2],
next(iter(invar_train.values())).shape[-1],
]
# crop out some pixels so that img_shape is divisible by patch_size of AFNO
img_shape = [s - s % cfg.arch.afno.patch_size for s in img_shape]
print(f"cropped img_shape: {img_shape}")
for d in (invar_train, outvar_train, invar_test, outvar_test):
for k in d:
d[k] = d[k][:, :, : img_shape[0], : img_shape[1]]
print(f"{k}: {d[k].shape}")
# make datasets
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
# make list of nodes to unroll graph on
model = instantiate_arch(
input_keys=input_keys,
output_keys=output_keys,
cfg=cfg.arch.afno,
img_shape=img_shape,
)
nodes = [model.make_node(name="AFNO")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_AFNO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
def dx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute first order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
-0.5,
0.0,
0.5,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
-1.0 / 60.0,
3.0 / 20.0,
-3.0 / 4.0,
0.0,
3.0 / 4.0,
-3.0 / 20.0,
1.0 / 60.0,
]
).to(inpt.device)
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
def ddx(inpt, dx, channel, dim, order=1, padding="zeros"):
"Compute second order numerical derivatives of input tensor"
var = inpt[:, channel : channel + 1, :, :]
# get filter
if order == 1:
ddx1D = torch.Tensor(
[
1.0,
-2.0,
1.0,
]
).to(inpt.device)
elif order == 3:
ddx1D = torch.Tensor(
[
1.0 / 90.0,
-3.0 / 20.0,
3.0 / 2.0,
-49.0 / 18.0,
3.0 / 2.0,
-3.0 / 20.0,
1.0 / 90.0,
]
).to(inpt.device)
ddx3D = torch.reshape(ddx1D, shape=[1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# apply convolution
if padding == "zeros":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "constant", 0)
elif padding == "replication":
var = F.pad(var, 4 * [(ddx1D.shape[0] - 1) // 2], "replicate")
output = F.conv2d(var, ddx3D, padding="valid")
output = (1.0 / dx**2) * output
if dim == 0:
output = output[:, :, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2]
elif dim == 1:
output = output[:, :, (ddx1D.shape[0] - 1) // 2 : -(ddx1D.shape[0] - 1) // 2, :]
return output
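# Minimal usage sketch (illustrative, not part of the original module): apply
# the finite-difference helpers above to a random batch of 2D fields. The grid
# spacing and channel indices below are hypothetical.
if __name__ == "__main__":
    field = torch.rand(2, 3, 64, 64)  # N, C, H, W
    grid_dx = 1.0 / 63
    d_dx = dx(field, grid_dx, channel=0, dim=0, order=1, padding="zeros")
    d2_dy2 = ddx(field, grid_dx, channel=1, dim=1, order=3, padding="replication")
    print(d_dx.shape, d2_dy2.shape)  # both torch.Size([2, 1, 64, 64])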
| modulus-sym-main | examples/darcy/ops.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.models.deeponet import DeepONetArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.pix2pix import Pix2PixArch
from modulus.sym.domain.constraint.continuous import DeepONetConstraint
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_deeponet_dataset
@modulus.sym.main(config_path="conf", config_name="config_DeepO")
def run(cfg: ModulusConfig) -> None:
# [datasets]
# load training/ test data
branch_input_keys = [Key("coeff")]
trunk_input_keys = [Key("x"), Key("y")]
output_keys = [Key("sol")]
download_FNO_dataset("Darcy_241", outdir="datasets/")
invar_train, outvar_train = load_deeponet_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in branch_input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_deeponet_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in branch_input_keys],
[k.name for k in output_keys],
n_examples=10,
)
# [datasets]
# [init-model]
# make list of nodes to unroll graph on
branch_net = instantiate_arch(
cfg=cfg.arch.branch,
)
trunk_net = instantiate_arch(
cfg=cfg.arch.trunk,
)
deeponet = instantiate_arch(
cfg=cfg.arch.deeponet,
branch_net=branch_net,
trunk_net=trunk_net,
)
nodes = [deeponet.make_node(name="deepo")]
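    # Hedged note: the DeepONet combines the branch output (a function of the
    # coefficient field) with the trunk output (a function of the query
    # coordinates x, y), typically via an inner product, to predict "sol" at
    # each sampled point.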
# [init-model]
# [constraint]
# make domain
domain = Domain()
# add constraint to domain
data = DeepONetConstraint.from_numpy(
nodes=nodes,
invar=invar_train,
outvar=outvar_train,
batch_size=cfg.batch_size.train,
)
domain.add_constraint(data, "data")
# [constraint]
# [validator]
# add validators
val = PointwiseValidator(
nodes=nodes,
invar=invar_test,
true_outvar=outvar_test,
plotter=None,
)
domain.add_validator(val, "val")
# [validator]
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_DeepO.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import modulus.sym
from modulus.sym.hydra import instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
from modulus.sym.key import Key
from modulus.sym.distributed.manager import DistributedManager
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.solver import Solver
from modulus.sym.utils.io.plotter import GridValidatorPlotter
from utilities import download_FNO_dataset, load_FNO_dataset
import os
import torch.distributed as dist
# Set model parallel size to 2
os.environ["MODEL_PARALLEL_SIZE"] = "2"
@modulus.sym.main(config_path="conf", config_name="config_AFNO_MP")
def run(cfg: ModulusConfig) -> None:
manager = DistributedManager()
# Check that world_size is a multiple of model parallel size
if manager.world_size % 2 != 0:
print(
"WARNING: Total world size not a multiple of model parallel size (2). Exiting..."
)
return
# load training/ test data
input_keys = [Key("coeff", scale=(7.48360e00, 4.49996e00))]
output_keys = [Key("sol", scale=(5.74634e-03, 3.88433e-03))]
# Only rank 0 downloads the dataset to avoid a data race
if manager.rank == 0:
download_FNO_dataset("Darcy_241", outdir="datasets/")
dist.barrier()
# All ranks can safely load the dataset once available
invar_train, outvar_train = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth1.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=1000,
)
invar_test, outvar_test = load_FNO_dataset(
"datasets/Darcy_241/piececonst_r241_N1024_smooth2.hdf5",
[k.name for k in input_keys],
[k.name for k in output_keys],
n_examples=100,
)
# get training image shape
img_shape = next(iter(invar_train.values())).shape[-2:]
# crop out some pixels so that img_shape is divisible by patch_size of AFNO
img_shape = [s - s % cfg.arch.distributed_afno.patch_size for s in img_shape]
print(f"cropped img_shape: {img_shape}")
for d in (invar_train, outvar_train, invar_test, outvar_test):
for k in d:
d[k] = d[k][:, :, : img_shape[0], : img_shape[1]]
print(f"{k}: {d[k].shape}")
# make datasets
train_dataset = DictGridDataset(invar_train, outvar_train)
test_dataset = DictGridDataset(invar_test, outvar_test)
# make list of nodes to unroll graph on
model = instantiate_arch(
input_keys=input_keys,
output_keys=output_keys,
cfg=cfg.arch.distributed_afno,
img_shape=img_shape,
)
nodes = [model.make_node(name="DistributedAFNO")]
# make domain
domain = Domain()
# add constraints to domain
supervised = SupervisedGridConstraint(
nodes=nodes,
dataset=train_dataset,
batch_size=cfg.batch_size.grid,
)
domain.add_constraint(supervised, "supervised")
# add validator
val = GridValidator(
nodes,
dataset=test_dataset,
batch_size=cfg.batch_size.validation,
plotter=GridValidatorPlotter(n_examples=5),
)
domain.add_validator(val, "test")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/darcy/darcy_AFNO_MP.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import zipfile
try:
import gdown
except ImportError:
gdown = None
import scipy.io
import numpy as np
import h5py
from modulus.sym.hydra import to_absolute_path
# list of FNO dataset url ids on drive: https://drive.google.com/drive/folders/1UnbQh2WWc6knEHbLn-ZaXrKUZhp7pjt-
_FNO_datatsets_ids = {
"Darcy_241": "1ViDqN7nc_VCnMackiXv_d7CHZANAFKzV",
"Darcy_421": "1Z1uxG9R8AdAGJprG5STcphysjm56_0Jf",
}
_FNO_dataset_names = {
"Darcy_241": (
"piececonst_r241_N1024_smooth1.hdf5",
"piececonst_r241_N1024_smooth2.hdf5",
),
"Darcy_421": (
"piececonst_r421_N1024_smooth1.hdf5",
"piececonst_r421_N1024_smooth2.hdf5",
),
}
def load_FNO_dataset(path, input_keys, output_keys, n_examples=None):
"Loads a FNO dataset"
if not path.endswith(".hdf5"):
raise Exception(
".hdf5 file required: please use utilities.preprocess_FNO_mat to convert .mat file"
)
# load data
path = to_absolute_path(path)
data = h5py.File(path, "r")
_ks = [k for k in data.keys() if not k.startswith("__")]
print(f"loaded: {path}\navaliable keys: {_ks}")
# parse data
invar, outvar = dict(), dict()
for d, keys in [(invar, input_keys), (outvar, output_keys)]:
for k in keys:
# get data
x = data[k] # N, C, H, W
# cut examples out
if n_examples is not None:
x = x[:n_examples]
# print out normalisation values
print(f"selected key: {k}, mean: {x.mean():.5e}, std: {x.std():.5e}")
d[k] = x
del data
return (invar, outvar)
def load_deeponet_dataset(
path, input_keys, output_keys, n_examples=None, filter_size=8
):
"Loads a deeponet dataset"
# load dataset
invar, outvar = load_FNO_dataset(path, input_keys, output_keys, n_examples)
# reduce shape needed for deeponet
for key, value in invar.items():
invar[key] = value[:, :, ::filter_size, ::filter_size]
for key, value in outvar.items():
outvar[key] = value[:, :, ::filter_size, ::filter_size]
res = next(iter(invar.values())).shape[-1]
nr_points_per_sample = res**2
# tile invar
tiled_invar = {
key: np.concatenate(
[
np.tile(value[i], (nr_points_per_sample, 1, 1, 1))
for i in range(n_examples)
]
)
for key, value in invar.items()
}
# tile outvar
tiled_outvar = {key: value.flatten()[:, None] for key, value in outvar.items()}
    # add coordinate points
x = np.linspace(0.0, 1.0, res)
y = np.linspace(0.0, 1.0, res)
x, y = [a.flatten()[:, None] for a in np.meshgrid(x, y)]
tiled_invar["x"] = np.concatenate(n_examples * [x], axis=0)
tiled_invar["y"] = np.concatenate(n_examples * [y], axis=0)
return (tiled_invar, tiled_outvar)
def download_FNO_dataset(name, outdir="datasets/"):
"Tries to download FNO dataset from drive"
if name not in _FNO_datatsets_ids:
raise Exception(
f"Error: FNO dataset {name} not recognised, select one from {list(_FNO_datatsets_ids.keys())}"
)
id = _FNO_datatsets_ids[name]
outdir = to_absolute_path(outdir) + "/"
namedir = f"{outdir}{name}/"
# skip if already exists
exists = True
for file_name in _FNO_dataset_names[name]:
if not os.path.isfile(namedir + file_name):
exists = False
break
if exists:
return
print(f"FNO dataset {name} not detected, downloading dataset")
# Make sure we have gdown installed
if gdown is None:
raise ModuleNotFoundError("gdown package is required to download the dataset!")
# get output directory
os.makedirs(namedir, exist_ok=True)
# download dataset
zippath = f"{outdir}{name}.zip"
_download_file_from_google_drive(id, zippath)
# unzip
with zipfile.ZipFile(zippath, "r") as f:
f.extractall(namedir)
os.remove(zippath)
# preprocess files
for file in os.listdir(namedir):
if file.endswith(".mat"):
matpath = f"{namedir}{file}"
preprocess_FNO_mat(matpath)
os.remove(matpath)
def _download_file_from_google_drive(id, path):
"Downloads a file from google drive"
# use gdown library to download file
gdown.download(id=id, output=path)
def preprocess_FNO_mat(path):
"Convert a FNO .mat file to a hdf5 file, adding extra dimension to data arrays"
assert path.endswith(".mat")
data = scipy.io.loadmat(path)
ks = [k for k in data.keys() if not k.startswith("__")]
with h5py.File(path[:-4] + ".hdf5", "w") as f:
for k in ks:
x = np.expand_dims(data[k], axis=1) # N, C, H, W
f.create_dataset(
k, data=x, dtype="float32"
) # note h5 files larger than .mat because no compression used
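# Minimal round-trip sketch (illustrative; the file names below are
# hypothetical): write a small .mat file, convert it to HDF5 with
# preprocess_FNO_mat, then read it back with load_FNO_dataset.
if __name__ == "__main__":
    demo = {"coeff": np.random.rand(4, 8, 8), "sol": np.random.rand(4, 8, 8)}
    scipy.io.savemat("demo.mat", demo)
    preprocess_FNO_mat("demo.mat")  # writes demo.hdf5 with arrays of shape (4, 1, 8, 8)
    invar, outvar = load_FNO_dataset("demo.hdf5", ["coeff"], ["sol"], n_examples=2)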
| modulus-sym-main | examples/darcy/utilities.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq, tanh
import numpy as np
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.geometry import Parameterization, Parameter
# geometry params for domain
channel_origin = (-2.5, -0.5, -0.5625)
channel_dim = (5.0, 1.0, 1.125)
heat_sink_base_origin = (-0.75, -0.5, -0.4375)
heat_sink_base_dim = (0.65, 0.05, 0.875)
fin_origin = heat_sink_base_origin
fin_dim = (0.65, 0.8625, 0.0075)
total_fins = 17
flow_box_origin = (-0.85, -0.5, -0.5625)
flow_box_dim = (0.85, 1.0, 1.125)
source_origin = (-0.55, -0.5, -0.125)
source_dim = (0.25, 0.0, 0.25)
# define sympy variables to parameterize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# define geometry
# channel
channel_base = Channel(
channel_origin,
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
channel_void = Channel(
(channel_origin[0], channel_origin[1], channel_origin[2] + channel_dim[2] / 2),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
channel = channel_base - channel_void
# fpga heat sink
heat_sink_base = Box(
heat_sink_base_origin,
(
heat_sink_base_origin[0] + heat_sink_base_dim[0], # base of heat sink
heat_sink_base_origin[1] + heat_sink_base_dim[1],
heat_sink_base_origin[2] + heat_sink_base_dim[2] / 2,
),
)
fin_center = (
fin_origin[0] + fin_dim[0] / 2,
fin_origin[1] + fin_dim[1] / 2,
fin_origin[2] + fin_dim[2] / 2,
)
fin = Box(
fin_origin,
(
fin_origin[0] + fin_dim[0],
fin_origin[1] + fin_dim[1],
fin_origin[2] + fin_dim[2],
),
)
gap = (heat_sink_base_dim[2] - fin_dim[2]) / (total_fins - 1) # gap between fins
fin = fin.repeat(
gap,
repeat_lower=(0, 0, 0),
repeat_higher=(0, 0, total_fins - 1),
center=fin_center,
)
fin_void = Box(
(channel_origin[0], channel_origin[1], channel_origin[2] + channel_dim[2] / 2),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
fin = fin - fin_void
fpga = heat_sink_base + fin
# entire geometry
geo = channel - fpga
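# Hedged note on the CSG pattern above: geometries combine with boolean
# operators, e.g. (illustrative)
#   solid = Box((0, 0, 0), (1, 1, 1))
#   duct = Channel((-2, 0, 0), (2, 1, 1))
#   fluid = duct - solid  # fluid region is the channel minus the obstacle
# Here the same subtraction carves the half heat sink out of the half channel.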
# inlet and outlet
inlet = Plane(
channel_origin,
(
channel_origin[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2] / 2,
),
-1,
)
outlet = Plane(
(channel_origin[0] + channel_dim[0], channel_origin[1], channel_origin[2]),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2] / 2,
),
1,
)
# planes for integral continuity
x_pos = Parameter("x_pos")
integral_plane = Plane(
(x_pos, channel_origin[1], channel_origin[2]),
(x_pos, channel_origin[1] + channel_dim[1], channel_origin[2] + channel_dim[2]),
1,
parameterization=Parameterization({x_pos: (-0.75, 0.0)}),
)
| modulus-sym-main | examples/fpga/laminar_symmetry/fpga_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fpga_geometry import *
import os
import warnings
import csv
import sys
import torch
import numpy as np
from sympy import Symbol, Eq, Abs, tanh, And, Or
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
@modulus.sym.main(config_path="conf_heat", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
# fluid params
nu = 0.02
rho = 1
# heat params
k_fluid = 1.0
k_solid = 5.0
D_solid = 0.10
D_fluid = 0.02
source_grad = 1.5
source_area = source_dim[0] * source_dim[2]
# make list of nodes to unroll graph on
ad = AdvectionDiffusion(T="theta_f", rho=rho, D=D_fluid, dim=3, time=False)
dif = Diffusion(T="theta_s", D=D_solid, dim=3, time=False)
dif_inteface = DiffusionInterface(
"theta_f", "theta_s", k_fluid, k_solid, dim=3, time=False
)
f_grad = GradNormal("theta_f", dim=3, time=False)
s_grad = GradNormal("theta_s", dim=3, time=False)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(input_keys=input_keys, output_keys=output_keys)
thermal_f_net = FourierNetArch(
input_keys=input_keys, output_keys=[Key("theta_f")]
)
thermal_s_net = FourierNetArch(
input_keys=input_keys, output_keys=[Key("theta_s")]
)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
thermal_nodes = (
ad.make_nodes()
+ dif.make_nodes()
+ dif_inteface.make_nodes()
+ f_grad.make_nodes()
+ s_grad.make_nodes()
+ [flow_net.make_node(name="flow_network", optimize=False)]
+ [thermal_f_net.make_node(name="thermal_f_network")]
+ [thermal_s_net.make_node(name="thermal_s_network")]
)
# make flow domain
thermal_domain = Domain()
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=inlet,
outvar={"theta_f": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
)
thermal_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
)
thermal_domain.add_constraint(constraint_outlet, "outlet")
# channel walls insulating
def channel_walls_criteria(invar, params):
sdf = fpga.sdf(invar, params)
return np.logical_and(
np.less(sdf["sdf"], -1e-5),
np.less(invar["z"], channel_origin[2] + channel_dim[2] / 2.0),
)
channel_walls = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=channel,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.channel_walls,
criteria=channel_walls_criteria,
)
thermal_domain.add_constraint(channel_walls, "channel_walls")
# symmetry bc
symmetry_fluid = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"theta_f__z": 0},
batch_size=cfg.batch_size.symmetry_fluid,
criteria=Eq(z, channel_origin[2] + channel_dim[2] / 2.0),
)
thermal_domain.add_constraint(symmetry_fluid, "symmetry_channel_fluid")
symmetry_solid = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"theta_s__z": 0},
batch_size=cfg.batch_size.symmetry_solid,
criteria=Eq(z, channel_origin[2] + channel_dim[2] / 2.0),
)
thermal_domain.add_constraint(symmetry_solid, "symmetry_channel_solid")
# fluid solid interface
def fpga_criteria(invar, params):
sdf = channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
fluid_solid_interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={
"diffusion_interface_dirichlet_theta_f_theta_s": 0,
"diffusion_interface_neumann_theta_f_theta_s": 0,
},
batch_size=cfg.batch_size.fluid_solid_interface,
criteria=fpga_criteria,
)
thermal_domain.add_constraint(fluid_solid_interface, "fluid_solid_interface")
# heat source
sharpen_tanh = 60.0
source_func_xl = (tanh(sharpen_tanh * (x - source_origin[0])) + 1.0) / 2.0
source_func_xh = (
tanh(sharpen_tanh * ((source_origin[0] + source_dim[0]) - x)) + 1.0
) / 2.0
source_func_zl = (tanh(sharpen_tanh * (z - source_origin[2])) + 1.0) / 2.0
source_func_zh = (
tanh(sharpen_tanh * ((source_origin[2] + source_dim[2]) - z)) + 1.0
) / 2.0
gradient_normal = (
source_grad * source_func_xl * source_func_xh * source_func_zl * source_func_zh
)
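    # Worked check (illustrative): each tanh window rises from 0 to 1 over a
    # width of roughly 2 / sharpen_tanh ~ 0.033, so gradient_normal smoothly
    # approximates source_grad inside the source footprint and 0 outside,
    # keeping the boundary condition differentiable for training.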
heat_source = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"normal_gradient_theta_s": gradient_normal},
batch_size=cfg.batch_size.heat_source,
criteria=(Eq(y, source_origin[1]))
& (z < channel_origin[2] + channel_dim[2] / 2.0),
)
thermal_domain.add_constraint(heat_source, "heat_source")
# flow interior low res away from fpga
lr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.lr_flow_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
)
thermal_domain.add_constraint(lr_flow_interior, "lr_flow_interior")
    # flow interior high res near fpga
hr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.hr_flow_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
)
thermal_domain.add_constraint(hr_flow_interior, "hr_flow_interior")
# solid interior
solid_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"diffusion_theta_s": 0},
batch_size=cfg.batch_size.solid_interior,
lambda_weighting={"diffusion_theta_s": 100},
)
thermal_domain.add_constraint(solid_interior, "solid_interior")
# flow validation data
file_path = "../openfoam/fpga_heat_fluid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
"T": "theta_f",
}
filename = to_absolute_path(file_path)
values = np.loadtxt(filename, skiprows=1, delimiter=",", unpack=False)
values = values[
values[:, -1] + channel_origin[2] <= 0.0, :
] # remove redundant data due to symmetry
# get column keys
        with open(filename) as csvfile:
            reader = csv.reader(csvfile)
            first_line = next(iter(reader))
# set dictionary
csv_dict = {}
for i, name in enumerate(first_line):
if mapping is not None:
if name.strip() in mapping.keys():
csv_dict[mapping[name.strip()]] = values[:, i : i + 1]
else:
csv_dict[name.strip()] = values[:, i : i + 1]
openfoam_var = csv_dict
openfoam_var["theta_f"] = (
openfoam_var["theta_f"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_flow_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_thermal_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p", "theta_f"]
}
openfoam_flow_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_thermal_outvar_numpy,
)
thermal_domain.add_validator(
openfoam_flow_validator,
"thermal_flow_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# solid data
file_path = "../openfoam/fpga_heat_solid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "Points:2": "z", "T": "theta_s"}
filename = to_absolute_path(file_path)
values = np.loadtxt(filename, skiprows=1, delimiter=",", unpack=False)
values = values[
values[:, -1] + channel_origin[2] <= 0.0, :
] # remove redundant data due to symmetry
# get column keys
        with open(filename) as csvfile:
            reader = csv.reader(csvfile)
            first_line = next(iter(reader))
# set dictionary
csv_dict = {}
for i, name in enumerate(first_line):
if mapping is not None:
if name.strip() in mapping.keys():
csv_dict[mapping[name.strip()]] = values[:, i : i + 1]
else:
csv_dict[name.strip()] = values[:, i : i + 1]
openfoam_var = csv_dict
openfoam_var["theta_s"] = (
openfoam_var["theta_s"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_outvar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_s"]
}
openfoam_solid_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_solid_numpy,
true_outvar=openfoam_outvar_solid_numpy,
)
thermal_domain.add_validator(
openfoam_solid_validator,
"thermal_solid_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
thermal_slv = Solver(cfg, thermal_domain)
# start thermal solver
thermal_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/laminar_symmetry/fpga_heat.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fpga_geometry import *
import os
import warnings
import csv
import sys
import torch
import modulus.sym
import numpy as np
from sympy import Symbol, Eq, Abs, tanh, And, Or
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, Curl
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
# fluid params
nu = 0.02
rho = 1
inlet_vel = 1.0
volumetric_flow = 1.125 / 2
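    # Worked check: the full channel cross-section is 1.0 x 1.125 and the inlet
    # velocity is 1.0, giving a flow rate of 1.125; the symmetry plane halves
    # the modeled domain, hence 1.125 / 2.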
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu, rho=rho, dim=3, time=False)
normal_dot_vel = NormalDotVec()
equation_nodes = ns.make_nodes() + normal_dot_vel.make_nodes()
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(input_keys=input_keys, output_keys=output_keys)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
flow_nodes = equation_nodes + [flow_net.make_node(name="flow_network")]
# make flow domain
flow_domain = Domain()
# inlet
def channel_sdf(x, y, z):
sdf = channel.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"]
constraint_inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=inlet,
outvar={"u": inlet_vel, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={"u": channel_sdf, "v": 1.0, "w": 1.0}, # weight zero on edges
)
flow_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
)
flow_domain.add_constraint(constraint_outlet, "outlet")
# no slip for channel walls
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
criteria=z < channel_origin[2] + channel_dim[2] / 2.0,
)
flow_domain.add_constraint(no_slip, "no_slip")
# symmetry channel
symmetry = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"w": 0, "u__z": 0, "v__z": 0, "p__z": 0},
batch_size=cfg.batch_size.symmetry,
criteria=Eq(z, channel_origin[2] + channel_dim[2] / 2.0),
)
flow_domain.add_constraint(symmetry, "symmetry")
# flow interior low res away from fpga
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.lr_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
)
flow_domain.add_constraint(lr_interior, "lr_interior")
    # flow interior high res near fpga
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_z": 0, "momentum_y": 0},
batch_size=cfg.batch_size.hr_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
criteria=integral_criteria,
lambda_weighting={"normal_dot_vel": 1.0},
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
# flow data
file_path = "../openfoam/fpga_heat_fluid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
}
filename = to_absolute_path(file_path)
values = np.loadtxt(filename, skiprows=1, delimiter=",", unpack=False)
values = values[
values[:, -1] + channel_origin[2] <= 0.0, :
] # remove redundant data due to symmetry
# get column keys
        with open(filename) as csvfile:
            reader = csv.reader(csvfile)
            first_line = next(iter(reader))
# set dictionary
csv_dict = {}
for i, name in enumerate(first_line):
if mapping is not None:
if name.strip() in mapping.keys():
csv_dict[mapping[name.strip()]] = values[:, i : i + 1]
else:
csv_dict[name.strip()] = values[:, i : i + 1]
openfoam_var = csv_dict
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=flow_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
flow_domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add pressure monitor
invar_front_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] - heat_sink_base_dim[0],
},
)
pressure_monitor = PointwiseMonitor(
invar_front_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
invar_back_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] + 2 * heat_sink_base_dim[0],
},
)
pressure_monitor = PointwiseMonitor(
invar_back_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
# make solver
flow_slv = Solver(cfg, flow_domain)
# start flow solver
flow_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/laminar_symmetry/fpga_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq, tanh
import numpy as np
import yaml
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.geometry import Parameterization, Parameter
# check if the geometry is parameterized
with open("./conf/config.yaml", "r") as file:
yaml_data = yaml.safe_load(file)
parameterized = yaml_data["custom"]["parameterized"]
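# Hedged note: the flag above is expected to live in conf/config.yaml as
# something like (illustrative)
#   custom:
#     parameterized: true
# When enabled, the heat sink length and height become sympy symbols rather
# than fixed values, so one network can be trained across the design range.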
if parameterized:
# geometry parameterization
HS_height = Symbol("HS_height")
HS_length = Symbol("HS_length")
else:
HS_length = 0.65
HS_height = 0.8625
# geometry params for domain
channel_origin = (-2.5, -0.5, -0.5625)
channel_dim = (5.0, 1.0, 1.125)
heat_sink_base_origin = (-0.75, -0.5, -0.4375)
heat_sink_base_dim = (HS_length, 0.05, 0.875)
fin_origin = heat_sink_base_origin
fin_dim = (HS_length, HS_height, 0.0075)
total_fins = 17
flow_box_origin = (-0.85, -0.5, -0.5625)
flow_box_dim = (0.85, 1.0, 1.125)
source_origin = (-0.55, -0.5, -0.125)
source_dim = (0.25, 0.0, 0.25)
# define sympy variables to parameterize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# define geometry
# channel
channel = Channel(
channel_origin,
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
# fpga heat sink
heat_sink_base = Box(
heat_sink_base_origin,
(
heat_sink_base_origin[0] + heat_sink_base_dim[0], # base of heat sink
heat_sink_base_origin[1] + heat_sink_base_dim[1],
heat_sink_base_origin[2] + heat_sink_base_dim[2],
),
)
fin_center = (
fin_origin[0] + fin_dim[0] / 2,
fin_origin[1] + fin_dim[1] / 2,
fin_origin[2] + fin_dim[2] / 2,
)
fin = Box(
fin_origin,
(
fin_origin[0] + fin_dim[0],
fin_origin[1] + fin_dim[1],
fin_origin[2] + fin_dim[2],
),
)
gap = (heat_sink_base_dim[2] - fin_dim[2]) / (total_fins - 1) # gap between fins
fin = fin.repeat(
gap,
repeat_lower=(0, 0, 0),
repeat_higher=(0, 0, total_fins - 1),
center=fin_center,
)
fpga = heat_sink_base + fin
# entire geometry
geo = channel - fpga
# inlet and outlet
inlet = Plane(
channel_origin,
(
channel_origin[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
-1,
)
outlet = Plane(
(channel_origin[0] + channel_dim[0], channel_origin[1], channel_origin[2]),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
1,
)
# planes for integral continuity
x_pos = Parameter("x_pos")
x_pos_range = {x_pos: (-0.75, 0.0)}
integral_plane = Plane(
(x_pos, channel_origin[1], channel_origin[2]),
(x_pos, channel_origin[1] + channel_dim[1], channel_origin[2] + channel_dim[2]),
1,
parameterization=Parameterization(x_pos_range),
)
| modulus-sym-main | examples/fpga/turbulent/fpga_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fpga_geometry import *
import os
import warnings
import torch
import modulus.sym
from sympy import Symbol, Eq, Abs, tanh, And, Or
import numpy as np
import sys
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
@modulus.sym.main(config_path="conf_heat", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
#############
# Real Params
#############
# fluid params
fluid_viscosity = 1.84e-05 # kg/m-s
fluid_density = 1.1614 # kg/m3
# boundary params
length_scale = 0.04 # m
inlet_velocity = 5.24386 # m/s
##############################
# Nondimensionalization Params
##############################
# fluid params
nu = fluid_viscosity / (fluid_density * inlet_velocity * length_scale)
rho = 1
normalize_inlet_vel = 1.0
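    # Worked check: nu = 1.84e-05 / (1.1614 * 5.24386 * 0.04) ~ 7.55e-05, i.e.
    # a Reynolds number of 1 / nu ~ 13240, which matches the "re13239.6" tag in
    # the OpenFOAM validation file names below.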
# heat params
k_fluid = 1.0
k_solid = 5.0
D_solid = 0.10
D_fluid = 0.02
source_grad = 1.5
source_area = source_dim[0] * source_dim[2]
# make list of nodes to unroll graph on
ad = AdvectionDiffusion(T="theta_f", rho=rho, D=D_fluid, dim=3, time=False)
dif = Diffusion(T="theta_s", D=D_solid, dim=3, time=False)
dif_inteface = DiffusionInterface(
"theta_f", "theta_s", k_fluid, k_solid, dim=3, time=False
)
f_grad = GradNormal("theta_f", dim=3, time=False)
s_grad = GradNormal("theta_s", dim=3, time=False)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
if cfg.custom.parameterized:
input_keys += [Key("HS_height"), Key("HS_length")]
HS_height_range = (0.40625, 0.8625)
HS_length_range = (0.35, 0.65)
param_ranges = {HS_height: HS_height_range, HS_length: HS_length_range}
validation_param_ranges = {HS_height: 0.8625, HS_length: 0.65}
fixed_param_ranges = {
HS_height: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(*HS_height_range)
),
HS_length: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(*HS_length_range)
),
}
else:
param_ranges, validation_param_ranges, fixed_param_ranges = {}, {}, {}
# determine inputs outputs of the network
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
frequencies=("axis", [i for i in range(35)]),
frequencies_params=("axis", [i for i in range(35)]),
)
thermal_f_net = FourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_f")],
frequencies=("axis", [i for i in range(35)]),
frequencies_params=("axis", [i for i in range(35)]),
)
thermal_s_net = FourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_s")],
frequencies=("axis", [i for i in range(35)]),
frequencies_params=("axis", [i for i in range(35)]),
)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
thermal_nodes = (
ad.make_nodes()
+ dif.make_nodes()
+ dif_inteface.make_nodes()
+ f_grad.make_nodes()
+ s_grad.make_nodes()
+ [flow_net.make_node(name="flow_network", optimize=False)]
+ [thermal_f_net.make_node(name="thermal_f_network")]
+ [thermal_s_net.make_node(name="thermal_s_network")]
)
# make flow domain
thermal_domain = Domain()
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=inlet,
outvar={"theta_f": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(constraint_outlet, "outlet")
# channel walls insulating
def channel_walls_criteria(invar, params):
sdf = fpga.sdf(invar, params)
return np.less(sdf["sdf"], -1e-5)
channel_walls = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=channel,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.channel_walls,
criteria=channel_walls_criteria,
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(channel_walls, "channel_walls")
# fluid solid interface
def fpga_criteria(invar, params):
sdf = channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
fluid_solid_interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={
"diffusion_interface_dirichlet_theta_f_theta_s": 0,
"diffusion_interface_neumann_theta_f_theta_s": 0,
},
batch_size=cfg.batch_size.fluid_solid_interface,
criteria=fpga_criteria,
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(fluid_solid_interface, "fluid_solid_interface")
# heat source
sharpen_tanh = 60.0
source_func_xl = (tanh(sharpen_tanh * (x - source_origin[0])) + 1.0) / 2.0
source_func_xh = (
tanh(sharpen_tanh * ((source_origin[0] + source_dim[0]) - x)) + 1.0
) / 2.0
source_func_zl = (tanh(sharpen_tanh * (z - source_origin[2])) + 1.0) / 2.0
source_func_zh = (
tanh(sharpen_tanh * ((source_origin[2] + source_dim[2]) - z)) + 1.0
) / 2.0
gradient_normal = (
source_grad * source_func_xl * source_func_xh * source_func_zl * source_func_zh
)
heat_source = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"normal_gradient_theta_s": gradient_normal},
batch_size=cfg.batch_size.heat_source,
criteria=Eq(y, source_origin[1]),
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(heat_source, "heat_source")
# flow interior low res away from fpga
lr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.lr_flow_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(lr_flow_interior, "lr_flow_interior")
    # flow interior high res near fpga
hr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.hr_flow_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(hr_flow_interior, "hr_flow_interior")
# solid interior
solid_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"diffusion_theta_s": 0},
batch_size=cfg.batch_size.solid_interior,
lambda_weighting={"diffusion_theta_s": 100},
parameterization=param_ranges,
batch_per_epoch=5000,
)
thermal_domain.add_constraint(solid_interior, "solid_interior")
# flow validation data
file_path = "../openfoam/FPGA_re13239.6_tanh_OF_blockMesh_fullFake.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
"T": "theta_f",
}
openfoam_var = csv_to_dict(
to_absolute_path(file_path),
mapping,
)
openfoam_var["theta_f"] = (
openfoam_var["theta_f"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] / length_scale + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] / length_scale + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] / length_scale + channel_origin[2]
openfoam_var["u"] = openfoam_var["u"] / inlet_velocity
openfoam_var["v"] = openfoam_var["v"] / inlet_velocity
openfoam_var["w"] = openfoam_var["w"] / inlet_velocity
openfoam_var["p"] = (openfoam_var["p"]) / (inlet_velocity**2 * fluid_density)
if cfg.custom.parameterized:
openfoam_var["HS_height"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_height")]
)
openfoam_var["HS_length"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_length")]
)
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z", "HS_height", "HS_length"]
}
else:
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z"]
}
openfoam_flow_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_thermal_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p", "theta_f"]
}
openfoam_flow_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_thermal_outvar_numpy,
)
thermal_domain.add_validator(
openfoam_flow_validator,
"thermal_flow_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# solid data
file_path = "../openfoam/FPGA_re13239.6_tanh_OF_blockMesh_solid_fullFake.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "Points:2": "z", "T": "theta_s"}
openfoam_var = csv_to_dict(
to_absolute_path(file_path),
mapping,
)
openfoam_var["theta_s"] = (
openfoam_var["theta_s"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] / length_scale + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] / length_scale + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] / length_scale + channel_origin[2]
if cfg.custom.parameterized:
openfoam_var["HS_height"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_height")]
)
openfoam_var["HS_length"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_length")]
)
openfoam_invar_solid_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z", "HS_height", "HS_length"]
}
else:
openfoam_invar_solid_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z"]
}
openfoam_outvar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_s"]
}
openfoam_solid_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_solid_numpy,
true_outvar=openfoam_outvar_solid_numpy,
)
thermal_domain.add_validator(
openfoam_solid_validator,
"thermal_solid_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add peak temperature monitor
invar_heat_source = fpga.sample_boundary(
10000,
criteria=Eq(y, source_origin[1]),
parameterization=fixed_param_ranges,
)
temperature_monitor = PointwiseMonitor(
invar_heat_source,
output_names=["theta_s"],
metrics={"peak_temp": lambda var: torch.max(var["theta_s"])},
nodes=thermal_nodes,
)
thermal_domain.add_monitor(temperature_monitor)
# make solver
thermal_slv = Solver(cfg, thermal_domain)
# start thermal solver
thermal_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/turbulent/fpga_heat.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fpga_geometry import *
import os
import warnings
import sys
import torch
import modulus.sym
from sympy import Symbol, Eq, Abs, tanh, And, Or
import numpy as np
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, Curl
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
#############
# Real Params
#############
# fluid params
fluid_viscosity = 1.84e-05 # kg/m-s
fluid_density = 1.1614 # kg/m3
# boundary params
length_scale = 0.04 # m
inlet_velocity = 5.24386 # m/s
##############################
# Nondimensionalization Params
##############################
# fluid params
nu = fluid_viscosity / (fluid_density * inlet_velocity * length_scale)
rho = 1
normalize_inlet_vel = 1.0
# heat params
D_solid = 0.1
D_fluid = 0.02
inlet_T = 0
source_grad = 1.5
source_area = source_dim[0] * source_dim[2]
u_profile = (
normalize_inlet_vel
* tanh((0.5 - Abs(y)) / 0.02)
* tanh((0.5625 - Abs(z)) / 0.02)
)
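    # Worked check: at the channel centerline (y = 0, z = 0) the tanh factors
    # are tanh(25) and tanh(28.1), both ~1.0, so u ~ normalize_inlet_vel; at
    # the walls (|y| = 0.5 or |z| = 0.5625) a factor vanishes, so the profile
    # decays smoothly to the no-slip value of 0.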
volumetric_flow = 1.0668 # value via integration of inlet profile
# make list of nodes to unroll graph on
ze = ZeroEquation(nu=nu, dim=3, time=False, max_distance=0.5)
ns = NavierStokes(nu=ze.equations["nu"], rho=rho, dim=3, time=False)
normal_dot_vel = NormalDotVec()
equation_nodes = ns.make_nodes() + ze.make_nodes() + normal_dot_vel.make_nodes()
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
if cfg.custom.parameterized:
input_keys += [Key("HS_height"), Key("HS_length")]
HS_height_range = (0.40625, 0.8625)
HS_length_range = (0.35, 0.65)
param_ranges = {HS_height: HS_height_range, HS_length: HS_length_range}
validation_param_ranges = {HS_height: 0.8625, HS_length: 0.65}
fixed_param_ranges = {
HS_height: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(*HS_height_range)
),
HS_length: lambda batch_size: np.full(
(batch_size, 1), np.random.uniform(*HS_length_range)
),
}
else:
param_ranges, validation_param_ranges, fixed_param_ranges = {}, {}, {}
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
frequencies=("axis", [i for i in range(35)]),
frequencies_params=("axis", [i for i in range(35)]),
)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
flow_nodes = equation_nodes + [flow_net.make_node(name="flow_network")]
# make flow domain
flow_domain = Domain()
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=inlet,
outvar={"u": u_profile, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={"u": 1.0, "v": 1.0, "w": 1.0},
batch_per_epoch=5000,
)
flow_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
batch_per_epoch=5000,
)
flow_domain.add_constraint(constraint_outlet, "outlet")
# no slip for channel walls
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
batch_per_epoch=5000,
)
flow_domain.add_constraint(no_slip, "no_slip")
# flow interior low res away from fpga
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.lr_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
batch_per_epoch=5000,
)
flow_domain.add_constraint(lr_interior, "lr_interior")
    # flow interior high res near fpga
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_z": 0, "momentum_y": 0},
batch_size=cfg.batch_size.hr_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
batch_per_epoch=5000,
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
criteria=integral_criteria,
lambda_weighting={"normal_dot_vel": 1.0},
parameterization={**x_pos_range, **param_ranges},
batch_per_epoch=5000,
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
# flow data
# validation data fluid
file_path = "../openfoam/FPGA_re13239.6_tanh_OF_blockMesh_fullFake.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
}
openfoam_var = csv_to_dict(
to_absolute_path(file_path),
mapping,
)
# normalize values
openfoam_var["x"] = openfoam_var["x"] / length_scale + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] / length_scale + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] / length_scale + channel_origin[2]
openfoam_var["u"] = openfoam_var["u"] / inlet_velocity
openfoam_var["v"] = openfoam_var["v"] / inlet_velocity
openfoam_var["w"] = openfoam_var["w"] / inlet_velocity
openfoam_var["p"] = (openfoam_var["p"]) / (inlet_velocity**2 * fluid_density)
if cfg.custom.parameterized:
openfoam_var["HS_height"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_height")]
)
openfoam_var["HS_length"] = (
np.ones_like(openfoam_var["x"])
* validation_param_ranges[Symbol("HS_length")]
)
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z", "HS_height", "HS_length"]
}
else:
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "z"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=flow_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
flow_domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add pressure monitor
invar_front_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] - 0.65,
**fixed_param_ranges,
},
)
pressure_monitor = PointwiseMonitor(
invar_front_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
invar_back_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] + 2 * 0.65,
**fixed_param_ranges,
},
)
pressure_monitor = PointwiseMonitor(
invar_back_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
# make solver
flow_slv = Solver(cfg, flow_domain)
# start flow solver
flow_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/turbulent/fpga_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq, tanh
import numpy as np
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.geometry import Parameterization, Parameter
# geometry params for domain
channel_origin = (-2.5, -0.5, -0.5625)
channel_dim = (5.0, 1.0, 1.125)
heat_sink_base_origin = (-0.75, -0.5, -0.4375)
heat_sink_base_dim = (0.65, 0.05, 0.875)
fin_origin = heat_sink_base_origin
fin_dim = (0.65, 0.8625, 0.0075)
total_fins = 17
flow_box_origin = (-0.85, -0.5, -0.5625)
flow_box_dim = (0.85, 1.0, 1.125)
source_origin = (-0.55, -0.5, -0.125)
source_dim = (0.25, 0.0, 0.25)
# define sympy variables to parameterize domain curves
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# define geometry
# channel
channel = Channel(
channel_origin,
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
)
# fpga heat sink
heat_sink_base = Box(
heat_sink_base_origin,
(
heat_sink_base_origin[0] + heat_sink_base_dim[0], # base of heat sink
heat_sink_base_origin[1] + heat_sink_base_dim[1],
heat_sink_base_origin[2] + heat_sink_base_dim[2],
),
)
fin_center = (
fin_origin[0] + fin_dim[0] / 2,
fin_origin[1] + fin_dim[1] / 2,
fin_origin[2] + fin_dim[2] / 2,
)
fin = Box(
fin_origin,
(
fin_origin[0] + fin_dim[0],
fin_origin[1] + fin_dim[1],
fin_origin[2] + fin_dim[2],
),
)
gap = (heat_sink_base_dim[2] - fin_dim[2]) / (total_fins - 1) # gap between fins
fin = fin.repeat(
gap,
repeat_lower=(0, 0, 0),
repeat_higher=(0, 0, total_fins - 1),
center=fin_center,
)
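    # tile the single fin total_fins (17) times along z with the computed
    # gap, keeping the array centered on the heat sink base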
fpga = heat_sink_base + fin
# entire geometry
geo = channel - fpga
# inlet and outlet
inlet = Plane(
channel_origin,
(
channel_origin[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
-1,
)
outlet = Plane(
(channel_origin[0] + channel_dim[0], channel_origin[1], channel_origin[2]),
(
channel_origin[0] + channel_dim[0],
channel_origin[1] + channel_dim[1],
channel_origin[2] + channel_dim[2],
),
1,
)
# planes for integral continuity
x_pos = Parameter("x_pos")
x_pos_range = {x_pos: (-0.75, 0.0)}
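# x_pos slides the integral-continuity plane along x through (-0.75, 0.0),
# the region containing the heat sink, so mass conservation can be enforced
# at several channel cross sections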
integral_plane = Plane(
(x_pos, channel_origin[1], channel_origin[2]),
(x_pos, channel_origin[1] + channel_dim[1], channel_origin[2] + channel_dim[2]),
1,
parameterization=Parameterization(x_pos_range),
)
| modulus-sym-main | examples/fpga/laminar/fpga_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fpga_geometry import *
import os
import warnings
import sys
import torch
from sympy import Symbol, Eq, Abs, tanh, And, Or
import numpy as np
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, Curl
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.diffusion import Diffusion, DiffusionInterface
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.siren import SirenArch
from modulus.sym.models.modified_fourier_net import ModifiedFourierNetArch
from modulus.sym.models.dgm import DGMArch
@modulus.sym.main(config_path="conf_heat", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
# fluid params
nu = 0.02
rho = 1
# heat params
k_fluid = 1.0
k_solid = 5.0
D_solid = 0.10
D_fluid = 0.02
source_grad = 1.5
source_area = source_dim[0] * source_dim[2]
# make list of nodes to unroll graph on
ad = AdvectionDiffusion(T="theta_f", rho=rho, D=D_fluid, dim=3, time=False)
dif = Diffusion(T="theta_s", D=D_solid, dim=3, time=False)
    dif_interface = DiffusionInterface(
        "theta_f", "theta_s", k_fluid, k_solid, dim=3, time=False
    )
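    # the interface PDE imposes temperature continuity (Dirichlet) and
    # conductive-flux continuity (Neumann) across the fluid-solid boundary,
    # weighted by the conductivities k_fluid and k_solid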
f_grad = GradNormal("theta_f", dim=3, time=False)
s_grad = GradNormal("theta_s", dim=3, time=False)
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
    equation_nodes = []
    if cfg.custom.exact_continuity:
        c = Curl(("a", "b", "c"), ("u", "v", "w"))
        equation_nodes += c.make_nodes()
        output_keys = [Key("a"), Key("b"), Key("c"), Key("p")]
else:
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FullyConnectedArch":
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_f_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=[Key("theta_f")],
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_s_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=[Key("theta_s")],
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_f_net = FourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_f")],
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_s_net = FourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_s")],
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "SirenArch":
flow_net = SirenArch(
input_keys=input_keys,
output_keys=output_keys,
normalization={"x": (-2.5, 2.5), "y": (-2.5, 2.5), "z": (-2.5, 2.5)},
)
thermal_f_net = SirenArch(input_keys=input_keys, output_keys=[Key("theta_f")])
thermal_s_net = SirenArch(input_keys=input_keys, output_keys=[Key("theta_s")])
elif cfg.custom.arch == "ModifiedFourierNetArch":
flow_net = ModifiedFourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_f_net = ModifiedFourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_f")],
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_s_net = ModifiedFourierNetArch(
input_keys=input_keys,
output_keys=[Key("theta_s")],
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "DGMArch":
flow_net = DGMArch(
input_keys=input_keys,
output_keys=output_keys,
layer_size=128,
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_f_net = DGMArch(
input_keys=input_keys,
output_keys=[Key("theta_f")],
layer_size=128,
adaptive_activations=cfg.custom.adaptive_activations,
)
thermal_s_net = DGMArch(
input_keys=input_keys,
output_keys=[Key("theta_s")],
layer_size=128,
adaptive_activations=cfg.custom.adaptive_activations,
)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
    thermal_nodes = (
        equation_nodes
        + ad.make_nodes()
        + dif.make_nodes()
        + dif_interface.make_nodes()
+ f_grad.make_nodes()
+ s_grad.make_nodes()
+ [flow_net.make_node(name="flow_network", optimize=False)]
+ [thermal_f_net.make_node(name="thermal_f_network")]
+ [thermal_s_net.make_node(name="thermal_s_network")]
)
# make flow domain
thermal_domain = Domain()
# inlet
constraint_inlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=inlet,
outvar={"theta_f": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(constraint_outlet, "outlet")
# channel walls insulating
def channel_walls_criteria(invar, params):
sdf = fpga.sdf(invar, params)
return np.less(sdf["sdf"], -1e-5)
channel_walls = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=channel,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.channel_walls,
criteria=channel_walls_criteria,
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(channel_walls, "channel_walls")
# fluid solid interface
def fpga_criteria(invar, params):
sdf = channel.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
fluid_solid_interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={
"diffusion_interface_dirichlet_theta_f_theta_s": 0,
"diffusion_interface_neumann_theta_f_theta_s": 0,
},
batch_size=cfg.batch_size.fluid_solid_interface,
criteria=fpga_criteria,
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(fluid_solid_interface, "fluid_solid_interface")
# heat source
sharpen_tanh = 60.0
source_func_xl = (tanh(sharpen_tanh * (x - source_origin[0])) + 1.0) / 2.0
source_func_xh = (
tanh(sharpen_tanh * ((source_origin[0] + source_dim[0]) - x)) + 1.0
) / 2.0
source_func_zl = (tanh(sharpen_tanh * (z - source_origin[2])) + 1.0) / 2.0
source_func_zh = (
tanh(sharpen_tanh * ((source_origin[2] + source_dim[2]) - z)) + 1.0
) / 2.0
gradient_normal = (
source_grad * source_func_xl * source_func_xh * source_func_zl * source_func_zh
)
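    # the tanh products build a smooth indicator of the rectangular source
    # footprint: the imposed normal gradient is ~source_grad inside the
    # source region and decays rapidly to ~0 outside it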
heat_source = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"normal_gradient_theta_s": gradient_normal},
batch_size=cfg.batch_size.heat_source,
criteria=Eq(y, source_origin[1]),
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(heat_source, "heat_source")
# flow interior low res away from fpga
lr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.lr_flow_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(lr_flow_interior, "lr_flow_interior")
    # flow interior high res near fpga
hr_flow_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.hr_flow_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(hr_flow_interior, "hr_flow_interior")
# solid interior
solid_interior = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=fpga,
outvar={"diffusion_theta_s": 0},
batch_size=cfg.batch_size.solid_interior,
lambda_weighting={"diffusion_theta_s": 100},
quasirandom=cfg.custom.quasirandom,
)
thermal_domain.add_constraint(solid_interior, "solid_interior")
# flow validation data
file_path = "../openfoam/fpga_heat_fluid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
"T": "theta_f",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["theta_f"] = (
openfoam_var["theta_f"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_flow_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_thermal_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p", "theta_f"]
}
openfoam_flow_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_thermal_outvar_numpy,
)
thermal_domain.add_validator(
openfoam_flow_validator,
"thermal_flow_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# solid data
file_path = "../openfoam/fpga_heat_solid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "Points:2": "z", "T": "theta_s"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["theta_s"] = (
openfoam_var["theta_s"] / 273.15 - 1.0
) # normalize heat
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_outvar_solid_numpy = {
key: value for key, value in openfoam_var.items() if key in ["theta_s"]
}
openfoam_solid_validator = PointwiseValidator(
nodes=thermal_nodes,
invar=openfoam_invar_solid_numpy,
true_outvar=openfoam_outvar_solid_numpy,
)
thermal_domain.add_validator(
openfoam_solid_validator,
"thermal_solid_data",
)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add peak temperature monitor
invar_heat_source = fpga.sample_boundary(10000, criteria=Eq(y, source_origin[1]))
temperature_monitor = PointwiseMonitor(
invar_heat_source,
output_names=["theta_s"],
metrics={"peak_temp": lambda var: torch.max(var["theta_s"])},
nodes=thermal_nodes,
)
thermal_domain.add_monitor(temperature_monitor)
# make solver
thermal_slv = Solver(cfg, thermal_domain)
# start thermal solver
thermal_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/laminar/fpga_heat.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import sys
import torch
from sympy import Symbol, Eq, Abs, tanh, And, Or
import numpy as np
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, Curl
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.siren import SirenArch
from modulus.sym.models.modified_fourier_net import ModifiedFourierNetArch
from modulus.sym.models.dgm import DGMArch
from fpga_geometry import *
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for simulation
# fluid params
nu = 0.02
rho = 1
inlet_vel = 1.0
volumetric_flow = 1.125
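    # volumetric flow = inlet_vel * channel cross-section area (1.0 * 1.125)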
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu, rho=rho, dim=3, time=False)
normal_dot_vel = NormalDotVec()
equation_nodes = ns.make_nodes() + normal_dot_vel.make_nodes()
# determine inputs outputs of the network
input_keys = [Key("x"), Key("y"), Key("z")]
if cfg.custom.exact_continuity:
c = Curl(("a", "b", "c"), ("u", "v", "w"))
equation_nodes += c.make_nodes()
output_keys = [Key("a"), Key("b"), Key("c"), Key("p")]
else:
output_keys = [Key("u"), Key("v"), Key("w"), Key("p")]
# select the network and the specific configs
if cfg.custom.arch == "FullyConnectedArch":
flow_net = FullyConnectedArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "FourierNetArch":
flow_net = FourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "SirenArch":
flow_net = SirenArch(
input_keys=input_keys,
output_keys=output_keys,
normalization={"x": (-2.5, 2.5), "y": (-2.5, 2.5), "z": (-2.5, 2.5)},
)
elif cfg.custom.arch == "ModifiedFourierNetArch":
flow_net = ModifiedFourierNetArch(
input_keys=input_keys,
output_keys=output_keys,
adaptive_activations=cfg.custom.adaptive_activations,
)
elif cfg.custom.arch == "DGMArch":
flow_net = DGMArch(
input_keys=input_keys,
output_keys=output_keys,
layer_size=128,
adaptive_activations=cfg.custom.adaptive_activations,
)
else:
sys.exit(
"Network not configured for this script. Please include the network in the script"
)
flow_nodes = equation_nodes + [flow_net.make_node(name="flow_network")]
# make flow domain
flow_domain = Domain()
# inlet
def channel_sdf(x, y, z):
sdf = channel.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"]
constraint_inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=inlet,
outvar={"u": inlet_vel, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
criteria=Eq(x, channel_origin[0]),
lambda_weighting={"u": channel_sdf, "v": 1.0, "w": 1.0}, # weight zero on edges
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(constraint_inlet, "inlet")
# outlet
constraint_outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
criteria=Eq(x, channel_origin[0] + channel_dim[0]),
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(constraint_outlet, "outlet")
# no slip for channel walls
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(no_slip, "no_slip")
# flow interior low res away from fpga
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.lr_interior,
criteria=Or(x < flow_box_origin[0], x > (flow_box_origin[0] + flow_box_dim[0])),
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(lr_interior, "lr_interior")
    # flow interior high res near fpga
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_z": 0, "momentum_y": 0},
batch_size=cfg.batch_size.hr_interior,
criteria=And(
x > flow_box_origin[0], x < (flow_box_origin[0] + flow_box_dim[0])
),
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
"momentum_z": Symbol("sdf"),
},
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
criteria=integral_criteria,
lambda_weighting={"normal_dot_vel": 1.0},
quasirandom=cfg.custom.quasirandom,
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
# flow data
file_path = "../openfoam/fpga_heat_fluid0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"Points:2": "z",
"U:0": "u",
"U:1": "v",
"U:2": "w",
"p_rgh": "p",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] = openfoam_var["x"] + channel_origin[0]
openfoam_var["y"] = openfoam_var["y"] + channel_origin[1]
openfoam_var["z"] = openfoam_var["z"] + channel_origin[2]
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y", "z"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "w", "p"]
}
openfoam_validator = PointwiseValidator(
nodes=flow_nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
requires_grad=cfg.custom.exact_continuity,
)
flow_domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add pressure monitor
invar_front_pressure = integral_plane.sample_boundary(
1024,
parameterization={x_pos: heat_sink_base_origin[0] - heat_sink_base_dim[0]},
)
pressure_monitor = PointwiseMonitor(
invar_front_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
invar_back_pressure = integral_plane.sample_boundary(
1024,
parameterization={
x_pos: heat_sink_base_origin[0] + 2 * heat_sink_base_dim[0],
},
)
pressure_monitor = PointwiseMonitor(
invar_back_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(pressure_monitor)
# make solver
flow_slv = Solver(cfg, flow_domain)
# start flow solver
flow_slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fpga/laminar/fpga_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.linear_elasticity import LinearElasticityPlaneStress
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# specify Panel properties
E = 73.0 * 10**9 # Pa
nu = 0.33
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu)) # Pa
mu_real = E / (2 * (1 + nu)) # Pa
lambda_ = lambda_ / mu_real # Dimensionless
mu = 1.0 # Dimensionless
# make list of nodes to unroll graph on
le = LinearElasticityPlaneStress(lambda_=lambda_, mu=mu)
elasticity_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("sigma_hoop")],
output_keys=[
Key("u"),
Key("v"),
Key("sigma_xx"),
Key("sigma_yy"),
Key("sigma_xy"),
],
cfg=cfg.arch.fully_connected,
)
nodes = le.make_nodes() + [elasticity_net.make_node(name="elasticity_network")]
# add constraints to solver
# make geometry
x, y, sigma_hoop = Symbol("x"), Symbol("y"), Symbol("sigma_hoop")
panel_origin = (-0.5, -0.9)
panel_dim = (1, 1.8) # Panel width is the characteristic length.
window_origin = (-0.125, -0.2)
window_dim = (0.25, 0.4)
panel_aux1_origin = (-0.075, -0.2)
panel_aux1_dim = (0.15, 0.4)
panel_aux2_origin = (-0.125, -0.15)
panel_aux2_dim = (0.25, 0.3)
hr_zone_origin = (-0.2, -0.4)
hr_zone_dim = (0.4, 0.8)
circle_nw_center = (-0.075, 0.15)
circle_ne_center = (0.075, 0.15)
circle_se_center = (0.075, -0.15)
circle_sw_center = (-0.075, -0.15)
circle_radius = 0.05
panel = Rectangle(
panel_origin, (panel_origin[0] + panel_dim[0], panel_origin[1] + panel_dim[1])
)
window = Rectangle(
window_origin,
(window_origin[0] + window_dim[0], window_origin[1] + window_dim[1]),
)
panel_aux1 = Rectangle(
panel_aux1_origin,
(
panel_aux1_origin[0] + panel_aux1_dim[0],
panel_aux1_origin[1] + panel_aux1_dim[1],
),
)
panel_aux2 = Rectangle(
panel_aux2_origin,
(
panel_aux2_origin[0] + panel_aux2_dim[0],
panel_aux2_origin[1] + panel_aux2_dim[1],
),
)
hr_zone = Rectangle(
hr_zone_origin,
(hr_zone_origin[0] + hr_zone_dim[0], hr_zone_origin[1] + hr_zone_dim[1]),
)
circle_nw = Circle(circle_nw_center, circle_radius)
circle_ne = Circle(circle_ne_center, circle_radius)
circle_se = Circle(circle_se_center, circle_radius)
circle_sw = Circle(circle_sw_center, circle_radius)
corners = (
window - panel_aux1 - panel_aux2 - circle_nw - circle_ne - circle_se - circle_sw
)
window = window - corners
geo = panel - window
hr_geo = geo & hr_zone
# Parameterization
characteristic_length = panel_dim[0]
characteristic_disp = 0.001 * window_dim[0]
sigma_normalization = characteristic_length / (mu_real * characteristic_disp)
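    # physical stresses (Pa) are nondimensionalized by the scale
    # mu_real * characteristic_disp / characteristic_length, consistent
    # with the dimensionless mu = 1 used above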
sigma_hoop_lower = 46 * 10**6 * sigma_normalization
sigma_hoop_upper = 56.5 * 10**6 * sigma_normalization
sigma_hoop_range = (sigma_hoop_lower, sigma_hoop_upper)
param_ranges = {sigma_hoop: sigma_hoop_range}
inference_param_ranges = {sigma_hoop: 46 * 10**6 * sigma_normalization}
# bounds
bounds_x = (panel_origin[0], panel_origin[0] + panel_dim[0])
bounds_y = (panel_origin[1], panel_origin[1] + panel_dim[1])
hr_bounds_x = (hr_zone_origin[0], hr_zone_origin[0] + hr_zone_dim[0])
hr_bounds_y = (hr_zone_origin[1], hr_zone_origin[1] + hr_zone_dim[1])
# make domain
domain = Domain()
# left wall
panel_left = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"traction_x": 0.0, "traction_y": 0.0},
batch_size=cfg.batch_size.panel_left,
criteria=Eq(x, panel_origin[0]),
parameterization=param_ranges,
)
domain.add_constraint(panel_left, "panel_left")
# right wall
panel_right = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"traction_x": 0.0, "traction_y": 0.0},
batch_size=cfg.batch_size.panel_right,
criteria=Eq(x, panel_origin[0] + panel_dim[0]),
parameterization=param_ranges,
)
domain.add_constraint(panel_right, "panel_right")
# bottom wall
panel_bottom = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"v": 0.0},
batch_size=cfg.batch_size.panel_bottom,
criteria=Eq(y, panel_origin[1]),
parameterization=param_ranges,
)
domain.add_constraint(panel_bottom, "panel_bottom")
# corner point
panel_corner = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0.0},
batch_size=cfg.batch_size.panel_corner,
criteria=Eq(x, panel_origin[0])
& (y > panel_origin[1])
& (y < panel_origin[1] + 1e-3),
parameterization=param_ranges,
)
domain.add_constraint(panel_corner, "panel_corner")
# top wall
panel_top = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"traction_x": 0.0, "traction_y": sigma_hoop},
batch_size=cfg.batch_size.panel_top,
criteria=Eq(y, panel_origin[1] + panel_dim[1]),
parameterization=param_ranges,
)
domain.add_constraint(panel_top, "panel_top")
    # panel window
panel_window = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=window,
outvar={"traction_x": 0.0, "traction_y": 0.0},
batch_size=cfg.batch_size.panel_window,
parameterization=param_ranges,
)
domain.add_constraint(panel_window, "panel_window")
# low-resolution interior
lr_interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"equilibrium_x": 0.0,
"equilibrium_y": 0.0,
"stress_disp_xx": 0.0,
"stress_disp_yy": 0.0,
"stress_disp_xy": 0.0,
},
batch_size=cfg.batch_size.lr_interior,
bounds={x: bounds_x, y: bounds_y},
lambda_weighting={
"equilibrium_x": Symbol("sdf"),
"equilibrium_y": Symbol("sdf"),
"stress_disp_xx": Symbol("sdf"),
"stress_disp_yy": Symbol("sdf"),
"stress_disp_xy": Symbol("sdf"),
},
parameterization=param_ranges,
)
domain.add_constraint(lr_interior, "lr_interior")
# high-resolution interior
hr_interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=hr_geo,
outvar={
"equilibrium_x": 0.0,
"equilibrium_y": 0.0,
"stress_disp_xx": 0.0,
"stress_disp_yy": 0.0,
"stress_disp_xy": 0.0,
},
batch_size=cfg.batch_size.hr_interior,
bounds={x: hr_bounds_x, y: hr_bounds_y},
lambda_weighting={
"equilibrium_x": Symbol("sdf"),
"equilibrium_y": Symbol("sdf"),
"stress_disp_xx": Symbol("sdf"),
"stress_disp_yy": Symbol("sdf"),
"stress_disp_xy": Symbol("sdf"),
},
parameterization=param_ranges,
)
domain.add_constraint(hr_interior, "hr_interior")
# add inferencer data
invar_numpy = geo.sample_interior(
100000,
bounds={x: bounds_x, y: bounds_y},
parameterization=inference_param_ranges,
)
point_cloud_inference = PointwiseInferencer(
nodes=nodes,
invar=invar_numpy,
output_names=["u", "v", "sigma_xx", "sigma_yy", "sigma_xy"],
batch_size=4096,
)
domain.add_inferencer(point_cloud_inference, "inf_data")
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/fuselage_panel/panel.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.key import Key
from modulus.sym.eq.pdes.diffusion import Diffusion
from modulus.sym.utils.vpinn.test_functions import (
RBF_Function,
Test_Function,
Legendre_test,
Trig_test,
)
from modulus.sym.utils.vpinn.integral import tensor_int, Quad_Rect, Quad_Collection
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
VariationalConstraint,
)
from modulus.sym.dataset import DictVariationalDataset
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.loss import Loss
from sympy import Symbol, Heaviside, Eq
import numpy as np
import quadpy
# custom variational loss
class DGLoss(Loss):
def __init__(self, test_function):
super().__init__()
# make test function
self.test_function = test_function
if test_function == "rbf":
self.v = RBF_Function(dim=2, diff_list=["grad"])
self.eps = 10.0
elif test_function == "legendre":
self.v = Test_Function(
name_ord_dict={
Legendre_test: [k for k in range(10)],
Trig_test: [k for k in range(5)],
},
diff_list=["grad"],
)
def forward(
self,
list_invar,
list_outvar,
step: int,
):
# calculate test function
if self.test_function == "rbf":
v_outside = self.v.eval_test(
"v",
x=list_invar[0]["x"],
y=list_invar[0]["y"],
x_center=list_invar[3]["x"],
y_center=list_invar[3]["y"],
eps=self.eps,
)
v_center = self.v.eval_test(
"v",
x=list_invar[1]["x"],
y=list_invar[1]["y"],
x_center=list_invar[3]["x"],
y_center=list_invar[3]["y"],
eps=self.eps,
)
v_interior = self.v.eval_test(
"v",
x=list_invar[2]["x"],
y=list_invar[2]["y"],
x_center=list_invar[3]["x"],
y_center=list_invar[3]["y"],
eps=self.eps,
)
vx_interior = self.v.eval_test(
"vx",
x=list_invar[2]["x"],
y=list_invar[2]["y"],
x_center=list_invar[3]["x"],
y_center=list_invar[3]["y"],
eps=self.eps,
)
vy_interior = self.v.eval_test(
"vy",
x=list_invar[2]["x"],
y=list_invar[2]["y"],
x_center=list_invar[3]["x"],
y_center=list_invar[3]["y"],
eps=self.eps,
)
elif self.test_function == "legendre":
v_outside = self.v.eval_test(
"v", x=list_invar[0]["x"], y=list_invar[0]["y"]
)
v_center = self.v.eval_test("v", x=list_invar[1]["x"], y=list_invar[1]["y"])
v_interior = self.v.eval_test(
"v", x=list_invar[2]["x"], y=list_invar[2]["y"]
)
vx_interior = self.v.eval_test(
"vx", x=list_invar[2]["x"], y=list_invar[2]["y"]
)
vy_interior = self.v.eval_test(
"vy", x=list_invar[2]["x"], y=list_invar[2]["y"]
)
# calculate du/dn on surface
dudn = (
list_invar[0]["normal_x"] * list_outvar[0]["u__x"]
+ list_invar[0]["normal_y"] * list_outvar[0]["u__y"]
)
# form integrals of interior
f = -2.0
uxvx = list_outvar[2]["u__x"] * vx_interior
uyvy = list_outvar[2]["u__y"] * vy_interior
fv = f * v_interior
# calculate integrals
int_outside = tensor_int(list_invar[0]["area"], v_outside, dudn)
int_center = tensor_int(list_invar[1]["area"], 2.0 * v_center)
int_interior = tensor_int(list_invar[2]["area"], uxvx + uyvy - fv)
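        # weak form of the Poisson problem: the volume integral of
        # grad(u).grad(v) - f*v must balance the exterior flux integral of
        # v*du/dn plus the prescribed flux jump (2*v here) across x = 0.5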
losses = {
"variational_poisson": torch.abs(int_interior - int_center - int_outside)
.pow(2)
.sum()
}
return losses
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
df = Diffusion(T="u", D=1.0, Q=-2.0, dim=2, time=False)
dg_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = df.make_nodes() + [dg_net.make_node(name="dg_net")]
# add constraints to solver
x, y = Symbol("x"), Symbol("y")
# make geometry
rec_1 = Rectangle((0, 0), (0.5, 1))
rec_2 = Rectangle((0.5, 0), (1, 1))
rec = rec_1 + rec_2
# make training domain for traditional PINN
eps = 0.02
rec_pinn = Rectangle((0 + eps, 0 + eps), (0.5 - eps, 1 - eps)) + Rectangle(
(0.5 + eps, 0 + eps), (1 - eps, 1 - eps)
)
# make domain
domain = Domain()
# PINN constraint
# interior = PointwiseInteriorConstraint(
# nodes=nodes,
# geometry=rec_pinn,
# outvar={"diffusion_u": 0},
# batch_size=4000,
# bounds={x: (0 + eps, 1 - eps), y: (0 + eps, 1 - eps)},
# lambda_weighting={"diffusion_u": 1.},
# )
# domain.add_constraint(interior, "interior")
# exterior boundary
g = ((x - 1) ** 2 * Heaviside(x - 0.5)) + (x**2 * Heaviside(-x + 0.5))
boundary = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": g},
batch_size=cfg.batch_size.boundary,
lambda_weighting={"u": 10.0}, # weight edges to be zero
criteria=~Eq(x, 0.5),
)
domain.add_constraint(boundary, "boundary")
batch_per_epoch = 100
variational_datasets = {}
batch_sizes = {}
    # Exterior boundary (middle line x = 0.5 excluded)
invar = rec.sample_boundary(
batch_per_epoch * cfg.batch_size.boundary, criteria=~Eq(x, 0.5)
)
invar["area"] *= batch_per_epoch
variational_datasets["boundary1"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y"],
)
batch_sizes["boundary1"] = cfg.batch_size.boundary
# Middle line boundary
invar = rec_1.sample_boundary(
batch_per_epoch * cfg.batch_size.boundary, criteria=Eq(x, 0.5)
)
invar["area"] *= batch_per_epoch
variational_datasets["boundary2"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x"],
)
batch_sizes["boundary2"] = cfg.batch_size.boundary
# Interior points
if cfg.training.use_quadratures:
paras = [
[
[[0, 0.5], [0, 1]],
20,
True,
lambda n: quadpy.c2.product(quadpy.c1.gauss_legendre(n)),
],
[
[[0.5, 1], [0, 1]],
20,
True,
lambda n: quadpy.c2.product(quadpy.c1.gauss_legendre(n)),
],
]
quad_rec = Quad_Collection(Quad_Rect, paras)
invar = {
"x": quad_rec.points_numpy[:, 0:1],
"y": quad_rec.points_numpy[:, 1:2],
"area": np.expand_dims(quad_rec.weights_numpy, -1),
}
variational_datasets["interior"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y"],
)
batch_sizes["interior"] = min(
[quad_rec.points_numpy.shape[0], cfg.batch_size.interior]
)
else:
invar = rec.sample_interior(
batch_per_epoch * cfg.batch_size.interior,
bounds=Bounds({x: (0.0, 1.0), y: (0.0, 1.0)}),
)
invar["area"] *= batch_per_epoch
variational_datasets["interior"] = DictVariationalDataset(
invar=invar,
outvar_names=["u__x", "u__y"],
)
batch_sizes["interior"] = cfg.batch_size.interior
# Add points for RBF
if cfg.training.test_function == "rbf":
invar = rec.sample_interior(
batch_per_epoch * cfg.batch_size.rbf_functions,
bounds=Bounds({x: (0.0, 1.0), y: (0.0, 1.0)}),
)
invar["area"] *= batch_per_epoch
variational_datasets["rbf"] = DictVariationalDataset(
invar=invar,
outvar_names=[],
)
batch_sizes["rbf"] = cfg.batch_size.rbf_functions
variational_constraint = VariationalConstraint(
datasets=variational_datasets,
batch_sizes=batch_sizes,
nodes=nodes,
num_workers=1,
loss=DGLoss(cfg.training.test_function),
)
domain.add_constraint(variational_constraint, "variational")
# add validation data
delta_x = 0.01
delta_y = 0.01
x0 = np.arange(0, 1, delta_x)
y0 = np.arange(0, 1, delta_y)
x_grid, y_grid = np.meshgrid(x0, y0)
x_grid = np.expand_dims(x_grid.flatten(), axis=-1)
y_grid = np.expand_dims(y_grid.flatten(), axis=-1)
u = np.where(x_grid <= 0.5, x_grid**2, (x_grid - 1) ** 2)
invar_numpy = {"x": x_grid, "y": y_grid}
outvar_numpy = {"u": u}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=invar_numpy,
true_outvar=outvar_numpy,
plotter=ValidatorPlotter(),
)
domain.add_validator(openfoam_validator)
# add inferencer data
inferencer = PointwiseInferencer(
nodes=nodes,
invar=invar_numpy,
output_names=["u"],
batch_size=2048,
plotter=InferencerPlotter(),
)
domain.add_inferencer(inferencer)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/discontinuous_galerkin/dg/dg.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import hydra
from omegaconf import DictConfig, OmegaConf
import modulus.sym
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.key import Key
from modulus.sym.eq.pdes.diffusion import Diffusion
from modulus.sym.utils.vpinn.test_functions import (
Test_Function,
Legendre_test,
Trig_test,
)
from modulus.sym.utils.vpinn.integral import tensor_int, Quad_Rect, Quad_Collection
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
VariationalDomainConstraint,
)
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.utils.io.plotter import InferencerPlotter
from modulus.sym.loss import Loss
from sympy import Symbol
from modulus.sym.constants import tf_dt
# custom variational loss
class DGLoss(Loss):
def __init__(self):
super().__init__()
# make test function
self.v = Test_Function(
name_ord_dict={
Legendre_test: [k for k in range(10)],
Trig_test: [k for k in range(5)],
},
box=[[-0.5, -0.5], [0.5, 0.5]],
diff_list=["grad"],
)
def forward(
self,
list_invar,
list_outvar,
step: int,
):
# calculate test function
v_outside = self.v.eval_test("v", x=list_invar[0]["x"], y=list_invar[0]["y"])
vx_interior = self.v.eval_test("vx", x=list_invar[1]["x"], y=list_invar[1]["y"])
vy_interior = self.v.eval_test("vy", x=list_invar[1]["x"], y=list_invar[1]["y"])
v_source = self.v.eval_test(
"v",
x=torch.zeros(1, 1, device=list_invar[1]["x"].device, dtype=tf_dt),
y=torch.zeros(1, 1, device=list_invar[1]["x"].device, dtype=tf_dt),
)
# calculate du/dn on surface
dudn = (
list_invar[0]["normal_x"] * list_outvar[0]["u__x"]
+ list_invar[0]["normal_y"] * list_outvar[0]["u__y"]
)
# form integrals of interior
uxvx = list_outvar[1]["u__x"] * vx_interior
uyvy = list_outvar[1]["u__y"] * vy_interior
fv = v_source
# calculate integrals
int_outside = tensor_int(list_invar[0]["area"], v_outside, dudn)
int_interior = tensor_int(list_invar[1]["area"], uxvx + uyvy) - fv
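        # the point source f = delta(x)delta(y) integrates exactly against
        # each test function, int(f*v) = v(0, 0), so fv needs no quadrature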
losses = {
"variational_poisson": torch.abs(int_interior - int_outside).pow(2).sum()
}
return losses
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
df = Diffusion(T="u", D=1.0, dim=2, time=False)
dg_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u")],
cfg=cfg.arch.fully_connected,
)
nodes = df.make_nodes() + [dg_net.make_node(name="dg_net")]
# add constraints to solver
x, y = Symbol("x"), Symbol("y")
# make geometry
rec = Rectangle((-0.5, -0.5), (0.5, 0.5))
# make domain
domain = Domain()
Wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0.0},
lambda_weighting={"u": 10.0},
batch_size=cfg.batch_size.boundary,
fixed_dataset=False,
batch_per_epoch=1,
quasirandom=True,
)
domain.add_constraint(Wall, name="OutsideWall")
# PINN constraint
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"diffusion_u": 0.0},
batch_size=cfg.batch_size.interior,
bounds=Bounds({x: (-0.5, 0.5), y: (-0.5, 0.5)}),
lambda_weighting={"diffusion_u": (x**2 + y**2)},
fixed_dataset=False,
batch_per_epoch=1,
quasirandom=True,
)
domain.add_constraint(interior, "interior")
    # Variational constraint
variational = VariationalDomainConstraint(
nodes=nodes,
geometry=rec,
outvar_names=["u__x", "u__y"],
boundary_batch_size=cfg.batch_size.boundary,
interior_batch_size=cfg.batch_size.interior,
interior_bounds=Bounds({x: (-0.5, 0.5), y: (-0.5, 0.5)}),
loss=DGLoss(),
batch_per_epoch=1,
quasirandom=True,
)
domain.add_constraint(variational, "variational")
# add inferencer data
inferencer = PointwiseInferencer(
nodes=nodes,
invar=rec.sample_interior(10000),
output_names=["u"],
batch_size=2048,
plotter=InferencerPlotter(),
)
domain.add_inferencer(inferencer)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/discontinuous_galerkin/point_source/point_source.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, Eq, Abs
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.utils.io import (
csv_to_dict,
ValidatorPlotter,
InferencerPlotter,
)
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = ns.make_nodes() + [flow_net.make_node(name="flow_network")]
# add constraints to solver
# make geometry
height = 0.1
width = 0.1
x, y = Symbol("x"), Symbol("y")
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make ldc domain
ldc_domain = Domain()
# top wall
top_wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 1.0, "v": 0},
batch_size=cfg.batch_size.TopWall,
lambda_weighting={"u": 1.0 - 20 * Abs(x), "v": 1.0}, # weight edges to be zero
criteria=Eq(y, height / 2),
)
ldc_domain.add_constraint(top_wall, "top_wall")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.NoSlip,
criteria=y < height / 2,
)
ldc_domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.Interior,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
)
ldc_domain.add_constraint(interior, "interior")
# add validator
file_path = "openfoam/cavity_uniformVel0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += -width / 2 # center OpenFoam data
openfoam_var["y"] += -height / 2 # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
plotter=ValidatorPlotter(),
)
ldc_domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=1024,
plotter=InferencerPlotter(),
)
ldc_domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, ldc_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/ldc/ldc_2d.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from sympy import Symbol, Eq, Abs
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.models.fully_connected import FullyConnectedArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.constraint import Constraint
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.graph import Graph
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ns = NavierStokes(nu=0.01, rho=1.0, dim=2, time=False)
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = ns.make_nodes() + [flow_net.make_node(name="flow_network")]
# make importance model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
importance_model_graph = Graph(
nodes,
invar=[Key("x"), Key("y")],
req_names=[
Key("u", derivatives=[Key("x")]),
Key("u", derivatives=[Key("y")]),
Key("v", derivatives=[Key("x")]),
Key("v", derivatives=[Key("y")]),
],
).to(device)
def importance_measure(invar):
outvar = importance_model_graph(
Constraint._set_device(invar, device=device, requires_grad=True)
)
importance = (
outvar["u__x"] ** 2
+ outvar["u__y"] ** 2
+ outvar["v__x"] ** 2
+ outvar["v__y"] ** 2
) ** 0.5 + 10
return importance.cpu().detach().numpy()
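    # points are drawn with density proportional to the velocity-gradient
    # magnitude; the +10 floor keeps a nonzero sampling probability everywhere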
# add constraints to solver
# make geometry
height = 0.1
width = 0.1
x, y = Symbol("x"), Symbol("y")
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make ldc domain
ldc_domain = Domain()
# top wall
top_wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 1.0, "v": 0},
batch_size=cfg.batch_size.TopWall,
lambda_weighting={"u": 1.0 - 20 * Abs(x), "v": 1.0}, # weight edges to be zero
criteria=Eq(y, height / 2),
importance_measure=importance_measure,
)
ldc_domain.add_constraint(top_wall, "top_wall")
# no slip
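    # the criteria y < height/2 picks out the three stationary walls,
    # i.e. every boundary point except the moving lid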
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.NoSlip,
criteria=y < height / 2,
importance_measure=importance_measure,
)
ldc_domain.add_constraint(no_slip, "no_slip")
# interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.Interior,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
importance_measure=importance_measure,
)
ldc_domain.add_constraint(interior, "interior")
# add validator
file_path = "openfoam/cavity_uniformVel0.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {"Points:0": "x", "Points:1": "y", "U:0": "u", "U:1": "v", "p": "p"}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += -width / 2 # center OpenFoam data
openfoam_var["y"] += -height / 2 # center OpenFoam data
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
plotter=ValidatorPlotter(),
)
ldc_domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p"],
batch_size=1024,
plotter=InferencerPlotter(),
)
ldc_domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, ldc_domain)
# start solver
slv.solve()


if __name__ == "__main__":
run()
| modulus-sym-main | examples/ldc/ldc_2d_importance_sampling.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import warnings
from sympy import Symbol, Eq, Abs
import torch
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.utils.io.plotter import ValidatorPlotter, InferencerPlotter
from modulus.sym.key import Key
@modulus.sym.main(config_path="conf_zeroEq", config_name="config")
def run(cfg: ModulusConfig) -> None:
# add constraints to solver
# make geometry
height = 0.1
width = 0.1
x, y = Symbol("x"), Symbol("y")
rec = Rectangle((-width / 2, -height / 2), (width / 2, height / 2))
# make list of nodes to unroll graph on
ze = ZeroEquation(nu=1e-4, dim=2, time=False, max_distance=height / 2)
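    # the zero-equation (algebraic) turbulence model provides a symbolic
    # effective viscosity "nu" that is passed straight into Navier-Stokes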
ns = NavierStokes(nu=ze.equations["nu"], rho=1.0, dim=2, time=False)
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes() + ze.make_nodes() + [flow_net.make_node(name="flow_network")]
)
# make ldc domain
ldc_domain = Domain()
# top wall
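    # the weight 1 - 20*|x| tapers the lid-velocity constraint to zero at the
    # corners (x = +/-0.05), softening the velocity discontinuity there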
top_wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 1.5, "v": 0},
batch_size=cfg.batch_size.TopWall,
lambda_weighting={"u": 1.0 - 20 * Abs(x), "v": 1.0}, # weight edges to be zero
criteria=Eq(y, height / 2),
)
ldc_domain.add_constraint(top_wall, "top_wall")
# no slip
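    # stationary walls: every boundary point below the moving lid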
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=rec,
outvar={"u": 0, "v": 0},
batch_size=cfg.batch_size.NoSlip,
criteria=y < height / 2,
)
ldc_domain.add_constraint(no_slip, "no_slip")
# interior
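    # SDF derivatives are precomputed because the zero-equation model depends
    # on the wall distance (sdf); the sdf lambda weighting also de-emphasizes
    # residuals close to the walls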
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=rec,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.Interior,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
)
ldc_domain.add_constraint(interior, "interior")
# add validator
file_path = "openfoam/cavity_uniformVel_zeroEqn_refined.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"U:0": "u",
"U:1": "v",
"p": "p",
"d": "sdf",
"nuT": "nu",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["x"] += -width / 2 # center OpenFoam data
openfoam_var["y"] += -height / 2 # center OpenFoam data
openfoam_var["nu"] += 1e-4 # effective viscosity
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "sdf"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "nu"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
batch_size=1024,
plotter=ValidatorPlotter(),
requires_grad=True,
)
ldc_domain.add_validator(openfoam_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=openfoam_invar_numpy,
output_names=["u", "v", "p", "nu"],
batch_size=1024,
plotter=InferencerPlotter(),
requires_grad=True,
)
ldc_domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# add monitors
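    # area-weighted integrals of the absolute PDE residuals over 4000 interior
    # points, logged during training as global mass/momentum imbalance metrics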
global_monitor = PointwiseMonitor(
rec.sample_interior(4000),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
ldc_domain.add_monitor(global_monitor)
# make solver
slv = Solver(cfg, ldc_domain)
# start solver
slv.solve()


if __name__ == "__main__":
run()
| modulus-sym-main | examples/ldc/ldc_2d_zeroEq.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from modulus.sym.geometry.tessellation import Tessellation
from modulus.sym.geometry.primitives_3d import Plane
from modulus.sym.utils.io.vtk import var_to_polyvtk


if __name__ == "__main__":
# number of points to sample
nr_points = 100000
    # make tessellated geometry from an STL file
geo = Tessellation.from_stl("./stl_files/tessellated_example.stl")
    # tessellated geometries can be combined with primitives
cut_plane = Plane((0, -1, -1), (0, 1, 1))
geo = geo & cut_plane
# sample geometry for plotting in Paraview
s = geo.sample_boundary(nr_points=nr_points)
var_to_polyvtk(s, "tessellated_boundary")
print("Repeated Surface Area: {:.3f}".format(np.sum(s["area"])))
s = geo.sample_interior(nr_points=nr_points, compute_sdf_derivatives=True)
var_to_polyvtk(s, "tessellated_interior")
print("Repeated Volume: {:.3f}".format(np.sum(s["area"])))
| modulus-sym-main | examples/geometry/tessellated_example.py |