python_code | repo_name | file_path
---|---|---|
import json
import ipyvuetify as v
from .appLoader import AppLoader
from .common import load_template
# update the CSS a bit
# get_ipython().run_cell_magic(
# "HTML",
# "",
# "<style>\n.jp-Cell {\n margin:unset;\n padding: unset;\n}\n.jp-Cell:not(.jp-mod-noOutputs) .jp-Cell-outputWrapper{\n margin:unset;\n}\n.jp-Notebook {\n margin:unset;\n padding: unset;\n}\n.p-Widget {\n width: 100%;\n}\n</style>",
# )
# load the app configuration
with open("app.json") as f:
j = json.load(f)
theapp = AppLoader(j)
# with theapp.app_output:
display(theapp)
| modulus-toolchain-master | mpc/nvapp/app.py |
import ipyvuetify as v
import traitlets as tr
from .common import load_template, reload_module
import ipywidgets as ipw
import time
class AppLoader(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/app-loader.vue")).tag(sync=True)
apps = tr.List(["widget1", "widget2"]).tag(sync=True)
selected_app = tr.Unicode("").tag(sync=True)
info = tr.Unicode("").tag(sync=True)
app_dialog = tr.Bool(False).tag(sync=True)
wsz = tr.Dict({}).tag(sync=True)
output_dialog = tr.Bool(False).tag(sync=True)
loading_app = tr.Bool(False).tag(sync=True)
loading_app_result = tr.Unicode("").tag(sync=True)
def __init__(self, appcfg, *ag, **kargs):
super().__init__(*ag, **kargs)
self.appcfg = appcfg
self.app_box = ipw.VBox([])
self.app_output = ipw.Output()
self.components = {"mycontent": self.app_box, "app-output": self.app_output}
self.apps = self.appcfg["apps"]
self.selected_app = self.apps[0]
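# Note (added comment): ipyvuetify's VuetifyTemplate exposes methods named
# vue_<name> to the Vue template, so vue_reload/vue_clear_output below can be
# invoked from app-loader.vue as reload()/clear_output().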
def vue_clear_output(self, data):
self.app_output.clear_output()
def vue_reload(self, data):
try:
self.app_output.clear_output()
self.loading_app_result = ""
self.loading_app = True
with self.app_output:
try:
t0 = time.time()
print(f"Loading {self.selected_app}")
self.info = ""
mod = reload_module(self.selected_app)
self.app = mod.new(output=self.app_output)
self.app_box.children = (self.app,)
self.loading_app_result = "success"
t1 = time.time()
print(f"Loaded {self.selected_app} in {t1-t0:.1f} s")
except Exception as e:
self.loading_app_result = "error"
self.info = f"Error: check out the output"
raise e
finally:
self.loading_app = False
except Exception as e:
self.info = f"{e} "
finally:
self.loading_app = False
def vue_pressed_r(self, data):
self.selected_app = "Pressed R" + str(data)
if data == "h":
self.m.center = [-70, 10]
| modulus-toolchain-master | mpc/nvapp/appLoader.py |
from .app import new
| modulus-toolchain-master | mpc/demoYAMLv1/__init__.py |
import os, time
import ipywidgets as ipw
import ipyvuetify as v
import time
import traitlets as tr
import os
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
var2name = {"lr": "Learning Rate"}
var2hint = {
"eps": "Numerical threshold below which numbers are considered 0",
"betas": "Need two fo those; inside [0,1]",
}
def load_config():
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path) as f:
return yaml.safe_load(f)
def save_config(conf):
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path, "w") as f:
return yaml.safe_dump(conf, f)
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
def config2dict(ctype):
choice = list(config_types[ctype].keys())[0]
conf = config_types[ctype][choice]
d = {k: v["default"] for k, v in conf.items() if "default" in v}
return d
def config2dictV2(ctype):
d = {}
for k, v in ctype.items():
if v["type"] == "option":
assert v["default"] in v["choices"], f"wrong default in {k}"
d[k] = config2dictV2(v["choices"][v["default"]])
d[k]["__selected__"] = v["default"]
elif v["type"] == "group":
d[k] = config2dictV2(v["default"])
else:
d[k] = v["default"]
return d
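# Illustrative example (added for exposition; the schema below is hypothetical):
# config2dictV2 flattens a schema of "option"/"group"/base entries into a dict of
# defaults, recording each chosen option under "__selected__".
#
# _schema = {
#     "optimizer": {
#         "type": "option",
#         "default": "adam",
#         "choices": {"adam": {"lr": {"type": "float", "default": 0.001}}},
#     },
#     "batch_size": {"type": "int", "default": 512},
# }
# config2dictV2(_schema)
# # -> {"optimizer": {"lr": 0.001, "__selected__": "adam"}, "batch_size": 512}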
def config2UI(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-option.vue"
fname_grp = "vue-templates/config-from-yaml-group.vue"
fname_bas = "vue-templates/config-from-yaml-base.vue"
l = []
for k, v in schema.items():
print("[config2UI]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
else: # base
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
return l
def config2UIfast(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-fast.vue"
l = []
for k, v in schema.items():
print("[config2UIfast]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UIfast(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UIfast(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
# else: # base
# w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
# l.append(w)
return l
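# Note (added comment): config2UIfast mirrors config2UI but skips creating widgets
# for base-type leaves (the commented-out else branch below); those fields are
# presumably rendered directly by config-from-yaml-fast.vue, which is what makes
# this variant faster to build.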
# config_types = load_yaml("config_types.yaml")
class ConfigFromYAMLClass(v.VuetifyTemplate):
template = tr.Unicode(
load_template("vue-templates/config-from-yaml-option.vue")
).tag(sync=True)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
key = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
def __init__(self, key, cdict, schema, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
self.template = load_template(template_path)
self.cdict = cdict
self.schema = schema
self.key = key
self.vbox = ipw.VBox([])
self.components = {"yaml-items": self.vbox}
def vue_update_cdict(self, data):
with self.app.output:
print("update", self.key)
self.app.update_cdict()
def vue_update_choice(self, selection):
with self.app.output:
print("selection", selection)
print("before")
print(self.cdict)
print("-" * 20)
self.schema[self.key]["default"] = selection
nschema = self.schema[self.key]["choices"][selection]
self.cdict = config2dictV2(self.schema)
print(self.cdict)
self.vbox.children = config2UIfast(nschema, self.cdict[self.key], self.app)
class ConfigParent(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/config-parent.vue")).tag(
sync=True
)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
stageid = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
show_cdict = tr.Bool(False).tag(sync=True)
def __init__(self, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
schema = load_yaml("config_types_v2.yaml")
# customize schema to reflect project
arch = schema["arch"].copy()
del schema["arch"]
eqn_list = [f"eq{i}" for i in range(5)]
nn_list = ["net1", "net2"]
for nn_var in nn_list:
schema[nn_var] = arch.copy()
schema[nn_var]["label"] = f"[{nn_var}] NN Architecture"
# equation constraints
eqc = {"type": "group", "default": {}}
for eqn in eqn_list:
eqc["default"][eqn] = {"type": "bool", "default": True}
schema["Equation Constraints"] = eqc
eqc = {"type": "group", "default": {}}
for nn_var in nn_list:
eqc["default"][f"{nn_var} Trainable"] = {"type": "bool", "default": True}
schema["Neural Networks"] = eqc
self.schema = schema
######
self.cdict = config2dictV2(schema)
self.components = {"stage-config": ipw.VBox(), "stage-config-fast": ipw.VBox()}
t0 = time.time()
self.components["stage-config"].children = config2UI(
schema, self.cdict, self.app
)
t1 = time.time()
self.components["stage-config-fast"].children = config2UIfast(
schema, self.cdict, self.app
)
t2 = time.time()
print(f"stage-config {t1-t0:.3f} s")
print(f"stage-config-fast {t2-t1:.3f} s")
def select_stage(self, stageid):
import json
self.stageid = stageid
stage = self.app.config["modulus_project"]["training"]["stages"][stageid]
if "data" not in stage:
stage_data = config2dictV2(self.schema.copy())
else:
stage_data = stage["data"]
self.cdict = json.loads(json.dumps(stage_data))
self.components["stage-config-fast"].children = config2UIfast(
self.schema, self.cdict, self.app
)
# self.components["stage-config"].children = config2UIfast(
# self.schema, self.cdict, self.app
# )
def vue_update_cdict(self, data):
with self.app.output:
def update_cdict(w):
"Recursively update the chain of dicts"
if w.schema[w.key]["type"] in ["option", "group"]:
for ww in w.vbox.children:
if ww.schema[ww.key]["type"] in ["int", "float"]:
fn = eval(ww.schema[ww.key]["type"])
ww.cdict[ww.key] = fn(ww.cdict[ww.key])
w.cdict[w.key][ww.key] = ww.cdict[ww.key]
update_cdict(ww)
d = self.cdict.copy()
for w in self.components["stage-config"].children:
update_cdict(w)
d[w.key] = w.cdict[w.key]
self.cdict = {}
self.cdict = d
class App(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/app.vue")).tag(sync=True)
# projname = tr.Unicode(os.environ["MPC_PROJECT_PATH"]).tag(sync=True)
# config = tr.Dict(load_config()).tag(sync=True)
# v2n = tr.Dict(var2name).tag(sync=True)
# v2hint = tr.Dict(var2hint).tag(sync=True)
# loading_problem = tr.Bool(False).tag(sync=True)
problemstr = tr.Unicode("").tag(sync=True)
problem_dialog = tr.Bool(False).tag(sync=True)
def __init__(self, *ag, output=None, **kargs):
super().__init__(*ag, **kargs)
self.output = output # for error and other messages
self.components = {
"config": ConfigParent(app=self),
}
def refresh_config(self):
self.config = load_config()
def vue_load_problem(self, data):
self.loading_problem = True
t0 = time.time()
res = os.popen("cd $MPC_PROJECT_PATH; mtc show problem").read()
t1 = time.time()
self.problemstr = (
f"[{os.environ['MPC_PROJECT_PATH']}] Fetched in {t1-t0:.3f} s\n" + res
)
self.loading_problem = False
def new(output=None):
"""Creates a new app"""
return App(output=output)
| modulus-toolchain-master | mpc/demoYAMLv1/app.py |
from .app import new
| modulus-toolchain-master | mpc/mpc/__init__.py |
import os, time
import ipywidgets as ipw
import ipyvuetify as v
import time
import traitlets as tr
import os
import sys
sys.path.append(os.environ["MPC_PATH"] + "../mtc")
print(sys.path)
from mtc.config_utils import customize_schema, config2dictV2
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
var2name = {"lr": "Learning Rate"}
var2hint = {
"eps": "Numerical threshold below which numbers are considered 0",
"betas": "Need two fo those; inside [0,1]",
}
def load_config():
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path) as f:
return yaml.safe_load(f)
def save_config(conf):
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path, "w") as f:
return yaml.safe_dump(conf, f)
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
def config2dict(ctype):
choice = list(config_types[ctype].keys())[0]
conf = config_types[ctype][choice]
d = {k: v["default"] for k, v in conf.items() if "default" in v}
return d
# def config2dictV2(ctype):
# d = {}
# for k, v in ctype.items():
# if v["type"] == "option":
# assert v["default"] in v["choices"], f"wrong default in {k}"
# d[k] = config2dictV2(v["choices"][v["default"]])
# d[k]["__selected__"] = v["default"]
# elif v["type"] == "group":
# d[k] = config2dictV2(v["default"])
# else:
# d[k] = v["default"]
# return d
def config2UI(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-fast.vue"
l = []
for k, v in schema.items():
print("[config2UIfast]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
return l
def config2UIold(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-option.vue"
fname_grp = "vue-templates/config-from-yaml-group.vue"
fname_bas = "vue-templates/config-from-yaml-base.vue"
l = []
for k, v in schema.items():
print("[config2UI]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
else: # base
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
return l
config_types = load_yaml("config_types.yaml")
class ConfigFromYAMLClass(v.VuetifyTemplate):
template = tr.Unicode(
load_template("vue-templates/config-from-yaml-option.vue")
).tag(sync=True)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
key = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
def __init__(self, key, cdict, schema, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
self.template = load_template(template_path)
self.cdict = cdict
self.schema = schema
self.key = key
self.vbox = ipw.VBox([])
self.components = {"yaml-items": self.vbox}
def vue_update_cdict(self, data):
with self.app.output:
print("update", self.key)
self.app.update_cdict()
def vue_update_choice(self, selection):
with self.app.output:
print("selection", selection)
print("before")
print(self.cdict)
print("-" * 20)
self.schema[self.key]["default"] = selection
nschema = self.schema[self.key]["choices"][selection]
self.cdict = config2dictV2(self.schema)
print(self.cdict)
self.vbox.children = config2UI(nschema, self.cdict[self.key], self.app)
class ConfigStage(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/config-stage.vue")).tag(
sync=True
)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
stageid = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
show_cdict = tr.Bool(False).tag(sync=True)
def __init__(self, stageid, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
self.stageid = stageid
# schema = load_yaml("config_types_v2.yaml")
# # customize schema to reflect project
# arch = schema["arch"].copy()
# del schema["arch"]
# for nn_var in self.app.config["modulus_project"]["neural_networks"]:
# schema[nn_var] = arch.copy()
# schema[nn_var]["label"] = f"[{nn_var}] NN Architecture"
# # equation constraints
# constraints = schema["constraints"].copy()
# del schema["constraints"]
# cstr = {}
# eqc = {"type": "group", "default": {}}
# import json
# for eqn in self.app.config["modulus_project"]["equation_constraints"].keys():
# # eqc["default"][eqn] = {"type": "bool", "default": True}
# # eqc["default"][eqn] = {
# # "type": "group",
# # "label": eqn,
# # "default": {"include": {"type": "bool", "default": True}},
# # }
# eqc["default"][eqn] = json.loads(json.dumps(constraints))
# eqc["default"][eqn]["label"] = f"{eqn}"
# schema["Equation Constraints"] = eqc
# eqc = {"type": "group", "default": {}}
# for nn_var in self.app.config["modulus_project"]["neural_networks"]:
# eqc["default"][f"{nn_var} Trainable"] = {"type": "bool", "default": True}
# schema["Neural Networks"] = eqc
# self.schema = schema
schema = customize_schema(path=os.environ["MPC_PROJECT_PATH"])
self.schema = schema
######
self.cdict = config2dictV2(schema)
from pprint import pprint
pprint(self.cdict)
self.components = {
"stage-config": ipw.VBox()
# "stage-config": ipw.VBox(config2UI(schema, self.cdict, self.app))
}
print("done", self.__class__)
def select_stage(self, stageid):
import json
self.stageid = stageid
stage = self.app.config["modulus_project"]["training"]["stages"][stageid]
if "data" not in stage:
stage_data = config2dictV2(self.schema.copy())
else:
stage_data = stage["data"]
self.cdict = json.loads(json.dumps(stage_data))
t0 = time.time()
print("inside select_stage", self.__class__)
self.components["stage-config"].children = config2UI(
self.schema, self.cdict, self.app
)
print(
"[finished] inside select_stage", self.__class__, f"{time.time()-t0:.3f} s"
)
def vue_update_cdict(self, data):
with self.app.output:
# def update_cdict(w):
# "Recursively update the chain of dicts"
# if w.schema[w.key]["type"] in ["option", "group"]:
# for ww in w.vbox.children:
# if ww.schema[ww.key]["type"] in ["int", "float"]:
# fn = eval(ww.schema[ww.key]["type"])
# ww.cdict[ww.key] = fn(ww.cdict[ww.key])
# w.cdict[w.key][ww.key] = ww.cdict[ww.key]
# update_cdict(ww)
# def update_cdict(w):
# "Recursively update the chain of dicts"
# if w.schema[w.key]["type"] in ["option", "group"]:
# if w.schema[w.key]["type"] == "group":
# for skey, sschema in w.schema[w.key]["default"].items():
# if sschema["type"] in ["int", "float"]:
# fn = eval(sschema["type"])
# w.cdict[w.key][skey] = fn(w.cdict[w.key][skey])
# for ww in w.vbox.children:
# # print(ww)
# # if ww.schema[ww.key]["type"] in ["int", "float"]:
# # fn = eval(ww.schema[ww.key]["type"])
# # ww.cdict[ww.key] = fn(ww.cdict[ww.key])
# # w.cdict[w.key][ww.key] = ww.cdict[ww.key]
# update_cdict(ww)
def update_cdict(w):
"Recursively update the chain of dicts"
if w.schema[w.key]["type"] in ["option", "group"]:
if w.schema[w.key]["type"] == "group":
for skey, sschema in w.schema[w.key]["default"].items():
if sschema["type"] in ["int", "float", "bool"]:
fn = eval(sschema["type"])
w.cdict[w.key][skey] = fn(w.cdict[w.key][skey])
if w.schema[w.key]["type"] == "option":
for skey, sschema in w.schema[w.key]["choices"][
w.cdict[w.key]["__selected__"]
].items():
if sschema["type"] in ["int", "float"]:
fn = eval(sschema["type"])
w.cdict[w.key][skey] = fn(w.cdict[w.key][skey])
sschema = w.schema[w.key]
# print("base case", w.key, sschema["type"])
if sschema["type"] in ["int", "float"]:
fn = eval(sschema["type"])
w.cdict[w.key] = fn(w.cdict[w.key])
# print(w.key, len(w.vbox.children))
for ww in w.vbox.children:
update_cdict(ww)
w.cdict[w.key][ww.key] = ww.cdict[ww.key]
d = self.cdict.copy()
for w in self.components["stage-config"].children:
update_cdict(w)
d[w.key] = w.cdict[w.key]
self.cdict = {}
self.cdict = d
class TrainingSelectedStage(v.VuetifyTemplate):
template = tr.Unicode(
load_template("vue-templates/training-selected-stage.vue")
).tag(sync=True)
stage = tr.Dict({"lr": 0.1}).tag(sync=True)
opt_schema = tr.Dict(config_types["optimizer"]["adam"]).tag(sync=True)
sched_schema = tr.Dict(config_types["scheduler"]["default"]).tag(sync=True)
stage_id = tr.Unicode("stage1").tag(sync=True)
def __init__(self, *ag, parent=None, app=None, **kargs):
super().__init__(*ag, **kargs)
self.app = app
self.parent = parent
self.components = {"config-stage": ConfigStage(self.stage_id, app=app)}
self._first_selection = True
self.select_stage(self.stage_id)
def sync_conf(self):
cs = self.components["config-stage"]
cs.vue_update_cdict(0)
self.stage["data"] = cs.cdict
self.app.config["modulus_project"]["training"]["stages"][
self.stage_id
] = self.stage.copy()
self.parent.not_saved = True
def select_stage(self, stageid):
with self.app.output:
# first, update the values from the UI
if not self._first_selection:
self.sync_conf()
else:
self._first_selection = False
self.stage_id = stageid
trconf = self.app.config["modulus_project"]["training"]
self.stage = trconf["stages"][self.stage_id]
for field in ["optimizer", "scheduler"]:
if field not in self.stage:
self.stage[field] = config2dict(field)
cs = self.components["config-stage"]
cs.select_stage(stageid)
self.stage = {}
self.stage = trconf["stages"][self.stage_id]
def vue_update(self, data):
with self.app.output:
self.sync_conf()
self.parent.update()
def vue_add_child_stage(self, data):
self.parent.vue_extend_stage(self.stage_id)
class TrainingStages(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/training-stages.vue")).tag(
sync=True
)
svgstr = tr.Unicode("").tag(sync=True)
stage_dag = tr.List([]).tag(sync=True)
stages = tr.List([]).tag(sync=True)
not_saved = tr.Bool(False).tag(sync=True)
editing_metadata = tr.Bool(False).tag(sync=True)
show_stage_dag = tr.Bool(True).tag(sync=True)
show_stage_ui = tr.Bool(True).tag(sync=True)
metadata = tr.Dict({}).tag(sync=True)
# nn_data = tr.Dict({}).tag(sync=True)
# nn_schema = tr.Dict({}).tag(sync=True)
# nn_types = tr.List([]).tag(sync=True)
def __init__(self, *ag, app=None, **kargs):
super().__init__(*ag, **kargs)
self.app = app
meta_schema = load_yaml("config_metadata.yaml")
if "modulus_project" not in self.app.config:
self.app.config["modulus_project"] = {}
cfg = self.app.config["modulus_project"]
if "metadata" in cfg:
meta_cdict = {"metadata": cfg["metadata"]}
else:
meta_cdict = config2dictV2(meta_schema)
self.metadata = meta_cdict["metadata"]
self.components = {
"project-metadata": ipw.VBox(config2UI(meta_schema, meta_cdict, self.app)),
"selected-stage": TrainingSelectedStage(parent=self, app=self.app),
}
self.meta_cdict = meta_cdict
self.update()
def vue_save_metadata(self, data):
with self.app.output:
def update_cdict(w):
"Recursively update the chain of dicts"
if w.schema[w.key]["type"] in ["option", "group"]:
for ww in w.vbox.children:
w.cdict[w.key][ww.key] = ww.cdict[ww.key]
update_cdict(ww)
d = self.meta_cdict.copy()
for w in self.components["project-metadata"].children:
update_cdict(w)
d[w.key] = w.cdict[w.key]
self.metadata = d["metadata"]
self.app.config["modulus_project"]["metadata"] = d["metadata"]
self.editing_metadata = False
self.not_saved = True
def vue_save_config_to_file(self, data):
self.components["selected-stage"].sync_conf()
self.update()
save_config(self.app.config)
self.not_saved = False
def vue_add_new_stage(self, data):
with self.app.output:
sn = f"stage{len(self.stages)+1}"
trconf = self.app.config["modulus_project"]["training"]
trconf["stages"][sn] = {"description": ""}
self.vue_select_stage(sn)
def vue_select_stage(self, stageid):
self.components["selected-stage"].select_stage(stageid)
self.update()
def vue_extend_stage(self, data):
with self.app.output:
sn = f"stage{len(self.stages)+1}"
trconf = self.app.config["modulus_project"]["training"]
# add new stage
trconf["stages"][sn] = {"description": ""}
# update DAG
trconf["stage-dag"] += [[data, sn]]
self.vue_select_stage(sn)
def update(self):
import graphviz as gv
with self.app.output:
trconf = self.app.config["modulus_project"]["training"]
self.stages = list(trconf["stages"].keys())
self.stage_dag = trconf["stage-dag"]
dot = gv.Digraph(comment="the round table")
dot.attr(rankdir="LR")
dot.attr("node", shape="box", style="rounded,filled")
dot.edges(trconf["stage-dag"])
for sname, stage in trconf["stages"].items():
fc = ""
if self.components["selected-stage"].stage_id == sname:
fc = "yellow"
dot.node(sname, sname + "\n" + stage["description"], fillcolor=fc)
self.svgstr = dot._repr_image_svg_xml()
class TrainingPlots(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/training-plots.vue")).tag(
sync=True
)
stages = tr.Dict({}).tag(sync=True)
not_saved = tr.Bool(False).tag(sync=True)
editing_metadata = tr.Bool(False).tag(sync=True)
metadata = tr.Dict({}).tag(sync=True)
def __init__(self, *ag, app=None, **kargs):
super().__init__(*ag, **kargs)
self.app = app
stages = {}
for stage in self.app.config["modulus_project"]["training"]["stages"].keys():
stages[stage] = {"plot": False, "trained": False}
stages[stage + "_sampled"] = {"plot": False, "trained": False}
self.stages = stages
self.outputwidget = ipw.Output()
with self.outputwidget:
import matplotlib.pyplot as plt
plt.close("all")
f = plt.figure(figsize=(5, 3))
self.figure = f
self.ax = plt.gca()
# self.components = {"plt-figure": self.outputwidget}
self.components = {"plt-figure": self.figure.canvas}
self.update_plot()
def vue_switch_stage(self, stage):
self.stages[stage]["plot"] = not self.stages[stage]["plot"]
self.update_plot()
def update_plot(self):
import os
import numpy as np
import matplotlib.pyplot as plt
with self.app.output:
self.ax.cla()
for stage in self.stages:
print(stage)
stage_path = os.path.join(
os.environ["MPC_PROJECT_PATH"], "training", stage.split("_")[0]
)
logfile = "train_sampled.log" if "_sampled" in stage else "train.log"
log_file = os.path.join(stage_path, "outputs", logfile)
file_exists = os.path.exists(log_file)
print(file_exists)
self.stages[stage]["trained"] = file_exists
if not self.stages[stage]["plot"]:
continue
try: # os.path.exists(log_file):
data = []
with open(log_file) as f:
file = f.read()
loss_lines = [l for l in file.split("\n") if "loss:" in l]
def line2float(s):
ns = s[s.find("[step:") :]
step = float(ns[ns.find(":") + 1 : ns.find("]")])
ns = s[s.find("loss") :]
loss = float(ns[ns.find(":") + 1 : ns.find(",")])
return (step, loss)
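# line2float assumes log lines shaped like "... [step: 1000] ... loss: 3.200e-03, ..."
# and extracts the (step, loss) pair from each matching line.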
data = np.array([line2float(l) for l in loss_lines])
self.ax.semilogy(data[:, 0], data[:, 1], label=stage)
self.ax.set_xlabel("Step")
self.ax.set_ylabel("Avg Loss")
except Exception as e:
print(e)
self.ax.legend()
plt.tight_layout()
d = self.stages.copy()
self.stages = {}
self.stages = d
class App(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/app.vue")).tag(sync=True)
projname = tr.Unicode(os.environ["MPC_PROJECT_PATH"]).tag(sync=True)
config = tr.Dict(load_config()).tag(sync=True)
v2n = tr.Dict(var2name).tag(sync=True)
v2hint = tr.Dict(var2hint).tag(sync=True)
loading_problem = tr.Bool(False).tag(sync=True)
problemstr = tr.Unicode("").tag(sync=True)
problem_show_cmd = tr.Unicode("mtc show problem").tag(sync=True)
summary_type = tr.Unicode("Problem").tag(sync=True)
problem_dialog = tr.Bool(False).tag(sync=True)
def __init__(self, *ag, output=None, **kargs):
super().__init__(*ag, **kargs)
self.output = output # for error and other messages
self.components = {
"training": TrainingStages(app=self),
"training-plots": TrainingPlots(app=self),
}
# def update_cdict(self):
# # def update_cdict_working(cdict, w):
# # if w.schema[w.key]["type"] in ["option", "group"]:
# # for ww in w.vbox.children:
# # cdict[ww.key] = update_cdict(cdict[ww.key], ww)
# # else:
# # return w.cdict[w.key]
# # return cdict # cdict
# def update_cdict(w):
# "Recursively update the chain of dicts"
# if w.schema[w.key]["type"] in ["option", "group"]:
# for ww in w.vbox.children:
# w.cdict[w.key][ww.key] = ww.cdict[ww.key]
# update_cdict(ww)
# d = self.cdict.copy()
# for w in self.components["test"].children:
# update_cdict(w)
# d[w.key] = w.cdict[w.key]
# self.cdict = {}
# self.cdict = d
def refresh_config(self):
self.config = load_config()
def vue_load_show_training(self, data):
self.summary_type = "Training"
self.problem_show_cmd = "mtc show training"
self.loading_problem = True
t0 = time.time()
res = os.popen("cd $MPC_PROJECT_PATH; mtc show training").read()
t1 = time.time()
self.problemstr = (
f"[{os.environ['MPC_PROJECT_PATH']}] Fetched in {t1-t0:.3f} s\n" + res
)
self.loading_problem = False
def vue_load_problem(self, data):
self.summary_type = "Problem"
self.problem_show_cmd = "mtc show problem"
self.loading_problem = True
t0 = time.time()
res = os.popen("cd $MPC_PROJECT_PATH; mtc show problem").read()
t1 = time.time()
self.problemstr = (
f"[{os.environ['MPC_PROJECT_PATH']}] Fetched in {t1-t0:.3f} s\n" + res
)
self.loading_problem = False
def vue_load_problem_1storder(self, data):
self.summary_type = "Problem"
self.problem_show_cmd = "mtc show problem --only-first-order-ufunc"
self.loading_problem = True
t0 = time.time()
res = os.popen(
"cd $MPC_PROJECT_PATH; mtc show problem --only-first-order-ufunc"
).read()
t1 = time.time()
self.problemstr = (
f"[{os.environ['MPC_PROJECT_PATH']}] Fetched in {t1-t0:.3f} s\n" + res
)
self.loading_problem = False
def new(output=None):
"""Creates a new app"""
return App(output=output)
| modulus-toolchain-master | mpc/mpc/app.py |
from .app import new
| modulus-toolchain-master | mpc/demoYAMLv2/__init__.py |
import os, time
import ipywidgets as ipw
import ipyvuetify as v
import time
import traitlets as tr
import os
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
var2name = {"lr": "Learning Rate"}
var2hint = {
"eps": "Numerical threshold below which numbers are considered 0",
"betas": "Need two fo those; inside [0,1]",
}
def load_config():
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path) as f:
return yaml.safe_load(f)
def save_config(conf):
import yaml
conf_path = os.path.join(os.environ["MPC_PROJECT_PATH"], "conf", "config.yaml")
with open(conf_path, "w") as f:
return yaml.safe_dump(conf, f)
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
def config2dict(ctype):
choice = list(config_types[ctype].keys())[0]
conf = config_types[ctype][choice]
d = {k: v["default"] for k, v in conf.items() if "default" in v}
return d
def config2dictV2(ctype):
d = {}
for k, v in ctype.items():
if v["type"] == "option":
assert v["default"] in v["choices"], f"wrong default in {k}"
d[k] = config2dictV2(v["choices"][v["default"]])
d[k]["__selected__"] = v["default"]
elif v["type"] == "group":
d[k] = config2dictV2(v["default"])
else:
d[k] = v["default"]
return d
def config2UI(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-option.vue"
fname_grp = "vue-templates/config-from-yaml-group.vue"
fname_bas = "vue-templates/config-from-yaml-base.vue"
l = []
for k, v in schema.items():
print("[config2UI]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UI(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
else: # base
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
return l
def config2UIfast(schema, cdict, app, indent=""):
fname_opt = "vue-templates/config-from-yaml-fast.vue"
l = []
for k, v in schema.items():
print("[config2UIfast]", indent, k, f'[{v["type"]}]')
if v["type"] == "option":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UIfast(
schema[k]["choices"][cdict[k]["__selected__"]],
cdict[k],
app,
indent=indent + " ",
)
elif v["type"] == "group":
w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
l.append(w)
w.vbox.children = config2UIfast(
schema[k]["default"], cdict[k], app, indent=indent + " "
)
# else: # base
# w = ConfigFromYAMLClass(k, cdict, schema, app=app, template_path=fname_opt)
# l.append(w)
return l
# config_types = load_yaml("config_types.yaml")
class ConfigFromYAMLClass(v.VuetifyTemplate):
template = tr.Unicode(
load_template("vue-templates/config-from-yaml-option.vue")
).tag(sync=True)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
key = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
def __init__(self, key, cdict, schema, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
self.template = load_template(template_path)
self.cdict = cdict
self.schema = schema
self.key = key
self.vbox = ipw.VBox([])
self.components = {"yaml-items": self.vbox}
def vue_update_cdict(self, data):
with self.app.output:
print("update", self.key)
self.app.update_cdict()
def vue_update_choice(self, selection):
with self.app.output:
print("selection", selection)
print("before")
print(self.cdict)
print("-" * 20)
self.schema[self.key]["default"] = selection
nschema = self.schema[self.key]["choices"][selection]
self.cdict = config2dictV2(self.schema)
print(self.cdict)
self.vbox.children = config2UIfast(nschema, self.cdict[self.key], self.app)
class ConfigParent(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/config-parent.vue")).tag(
sync=True
)
cdict = tr.Dict({}).tag(sync=True)
schema = tr.Dict({}).tag(sync=True)
stageid = tr.Unicode("option").tag(sync=True)
visible = tr.Bool(False).tag(sync=True)
show_cdict = tr.Bool(False).tag(sync=True)
def __init__(self, *ag, app=None, template_path="", **kargs):
super().__init__(*ag, **kargs)
self.app = app
schema = load_yaml("config_types_v2.yaml")
self.schema = schema
######
self.cdict = config2dictV2(schema)
self.components = {"stage-config": ipw.VBox(), "stage-config-fast": ipw.VBox()}
t0 = time.time()
# self.components["stage-config"].children = config2UI(
# schema, self.cdict, self.app
# )
t1 = time.time()
self.components["stage-config"].children = config2UIfast(
schema, self.cdict, self.app
)
t2 = time.time()
print(f"stage-config {t1-t0:.3f} s")
print(f"stage-config-fast {t2-t1:.3f} s")
def select_stage(self, stageid):
import json
self.stageid = stageid
stage = self.app.config["modulus_project"]["training"]["stages"][stageid]
if "data" not in stage:
stage_data = config2dictV2(self.schema.copy())
else:
stage_data = stage["data"]
self.cdict = json.loads(json.dumps(stage_data))
self.components["stage-config-fast"].children = config2UIfast(
self.schema, self.cdict, self.app
)
# self.components["stage-config"].children = config2UIfast(
# self.schema, self.cdict, self.app
# )
def vue_update_cdict(self, data):
with self.app.output:
def update_cdict(w):
"Recursively update the chain of dicts"
if w.schema[w.key]["type"] in ["option", "group"]:
if w.schema[w.key]["type"] == "group":
for skey, sschema in w.schema[w.key]["default"].items():
if sschema["type"] in ["int", "float"]:
fn = eval(sschema["type"])
w.cdict[w.key][skey] = fn(w.cdict[w.key][skey])
if w.schema[w.key]["type"] == "option":
for skey, sschema in w.schema[w.key]["choices"][
w.cdict[w.key]["__selected__"]
].items():
if sschema["type"] in ["int", "float"]:
fn = eval(sschema["type"])
w.cdict[w.key][skey] = fn(w.cdict[w.key][skey])
sschema = w.schema[w.key]
# print("base case", w.key, sschema["type"])
if sschema["type"] in ["int", "float"]:
fn = eval(sschema["type"])
w.cdict[w.key] = fn(w.cdict[w.key])
# print(w.key, len(w.vbox.children))
for ww in w.vbox.children:
update_cdict(ww)
w.cdict[w.key][ww.key] = ww.cdict[ww.key]
d = self.cdict.copy()
for w in self.components["stage-config"].children:
update_cdict(w)
d[w.key] = w.cdict[w.key]
self.cdict = {}
self.cdict = d
class App(v.VuetifyTemplate):
template = tr.Unicode(load_template("vue-templates/app.vue")).tag(sync=True)
# projname = tr.Unicode(os.environ["MPC_PROJECT_PATH"]).tag(sync=True)
# config = tr.Dict(load_config()).tag(sync=True)
# v2n = tr.Dict(var2name).tag(sync=True)
# v2hint = tr.Dict(var2hint).tag(sync=True)
# loading_problem = tr.Bool(False).tag(sync=True)
problemstr = tr.Unicode("").tag(sync=True)
problem_dialog = tr.Bool(False).tag(sync=True)
def __init__(self, *ag, output=None, **kargs):
super().__init__(*ag, **kargs)
self.output = output # for error and other messages
self.components = {
"config": ConfigParent(app=self),
}
def refresh_config(self):
self.config = load_config()
def vue_load_problem(self, data):
self.loading_problem = True
t0 = time.time()
res = os.popen("cd $MPC_PROJECT_PATH; mtc show problem").read()
t1 = time.time()
self.problemstr = (
f"[{os.environ['MPC_PROJECT_PATH']}] Fetched in {t1-t0:.3f} s\n" + res
)
self.loading_problem = False
def new(output=None):
"""Creates a new app"""
return App(output=output)
| modulus-toolchain-master | mpc/demoYAMLv2/app.py |
from cfg import *
import numpy as np
from sympy import exp, sin, cos, DiracDelta
##########################################################
####################### Neural networks ##################
##########################################################
# Create NN to predict pressure (x,y,t) -> p
[x, y, t], [pressure] = p.add_neural_network(name="pressure", inputs=["x", "y", "t"], outputs=["pressure"])
##########################################################
######################### Constraints ####################
##########################################################
# 3. Data constraints (snapshots taken from t in [0.25, 2.0])
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-fwd-2d-cv-snapshots/data_fd_wave-2d-snapshots-n10-ti-0.2-tf-0.6.hdf5"
p.add_data_constraint(name="observed_data", model=pressure, data_fname=hdf_fname)
| modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-2d-fwd/solutions/we-fwd-2d-cv-snapshots-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
# Geometry
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
# Domains
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
# Prior knowledge on velocity
vel_space_invariant = Eq(vel.diff(x, 1), 0)
# p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior))
# Data constraints
# hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel/data_fd_resampled-1000.hdf5"
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel/data_fd_resampled-subx10-subt100.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname)
| modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-noderiv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
################ Neural Network definition ################
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
######################### Geometry #######################
# Geometry + domains
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# Old method
# Bounds for interior 1
# x1=0.5
# x2=1.5
# # Bounds for interior 2
# x3=1.7
# x4=3
# # Define interior 1 and 2
# interior1 = p.add_interior_subdomain("interior1", geom=geo, criteria=And(x > x1, x < x2), params={t:(0,2*L)})
# interior2 = p.add_interior_subdomain("interior2", geom=geo, criteria=And(x > x3, x < x4), params={t:(0,2*L)})
##
# Interface position
# x_int = 1.565
# eps = 0.02
interior_layers = p.add_interior_subdomain("layers", geom=geo, criteria=Or( (x < 1.5), (x > 1.7) ), params={t:(0,2*L)})
#################### PDEs + constraints ##################
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
vel_space_invariant = Eq(vel.diff(x, 1), 0)
# p.add_constraint("vel_space_invariant1", enforce(equation=vel_space_invariant, on_domain=interior1))
# p.add_constraint("vel_space_invariant2", enforce(equation=vel_space_invariant, on_domain=interior2))
p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior_layers))
# 5. Data constraints
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-2layers-deriv_constraint/data_fd_resampled-subx10-subt100.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-2layers-deriv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
################ Neural Network definition ################
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
######################### Geometry #######################
# Geometry + domains
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# Interfaces
x_int1 = 1.045
x_int2 = 2.095
eps = 0.2
b1 = 0.8
b2 = 1.2
b3 = 1.9
b4 = 2.3
band1 = p.Line1D("band1", b1, b2)
band2 = p.Line1D("band2", b3, b4)
diff_temp = p.GeometryDifference("diff_temp", geo, band1)
diff = p.GeometryDifference("diff", diff_temp, band2)
interior_layers = p.add_interior_subdomain("layers", geom=diff, params={t:(0, 2*L)})
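# Subtracting band1 and band2 removes narrow neighborhoods around the assumed layer
# interfaces, so the velocity-invariance constraint below is enforced only strictly
# inside each layer, not across the discontinuities.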
# interior_layers = p.add_interior_subdomain("layers", geom=geo, criteria=Or( (x < b1), (x > x_int1+eps), (x < x_int2-eps), (x > x_int2+eps) ), params={t:(0,2*L)})
# # Bounds for interior 1
# x1=0.0
# x2=1.03
# # Bounds for interior 2
# x3=1.1
# x4=2.0
# # Bounds for interior 3
# x5=2.2
# x6=3.1
# # Define interior 1 and 2
# interior1 = p.add_interior_subdomain("interior1", geom=geo, criteria=And(x > x1, x < x2), params={t:(0,2*L)})
# interior2 = p.add_interior_subdomain("interior2", geom=geo, criteria=And(x > x3, x < x4), params={t:(0,2*L)})
# interior3 = p.add_interior_subdomain("interior3", geom=geo, criteria=And(x > x5, x < x6), params={t:(0,2*L)})
#################### PDEs + constraints ##################
# 1. PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# 2. Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# 3. Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
# 4. Velocity constraints
vel_space_invariant = Eq(vel.diff(x, 1), 0)
# p.add_constraint("vel_space_invariant1", enforce(equation=vel_space_invariant, on_domain=interior1))
# p.add_constraint("vel_space_invariant2", enforce(equation=vel_space_invariant, on_domain=interior2))
# p.add_constraint("vel_space_invariant3", enforce(equation=vel_space_invariant, on_domain=interior3))
p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior_layers))
# 5. Data constraints
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-3layers/data_fd_resampled-subx10-subt100.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-3layers-deriv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
################ Neural Network definition ################
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
######################### Geometry #######################
# Geometry + domains
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# # Bounds for interior 0
# x1=0.0
# x2=0.7
# # Bounds for interior 1
# x3=0.8
# x4=1.5
# # Bounds for interior 2
# x5=1.6
# x6=2.3
# # Bounds for interior 3
# x7=2.4
# x8=3.1
# # Define interior 1 and 2
# interior1 = p.add_interior_subdomain("interior1", geom=geo, criteria=And(x > x1, x < x2), params={t:(0,2*L)})
# interior2 = p.add_interior_subdomain("interior2", geom=geo, criteria=And(x > x3, x < x4), params={t:(0,2*L)})
# interior3 = p.add_interior_subdomain("interior3", geom=geo, criteria=And(x > x5, x < x6), params={t:(0,2*L)})
# interior4 = p.add_interior_subdomain("interior4", geom=geo, criteria=And(x > x7, x < x8), params={t:(0,2*L)})
# Interfaces
x_int1 = 1.045
x_int2 = 2.095
b1 = 0.65
b2 = 0.9
b3 = 1.4
b4 = 1.7
b5 = 2.2
b6 = 2.7
band1 = p.Line1D("band1", b1, b2)
band2 = p.Line1D("band2", b3, b4)
band3 = p.Line1D("band3", b5, b6)
diff_temp1 = p.GeometryDifference("diff_temp1", geo, band1)
diff_temp2 = p.GeometryDifference("diff_temp2", diff_temp1, band2)
diff = p.GeometryDifference("diff", diff_temp2, band3)
interior_layers = p.add_interior_subdomain("layers", geom=diff, params={t:(0, 2*L)})
#################### PDEs + constraints ##################
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
# Velocity constraints
vel_space_invariant = Eq(vel.diff(x, 1), 0)
p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior_layers))
# 5. Data constraints
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-4layers/data_fd_resampled-subx10-subt100.hdf5"
# hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-4layers/data_fd_resampled-5.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-4layers-deriv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
# Geometry
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
# Domains
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
# Prior knowledge on velocity
vel_space_invariant = Eq(vel.diff(x, 1), 0)
p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior))
# Data constraints
# hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel/data_fd_resampled-1000.hdf5"
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-deriv_constraint/data_fd_resampled-subx10-subt100.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname)
| modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-deriv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Create NN to predict pressure (x,t) -> p
[x, t], [u] = p.add_neural_network(name="pressure", inputs=["x", "t"], outputs=["u"])
# Create NN to predict velocity (x) -> vel
[x], [vel] = p.add_neural_network(name="velocity", inputs=["x"], outputs=["vel"])
# Geometry
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
# Domains
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary))
# Prior knowledge on velocity
vel_space_invariant = Eq(vel.diff(x, 1), 0)
p.add_constraint("vel_space_invariant", enforce(equation=vel_space_invariant, on_domain=interior))
# Data constraints
# hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel/data_fd_resampled-1000.hdf5"
hdf_fname="/mount/workspace_test/temp/mtc-repo/examples/we-inv-1d-vel-derv_constraint/data_fd_resampled-subx10-subt100.hdf5"
p.add_data_constraint(name="observed_data", model=u, data_fname=hdf_fname)
| modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-inv/solutions/we-inv-1d-vel-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Vel is also an input
[x, t, vel], [u] = p.add_neural_network(name="wave1d", inputs=["x", "t", "vel"], outputs=["u"])
# Geometry + domains
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L), vel:(1,1.5)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0), vel:(1,1.5)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L), vel:(1,1.5)})
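# Treating vel as an extra network input, sampled over (1, 1.5) in every subdomain,
# trains a single surrogate that can be evaluated for any constant velocity in that
# range without retraining.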
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary)) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-fwd/solutions/we-fwd-1d-vel-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Create NN that maps (position,time) -> pressure
[x, t], [u] = p.add_neural_network(name="wave1d", inputs=["x", "t"], outputs=["u"])
# Geometry
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
# Domains
# Do not forget to add the time parametrization
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# Generate constant velocity model
c = 1.0
# Add PDE constraint
wave_eq = Eq(u.diff(t, 2) - c**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Add initial conditions
initial_p = Eq(u, sin(x)) # Pressure field
initial_dp_dt = Eq(u.diff(t), sin(x)) # Time derivative of pressure field
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Add boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary)) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-fwd/solutions/we-fwd-1d-cv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# Neural net (position,time) -> pressure
[x, t], [u] = p.add_neural_network(name="wave1d", inputs=["x", "t"], outputs=["u"])
# Geometry
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
# Domains + time parametrization
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L)})
# Generate velocity
k = 10
c = 1.0 + 1.0 / (1.0 + exp(-2*k*(x-1.5)))
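# c is a smooth sigmoid step from ~1.0 to ~2.0 centered at x = 1.5; k = 10 sets how
# sharp the velocity contrast between the two regions is.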
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - c**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary)) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-fwd/solutions/we-fwd-1d-ncv-problem.py |
from cfg import *
import numpy as np
from sympy import exp, sin
# The velocity-model steepness k is also a network input
[x, t, k], [u] = p.add_neural_network(name="wave1d", inputs=["x", "t", "k"], outputs=["u"])
# Geometry + domains
L = float(np.pi)
geo = p.Line1D("geom", 0, L)
interior = p.add_interior_subdomain("interior", geom=geo, params={t:(0,2*L), k:(1,10)})
initial_t0 = p.add_interior_subdomain("initial_t0", geom=geo, params={t:(0,0), k:(1,10)})
boundary = p.add_boundary_subdomain("boundary", geom=geo, params={t:(0,2*L), k:(1,10)})
# Generate velocity
vel = 1.0 + 1.0 / (1.0 + exp(-2*k*(x-1.5)))
# PDE constraints
wave_eq = Eq(u.diff(t, 2) - vel**2 * u.diff(x, 2), 0)
p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# Initial conditions
initial_p = Eq(u, sin(x))
initial_dp_dt = Eq(u.diff(t), sin(x))
p.add_constraint("initial_p", enforce(equation=initial_p, on_domain=initial_t0))
p.add_constraint("initial_dp_dt", enforce(equation=initial_dp_dt, on_domain=initial_t0))
# Boundary conditions
boundary_cond = Eq(u, 0)
p.add_constraint("boundary", enforce(equation=boundary_cond, on_domain=boundary)) | modulus-toolchain-master | examples/PINNs/04-WaveEquationPINNs/we-1d-fwd/solutions/we-fwd-1d-vel-k-problem.py |
from cfg import *
[x, y, rot], [u, pp, v] = p.add_neural_network(
name="NN", inputs=["x", "y", "rot"], outputs=["u", "p", "v"]
)
# geometry
import numpy as np
params = {rot: (0, -np.pi / 6)}
# params = {rot: float(0.0)}
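# rot is a parameter swept over (0, -pi/6): geometry and constraints are sampled across
# this range of rotation angles (angle of attack), so one trained model covers them all.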
channel_length = 15.0 / 2
channel_height = 10.0 / 2
a = 0.3
channel_rect = p.Rectangle(
"channel_rect",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
)
channel = p.Channel2D(
"channel",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
)
import os
module_path = os.getcwd()
module_name = "CustomAirfoilGeom"
domain_geom = p.CustomGeometry(
"geom",
module_path,
module_name,
"AirfoilInChannel",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
)
# domain_geom = p.GeometryDifference("dg", channel, tri)
interior = p.add_interior_subdomain(
"interior", geom=domain_geom, compute_sdf_derivatives=True, params=params
)
top_bot = p.add_boundary_subdomain(
"top_bot", geom=channel, criteria=Eq(Abs(y), channel_height / 2), params=params
)
inlet = p.add_boundary_subdomain(
"inlet", geom=channel_rect, criteria=Eq(x, -channel_length * a), params=params
)
outlet = p.add_boundary_subdomain(
"outlet", geom=channel_rect, criteria=Eq(x, channel_length * (1 - a)), params=params
)
airfoil_bdry = p.add_boundary_subdomain("airfoil_bdry", geom=domain_geom, params=params)
lower_rec = p.Rectangle(
"lower_rec",
(-channel_length * a, -channel_height / 2),
(-1, channel_height / 2),
params=params,
)
lower_rec = p.add_boundary_subdomain(
"lower_rec", geom=lower_rec, criteria=Eq(x, -channel_length * a) | Eq(x, -1)
)
lower_rec2 = p.Rectangle(
"lower_rec2",
(-channel_length * a, -channel_height / 2),
(2, channel_height / 2),
params=params,
)
lower_rec2 = p.add_boundary_subdomain(
"lower_rec2",
geom=lower_rec2,
criteria=Eq(x, -channel_length * a) | Eq(x, 2),
params=params,
)
inlet_outlet = p.Rectangle(
"inlet_outlet",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
params=params,
)
inlet_outlet = p.add_boundary_subdomain(
"inlet_outlet",
geom=inlet_outlet,
criteria=Eq(x, -channel_length * a) | Eq(x, channel_length * (1 - a)),
params=params,
)
import sympy as sp
normal_x = sp.Symbol("normal_x")
normal_y = sp.Symbol("normal_y")
p.add_constraint(
f"no_flux_1",
enforce(
equation=Eq(Integral((normal_x * u + normal_y * v), x, y), 0),
on_domain=lower_rec,
),
)
p.add_constraint(
f"no_flux_2",
enforce(
equation=Eq(Integral((normal_x * u + normal_y * v), x, y), 0),
on_domain=lower_rec2,
),
)
p.add_constraint(
f"noflux_inlet_outlet",
enforce(
equation=Eq(Integral((normal_x * u + normal_y * v), x, y), 0),
on_domain=inlet_outlet,
),
)
sdf = sympy.Function("sdf")(x, y, rot)
from sympy import sqrt, Min, Abs
# Zero-equation (algebraic) turbulence model for the effective kinematic viscosity
nu = (
sqrt((u.diff(y) + v.diff(x)) ** 2 + 2 * u.diff(x) ** 2 + 2 * v.diff(y) ** 2)
* Min(0.045, 0.419 * sdf) ** 2
+ 6.25e-6
)
nu = p.add_submodel("nu", nu)
# N-S Momentum equations
m_x = (
-1.0 * nu * u.diff(x).diff(x)
- 1.0 * nu * u.diff(y).diff(y)
+ 1.0 * u * u.diff(x)
+ 1.0 * v * u.diff(y)
- 1.0 * nu.diff(x) * u.diff(x)
- 1.0 * nu.diff(y) * u.diff(y)
+ pp.diff(x)
)
momentum_x = Eq(m_x, 0)
m_y = (
-1.0 * nu * v.diff(x).diff(x)
- 1.0 * nu * v.diff(y).diff(y)
+ 1.0 * u * v.diff(x)
+ 1.0 * v * v.diff(y)
- 1.0 * nu.diff(x) * v.diff(x)
- 1.0 * nu.diff(y) * v.diff(y)
+ pp.diff(y)
)
momentum_y = Eq(m_y, 0)
continuity_eq = Eq(u.diff(x) + v.diff(y), 0)
p.add_constraint(
"interior_continuity", enforce(equation=continuity_eq, on_domain=interior)
)
p.add_constraint(
"interior_momentum_x", enforce(equation=momentum_x, on_domain=interior)
)
p.add_constraint(
"interior_momentum_y", enforce(equation=momentum_y, on_domain=interior)
)
p.add_constraint("airfoil_bdry_u", enforce(equation=Eq(u, 0), on_domain=airfoil_bdry))
p.add_constraint("airfoil_bdry_v", enforce(equation=Eq(v, 0), on_domain=airfoil_bdry))
p.add_constraint("top_bot_u", enforce(equation=Eq(u, 1), on_domain=top_bot))
p.add_constraint("top_bot_v", enforce(equation=Eq(v, 0), on_domain=top_bot))
p.add_constraint("inlet_u", enforce(equation=Eq(u, 1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v, 0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp, 0), on_domain=outlet))
| modulus-toolchain-master | examples/PINNs/03-Airfoil/airfoil-custom-geom-problem.py |
import numpy as np
from modulus.geometry.primitives_2d import Channel2D, Rectangle
import matplotlib.pyplot as plt
import warp as wp
import numpy as np
import os
wp.init()
# Naca implementation modified from https://stackoverflow.com/questions/31815041/plotting-a-naca-4-series-airfoil
# https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4-digit_NACA_airfoil
# @wp.func
# def camber_line(x, m, p, c):
# cl = []
# for xi in x:
# cond_1 = Heaviside(xi, 0) * Heaviside((c * p) - xi, 0)
# cond_2 = Heaviside(-xi, 0) + Heaviside(xi - (c * p), 0)
# v_1 = m * (xi / p ** 2) * (2.0 * p - (xi / c))
# v_2 = m * ((c - xi) / (1 - p) ** 2) * (1.0 + (xi / c) - 2.0 * p)
# cl.append(cond_1 * v_1 + cond_2 * v_2)
# return cl
@wp.func
def Heaviside(a: float, h: float):
if a == 0:
return h
elif a < 0:
return float(0)
elif a > 0:
return float(1)
@wp.func
def camber_line(xi: float, m: float, p: float, c: float):
z = float(0.0)
cond_1 = Heaviside(xi, z) * Heaviside((c * p) - xi, z)
cond_2 = Heaviside(-xi, z) + Heaviside(xi - (c * p), z)
v_1 = m * (xi / p**2.0) * (2.0 * p - (xi / c))
v_2 = m * ((c - xi) / (1.0 - p) ** 2.0) * (1.0 + (xi / c) - 2.0 * p)
r = cond_1 * v_1 + cond_2 * v_2
return r
@wp.func
def thickness(xi: float, t: float, c: float):
    term1 = 0.2969 * (wp.sqrt(xi / c))
term2 = -0.1260 * (xi / c)
term3 = -0.3516 * (xi / c) ** 2.0
term4 = 0.2843 * (xi / c) ** 3.0
term5 = -0.1015 * (xi / c) ** 4.0
r = 5.0 * t * c * (term1 + term2 + term3 + term4 + term5)
return r
# signed sphere
@wp.func
def sdf_sphere(p: wp.vec3, r: float):
return wp.length(p) - r
# signed box
@wp.func
def sdf_box(upper: wp.vec3, p: wp.vec3):
qx = wp.abs(p[0]) - upper[0]
qy = wp.abs(p[1]) - upper[1]
qz = wp.abs(p[2]) - upper[2]
e = wp.vec3(wp.max(qx, 0.0), wp.max(qy, 0.0), wp.max(qz, 0.0))
return wp.length(e) + wp.min(wp.max(qx, wp.max(qy, qz)), 0.0)
@wp.func
def sdf_plane(p: wp.vec3, plane: wp.vec4):
return plane[0] * p[0] + plane[1] * p[1] + plane[2] * p[2] + plane[3]
# union
@wp.func
def op_union(d1: float, d2: float):
return wp.min(d1, d2)
# subtraction
@wp.func
def op_subtract(d1: float, d2: float):
return wp.max(-d1, d2)
# intersection
@wp.func
def op_intersect(d1: float, d2: float):
return wp.max(d1, d2)
# simple scene
@wp.func
def sdf(p: wp.vec3):
# intersection of two spheres
sphere_1 = wp.vec3(0.0, 0.0, 0.0)
sphere_2 = wp.vec3(0.0, 0.75, 0.0)
d = op_subtract(sdf_sphere(p - sphere_2, 0.75), sdf_box(wp.vec3(1.0, 0.5, 0.5), p))
# sdf_sphere(p + sphere_1, 1.0))
# ground plane
d = op_union(d, sdf_plane(p, wp.vec4(0.0, 1.0, 0.0, 1.0)))
return d
@wp.func
def normal(p: wp.vec3):
eps = 1.0e-5
# compute gradient of the SDF using finite differences
dx = sdf(p + wp.vec3(eps, 0.0, 0.0)) - sdf(p - wp.vec3(eps, 0.0, 0.0))
dy = sdf(p + wp.vec3(0.0, eps, 0.0)) - sdf(p - wp.vec3(0.0, eps, 0.0))
dz = sdf(p + wp.vec3(0.0, 0.0, eps)) - sdf(p - wp.vec3(0.0, 0.0, eps))
return wp.normalize(wp.vec3(dx, dy, dz))
@wp.func
def shadow(ro: wp.vec3, rd: wp.vec3):
t = float(0.0)
s = float(1.0)
for i in range(64):
d = sdf(ro + t * rd)
t = t + wp.clamp(d, 0.0001, 2.0)
h = wp.clamp(4.0 * d / t, 0.0, 1.0)
s = wp.min(s, h * h * (3.0 - 2.0 * h))
if t > 8.0:
return 1.0
return s
@wp.func
def channel_sdf(x: float, y: float, ytop: float, ybot: float):
if wp.abs(ytop - y) < wp.abs(ybot - y):
return y - ytop
else:
return ybot - y
@wp.func
def naca_sdf(N: int, m: float, p: float, t: float, c: float, tx: float, ty: float):
dx = float(1.0) / float(N)
d = float(1e9)
for pxid in range(N):
xi = float(pxid) * dx
xi_1 = float(pxid) * dx + dx
cli = camber_line(xi, m, p, c)
cli_1 = camber_line(xi_1, m, p, c)
px = xi
py = cli
yt = thickness(xi, t, c)
pd = wp.sqrt((px - tx) * (px - tx) + (py - ty) * (py - ty))
d = wp.min(pd - yt, d)
return d
@wp.func
def naca_bdry(N: int, m: float, p: float, t: float, c: float, tx: float, ty: float):
dx = float(1.0) / float(N)
d = float(1e9)
d0 = float(1e9)
xr = float(1e9)
yr = float(1e9)
bd = float(1e9)
for pxid in range(N):
xi = float(pxid) * dx
xi_1 = float(pxid) * dx + dx
cli = camber_line(xi, m, p, c)
cli_1 = camber_line(xi_1, m, p, c)
px = xi
py = cli
yt = thickness(xi, t, c)
pd = wp.sqrt((px - tx) * (px - tx) + (py - ty) * (py - ty))
d = wp.min(pd - yt, d)
if d < d0:
xr = px
yr = py
d0 = d
bd = yt
if pd < 0.1:
bd = 0.0
nx = (tx - xr) / (d0 + bd)
ny = (ty - yr) / (d0 + bd)
xx = bd * nx + xr
yy = bd * ny + yr
l = 1.0
if bd == 0:
nx = 0.0
ny = 0.0
else:
eps = 1e-5
# d = naca_sdf(N, m,p,t,c, xx, yy)
# dx = naca_sdf(N, m,p,t,c, xx+eps, yy)
# dy = naca_sdf(N, m,p,t,c, xx, yy+eps)
# nx = dx-d
# ny = dy-d
# l = wp.sqrt(nx*nx+ny*ny)
return wp.vec4(xx, yy, nx / l, ny / l)
@wp.kernel
def draw(
N: int,
m: float,
p: float,
t: float,
c: float,
width: int,
height: int,
pixels: wp.array(dtype=float),
):
tid = wp.tid()
x = float(tid % width) / float(width) * 2.0
x = x - 0.5
y = float(tid // width) / float(height) * 2.0
y = y - 1.0
d = naca_sdf(N, m, p, t, c, x, y)
cd = channel_sdf(x, y, 0.5, -0.5)
pixels[tid] = op_subtract(d, cd)
@wp.kernel
def sample_interior(
rand_seed: int,
N: int,
m0: float,
m1: float,
p: float,
t0: float,
t1: float,
c: float,
x0: float,
y0: float,
x1: float,
y1: float,
rot0: float,
rot1: float,
xa: wp.array(dtype=float),
ya: wp.array(dtype=float),
sdf: wp.array(dtype=float),
sdf_x: wp.array(dtype=float),
sdf_y: wp.array(dtype=float),
par_rot: wp.array(dtype=float),
par_t: wp.array(dtype=float),
par_m: wp.array(dtype=float),
):
tid = wp.tid()
rstate = wp.rand_init(rand_seed, tid)
x = wp.randf(rstate, x0, x1)
y = wp.randf(rstate, y0, y1)
# apply rotation
rot = -wp.randf(rstate, rot0, rot1)
par_rot[tid] = rot
xx = wp.cos(rot) * x - wp.sin(rot) * y
yy = wp.sin(rot) * x + wp.cos(rot) * y
xa[tid] = x
ya[tid] = y
m = wp.randf(rstate, m0, m1)
par_m[tid] = m
t = wp.randf(rstate, t0, t1)
par_t[tid] = t
d = naca_sdf(N, m, p, t, c, xx, yy)
cd = channel_sdf(x, y, y1, y0)
d = op_subtract(d, cd)
sdf[tid] = d
eps = 1e-5
xx = wp.cos(rot) * (x + eps) - wp.sin(rot) * y
yy = wp.sin(rot) * (x + eps) + wp.cos(rot) * y
cd = channel_sdf(x + eps, y, y1, y0)
dx = naca_sdf(N, m, p, t, c, xx, yy)
dx = op_subtract(dx, cd)
xx = wp.cos(rot) * x - wp.sin(rot) * (y + eps)
yy = wp.sin(rot) * x + wp.cos(rot) * (y + eps)
cd = channel_sdf(x, y + eps, y1, y0)
dy = naca_sdf(N, m, p, t, c, xx, yy)
dy = op_subtract(dy, cd)
nx = dx - d
ny = dy - d
l = wp.sqrt(nx * nx + ny * ny)
sdf_x[tid] = -nx / l
sdf_y[tid] = -ny / l
@wp.kernel
def sample_boundary(
rand_seed: int,
N: int,
m0: float,
m1: float,
p: float,
t0: float,
t1: float,
c: float,
x0: float,
y0: float,
x1: float,
y1: float,
rot0: float,
rot1: float,
xa: wp.array(dtype=float),
ya: wp.array(dtype=float),
nx: wp.array(dtype=float),
ny: wp.array(dtype=float),
par_rot: wp.array(dtype=float),
par_t: wp.array(dtype=float),
par_m: wp.array(dtype=float),
):
tid = wp.tid()
rstate = wp.rand_init(rand_seed, tid)
x = wp.randf(rstate, x0, x1)
y = wp.randf(rstate, y0, y1)
m = wp.randf(rstate, m0, m1)
par_m[tid] = m
t = wp.randf(rstate, t0, t1)
par_t[tid] = t
ret = naca_bdry(N, m, p, t, c, x, y)
x = ret[0]
y = ret[1]
# apply rotation to points
rot = wp.randf(rstate, rot0, rot1)
par_rot[tid] = rot
xx = wp.cos(rot) * x - wp.sin(rot) * y
yy = wp.sin(rot) * x + wp.cos(rot) * y
# rotate the normals as well
xa[tid] = xx
ya[tid] = yy
x = ret[2]
y = ret[3]
nxx = wp.cos(rot) * x - wp.sin(rot) * y
nyy = wp.sin(rot) * x + wp.cos(rot) * y
nx[tid] = nxx
ny[tid] = nyy
# sdf[tid] = op_subtract(d,cd)
class Example:
def __init__(self, nr=1000000):
self.nr = nr
self.xa = wp.zeros(nr, dtype=float)
self.ya = wp.zeros(nr, dtype=float)
self.nxa = wp.zeros(nr, dtype=float)
self.nya = wp.zeros(nr, dtype=float)
self.sdf = wp.zeros(nr, dtype=float)
self.sdfx = wp.zeros(nr, dtype=float)
self.sdfy = wp.zeros(nr, dtype=float)
self.par_rot = wp.zeros(nr, dtype=float)
self.par_t = wp.zeros(nr, dtype=float)
self.par_m = wp.zeros(nr, dtype=float)
    def render(self, m, p, t, c, N, width=512, height=512, is_live=False):
        # Allocate the pixel buffer on first use (width/height/pixels are not
        # set up in __init__).
        if not hasattr(self, "pixels"):
            self.width, self.height = width, height
            self.pixels = wp.zeros(self.width * self.height, dtype=float)
        with wp.ScopedTimer(f"airfoil SDF [{self.width*self.height:,} pts]"):
wp.launch(
kernel=draw,
dim=self.width * self.height,
inputs=[N, m, p, t, c, self.width, self.height, self.pixels],
)
wp.synchronize_device()
parr = self.pixels.numpy().reshape((self.height, self.width))
return parr
def sample_interior(self, x0, y0, x1, y1, m0, m1, p, t0, t1, c, N, rot0, rot1):
import time
with wp.ScopedTimer(f"Sample Interior [{self.nr:,} pts]"):
rand_seed = int(time.time())
wp.launch(
kernel=sample_interior,
dim=self.nr,
inputs=[
rand_seed,
N,
m0,
m1,
p,
t0,
t1,
c,
x0,
y0,
x1,
y1,
rot0,
rot1,
self.xa,
self.ya,
self.sdf,
self.sdfx,
self.sdfy,
self.par_rot,
self.par_t,
self.par_m,
],
)
wp.synchronize_device()
sdf = -self.sdf.numpy()
sel = sdf > 0
rot = self.par_rot.numpy()[sel]
t = self.par_t.numpy()[sel]
m = self.par_m.numpy()[sel]
return (
self.xa.numpy()[sel],
self.ya.numpy()[sel],
sdf[sel],
self.sdfx.numpy()[sel],
self.sdfy.numpy()[sel],
rot,
t,
m,
)
def sample_boundary(self, x0, y0, x1, y1, m0, m1, p, t0, t1, c, N, rot0, rot1):
import time
with wp.ScopedTimer(f"Sample Boundary [{self.nr:,} pts]"):
rand_seed = int(time.time())
wp.launch(
kernel=sample_boundary,
dim=self.nr,
inputs=[
rand_seed,
N,
m0,
m1,
p,
t0,
t1,
c,
x0,
y0,
x1,
y1,
rot0,
rot1,
self.xa,
self.ya,
self.nxa,
self.nya,
self.par_rot,
self.par_t,
self.par_m,
],
)
wp.synchronize_device()
nx = self.nxa.numpy()
ny = self.nya.numpy()
sel = (nx != 0) & (ny != 0)
rot = self.par_rot.numpy()[sel]
t = self.par_t.numpy()[sel]
m = self.par_m.numpy()[sel]
return self.xa.numpy()[sel], self.ya.numpy()[sel], nx[sel], ny[sel], rot, t, m
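    # (Sketch) The render() method can be used for a quick visual check of the
    # SDF of a single shape; example values below, assuming matplotlib is
    # available:
    #   ex = Example()
    #   img = ex.render(m=0.02, p=0.4, t=0.12, c=1.0, N=501)
    #   import matplotlib.pyplot as plt
    #   plt.imshow(img, origin="lower"); plt.colorbar(); plt.show()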
from modulus.geometry.parameterization import Parameterization, Parameter
class AirfoilInChannel:
def __init__(self, ll, ur, params={}, include_channel_boundary=False):
self.x0, self.y0 = ll
self.x1, self.y1 = ur
self.ch = Channel2D(ll, ur)
self.include_channel_boundary = include_channel_boundary
self.params = Parameterization(
{
Parameter("rot"): float(0.0),
Parameter("m"): float(0.02),
Parameter("t"): float(0.12),
}
)
def _get_param_min_max(self, param, parameterization=None):
if parameterization is None:
parameterization = {}
else:
parameterization = parameterization.param_ranges
ps = param
rot = parameterization.get(ps, self.params.param_ranges[ps])
if isinstance(rot, float):
rot0, rot1 = rot, rot
else:
rot0, rot1 = min(rot), max(rot)
return rot0, rot1
def sample_boundary(
self,
nr_points: int,
criteria=None,
parameterization=None,
quasirandom: bool = False,
):
sb = self.ch.sample_boundary(
nr_points, criteria=criteria, parameterization=parameterization
)
ps = Parameter("rot")
rot0, rot1 = self._get_param_min_max(ps, parameterization)
ps = Parameter("m")
m0, m1 = self._get_param_min_max(ps, parameterization)
ps = Parameter("t")
t0, t1 = self._get_param_min_max(ps, parameterization)
xa = np.zeros((nr_points, 1))
ya = np.zeros((nr_points, 1))
nx = np.zeros((nr_points, 1))
ny = np.zeros((nr_points, 1))
par_rot = np.zeros((nr_points, 1))
par_t = np.zeros((nr_points, 1))
par_m = np.zeros((nr_points, 1))
gotn = 0
while gotn < nr_points:
e = Example(nr=nr_points)
m = 0.02 * 1
p = 0.4
t = 0.12
c = 1.0
N = 501
# m0, m1 = m*9, m*9
# t0, t1 = t*1.9, t*1.9
# rot1, rot0= -0.31, -0.31
x, y, nxt, nyt, rott, tt, mm = e.sample_boundary(
self.x0,
self.y0,
self.x1,
self.y1,
m0,
m1,
p,
t0,
t1,
c,
int(N),
rot0,
rot1,
)
e = min(len(x), nr_points - gotn)
if e > 0:
xa[gotn : gotn + e] = x[:e].reshape(-1, 1)
ya[gotn : gotn + e] = y[:e].reshape(-1, 1)
nx[gotn : gotn + e] = nxt[:e].reshape(-1, 1)
ny[gotn : gotn + e] = nyt[:e].reshape(-1, 1)
par_rot[gotn : gotn + e] = rott[:e].reshape(-1, 1)
par_t[gotn : gotn + e] = tt[:e].reshape(-1, 1)
par_m[gotn : gotn + e] = mm[:e].reshape(-1, 1)
gotn += e
if self.include_channel_boundary:
idx = np.random.choice(np.arange(2 * nr_points), nr_points)
xa = np.vstack([xa, sb["x"]])[idx]
ya = np.vstack([ya, sb["y"]])[idx]
nx = np.vstack([-nx, sb["normal_x"]])[idx]
ny = np.vstack([-ny, sb["normal_y"]])[idx]
return {
"x": xa,
"y": ya,
"normal_x": -nx,
"normal_y": -ny,
"rot": par_rot,
"t": par_t,
"m": par_m,
"area": sb["area"],
}
else:
return {
"x": xa,
"y": ya,
"normal_x": -nx,
"normal_y": -ny,
"rot": par_rot,
"t": par_t,
"m": par_m,
"area": sb["area"],
}
def sample_interior(
self,
nr_points: int,
bounds=None,
criteria=None,
parameterization=None,
compute_sdf_derivatives: bool = False,
quasirandom: bool = False,
):
si = self.ch.sample_interior(
nr_points, criteria=criteria, parameterization=parameterization
)
ps = Parameter("rot")
rot0, rot1 = self._get_param_min_max(ps, parameterization)
ps = Parameter("m")
m0, m1 = self._get_param_min_max(ps, parameterization)
ps = Parameter("t")
t0, t1 = self._get_param_min_max(ps, parameterization)
xa = np.zeros((nr_points, 1))
ya = np.zeros((nr_points, 1))
sdf = np.zeros((nr_points, 1))
sdf__x = np.zeros((nr_points, 1))
sdf__y = np.zeros((nr_points, 1))
par_rot = np.zeros((nr_points, 1))
par_t = np.zeros((nr_points, 1))
par_m = np.zeros((nr_points, 1))
gotn = 0
while gotn < nr_points:
e = Example(nr=nr_points)
m = 0.02 * 1
p = 0.4
t = 0.12
c = 1.0
N = 501
# m0, m1 = m*3, m*3
# t0, t1 = t*1.9, t*1.9
# rot1, rot0= -0.31, -0.931
x, y, sdft, sdfx, sdfy, rott, tt, mm = e.sample_interior(
self.x0,
self.y0,
self.x1,
self.y1,
m0,
m1,
p,
t0,
t1,
c,
int(N),
rot0,
rot1,
)
e = min(len(x), nr_points - gotn)
xa[gotn : gotn + e] = x[:e].reshape(-1, 1)
ya[gotn : gotn + e] = y[:e].reshape(-1, 1)
sdf[gotn : gotn + e] = sdft[:e].reshape(-1, 1)
sdf__x[gotn : gotn + e] = sdfx[:e].reshape(-1, 1)
sdf__y[gotn : gotn + e] = sdfy[:e].reshape(-1, 1)
par_rot[gotn : gotn + e] = rott[:e].reshape(-1, 1)
par_t[gotn : gotn + e] = tt[:e].reshape(-1, 1)
par_m[gotn : gotn + e] = mm[:e].reshape(-1, 1)
gotn += e
return {
"x": xa,
"y": ya,
"sdf": sdf,
"rot": par_rot,
"t": par_t,
"m": par_m,
"sdf__x": sdf__x,
"sdf__y": sdf__y,
"area": si["area"],
}
@property
def dims(self):
return ["x", "y"]
| modulus-toolchain-master | examples/PINNs/03-Airfoil/CustomAirfoilGeom.py |
from cfg import *
[x, y, rot], [u, pp, v] = p.add_neural_network(
name="NN", inputs=["x", "y", "rot"], outputs=["u", "p", "v"]
)
# geometry
import numpy as np
lines = [[0, 0], [1, 0], [1, 1]]
from sympy import Number, Symbol, Heaviside, atan, sin, cos, sqrt
# Naca implementation modified from https://stackoverflow.com/questions/31815041/plotting-a-naca-4-series-airfoil
# https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4-digit_NACA_airfoil
def camber_line(x, m, p, c):
cl = []
for xi in x:
cond_1 = Heaviside(xi, 0) * Heaviside((c * p) - xi, 0)
cond_2 = Heaviside(-xi, 0) + Heaviside(xi - (c * p), 0)
v_1 = m * (xi / p**2) * (2.0 * p - (xi / c))
v_2 = m * ((c - xi) / (1 - p) ** 2) * (1.0 + (xi / c) - 2.0 * p)
cl.append(cond_1 * v_1 + cond_2 * v_2)
return cl
def dyc_over_dx(x, m, p, c):
dd = []
for xi in x:
cond_1 = Heaviside(xi) * Heaviside((c * p) - xi)
cond_2 = Heaviside(-xi) + Heaviside(xi - (c * p))
v_1 = ((2.0 * m) / p**2) * (p - xi / c)
        v_2 = (2.0 * m) / ((1 - p) ** 2) * (p - xi / c)
dd.append(atan(cond_1 * v_1 + cond_2 * v_2))
return dd
def thickness(x, t, c):
th = []
for xi in x:
term1 = 0.2969 * (sqrt(xi / c))
term2 = -0.1260 * (xi / c)
term3 = -0.3516 * (xi / c) ** 2
term4 = 0.2843 * (xi / c) ** 3
term5 = -0.1015 * (xi / c) ** 4
th.append(5 * t * c * (term1 + term2 + term3 + term4 + term5))
return th
def naca4(x, m, p, t, c=1):
th = dyc_over_dx(x, m, p, c)
yt = thickness(x, t, c)
yc = camber_line(x, m, p, c)
line = []
for (xi, thi, yti, yci) in zip(x, th, yt, yc):
line.append((xi - yti * sin(thi), yci + yti * cos(thi)))
x.reverse()
th.reverse()
yt.reverse()
yc.reverse()
for (xi, thi, yti, yci) in zip(x, th, yt, yc):
line.append((xi + yti * sin(thi), yci - yti * cos(thi)))
return line
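# (Sketch) The point list produced by naca4 can be inspected outside the MTC
# pipeline by converting the sympy numbers to floats, e.g.:
#   import matplotlib.pyplot as plt
#   pts = naca4(list(np.linspace(0.0, 1.0, 50)), 0.02, 0.4, 0.12)
#   px, py = zip(*[(float(a), float(b)) for a, b in pts])
#   plt.plot(px, py); plt.axis("equal"); plt.show()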
m = 0.02
ppp = 0.4
t = 0.12
c = 1.0
# make naca geometry
xx = [x for x in np.linspace(0, 0.2, 20 // 4)] + [x for x in np.linspace(0.2, 1.0, 10)][
1:
] # higher res in front
line = naca4(xx, m, ppp, t, c)[:-1]
# # lines = [x for x in np.linspace(0, 0.2, 20)] + [x for x in np.linspace(0.2, 1.0, 10)][1:]
params = {rot: (0, -np.pi / 6)}
tri = p.Polygon("poly", line, rotate=(rot, "z"), params=params)
# tri = p.Rectangle("poly", (0,0), (1,1), rotate=(rot, "z"),params=params)
channel_length = 15.0 / 2
channel_height = 10.0 / 2
a = 0.3
# inlet = Line((-channel_length*a, -channel_height/2), (-channel_length*a, channel_height/2), normal=1)
# outlet = Line((channel_length*(1-a), -channel_height/2), (channel_length*(1-a), channel_height/2), normal=1)
channel_rect = p.Rectangle(
"channel_rect",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
)
channel = p.Channel2D(
"channel",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
)
domain_geom = p.GeometryDifference("dg", channel, tri)
interior = p.add_interior_subdomain(
"interior", geom=domain_geom, compute_sdf_derivatives=True, params=params
)
top_bot = p.add_boundary_subdomain(
"top_bot", geom=domain_geom, criteria=Eq(Abs(y), channel_height / 2), params=params
)
inlet = p.add_boundary_subdomain(
"inlet", geom=channel_rect, criteria=Eq(x, -channel_length * a), params=params
)
outlet = p.add_boundary_subdomain(
"outlet", geom=channel_rect, criteria=Eq(x, channel_length * (1 - a)), params=params
)
airfoil_bdry = p.add_boundary_subdomain("airfoil_bdry", geom=tri, params=params)
lower_rec = p.Rectangle(
"lower_rec",
(-channel_length * a, -channel_height / 2),
(-1, channel_height / 2),
params=params,
)
lower_rec = p.add_boundary_subdomain(
"lower_rec", geom=lower_rec, criteria=Eq(x, -channel_length * a) | Eq(x, -1)
)
lower_rec2 = p.Rectangle(
"lower_rec2",
(-channel_length * a, -channel_height / 2),
(2, channel_height / 2),
params=params,
)
lower_rec2 = p.add_boundary_subdomain(
"lower_rec2", geom=lower_rec2, criteria=Eq(x, -channel_length * a) | Eq(x, 2)
)
inlet_outlet = p.Rectangle(
"inlet_outlet",
(-channel_length * a, -channel_height / 2),
(channel_length * (1 - a), channel_height / 2),
params=params,
)
inlet_outlet = p.add_boundary_subdomain(
"inlet_outlet",
geom=inlet_outlet,
criteria=Eq(x, -channel_length * a) | Eq(x, channel_length * (1 - a)),
)
import sympy as sp
normal_x = sp.Symbol("normal_x")
normal_y = sp.Symbol("normal_y")
# p.add_constraint(f"no_flux_1",
# enforce(equation=Eq(Integral((normal_x*u+normal_y*v), x, y), 0),
# on_domain=lower_rec))
# p.add_constraint(f"no_flux_2",
# enforce(equation=Eq(Integral((normal_x*u+normal_y*v), x, y), 0),
# on_domain=lower_rec2))
# p.add_constraint(f"noflux_inlet_outlet",
# enforce(equation=Eq(Integral((normal_x*u+normal_y*v), x, y), 0),
# on_domain=inlet_outlet))
sdf = sympy.Function("sdf")(x, y, rot)
from sympy import sqrt, Min, Abs
# Zero-equation (algebraic) turbulence model for the effective kinematic viscosity
nu = (
sqrt((u.diff(y) + v.diff(x)) ** 2 + 2 * u.diff(x) ** 2 + 2 * v.diff(y) ** 2)
* Min(0.045, 0.419 * sdf) ** 2
+ 6.25e-6
)
nu = p.add_submodel("nu", nu)
# N-S Momentum equations
m_x = (
-1.0 * nu * u.diff(x).diff(x)
- 1.0 * nu * u.diff(y).diff(y)
+ 1.0 * u * u.diff(x)
+ 1.0 * v * u.diff(y)
- 1.0 * nu.diff(x) * u.diff(x)
- 1.0 * nu.diff(y) * u.diff(y)
+ pp.diff(x)
)
momentum_x = Eq(m_x, 0)
m_y = (
-1.0 * nu * v.diff(x).diff(x)
- 1.0 * nu * v.diff(y).diff(y)
+ 1.0 * u * v.diff(x)
+ 1.0 * v * v.diff(y)
- 1.0 * nu.diff(x) * v.diff(x)
- 1.0 * nu.diff(y) * v.diff(y)
+ pp.diff(y)
)
momentum_y = Eq(m_y, 0)
continuity_eq = Eq(u.diff(x) + v.diff(y), 0)
p.add_constraint(
"interior_continuity", enforce(equation=continuity_eq, on_domain=interior)
)
p.add_constraint(
"interior_momentum_x", enforce(equation=momentum_x, on_domain=interior)
)
p.add_constraint(
"interior_momentum_y", enforce(equation=momentum_y, on_domain=interior)
)
p.add_constraint("airfoil_bdry_u", enforce(equation=Eq(u, 0), on_domain=airfoil_bdry))
p.add_constraint("airfoil_bdry_v", enforce(equation=Eq(v, 0), on_domain=airfoil_bdry))
p.add_constraint("top_bot_u", enforce(equation=Eq(u, 1), on_domain=top_bot))
p.add_constraint("top_bot_v", enforce(equation=Eq(v, 0), on_domain=top_bot))
p.add_constraint("inlet_u", enforce(equation=Eq(u, 1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v, 0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp, 0), on_domain=outlet))
| modulus-toolchain-master | examples/PINNs/03-Airfoil/airfoil_rot_only_problem.py |
from cfg import *
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
p.add_constraint("diffusion_solid", enforce(equation=Eq(x,0), on_domain=interior_solid))
p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(x,0), on_domain=bdry_solid))
p.add_constraint("heat_source", enforce(equation=Eq(x,0), on_domain=bdry_heat_src))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p1a.py |
from cfg import *
import sympy as sp
# HEAT
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
# Air flow
a_end=5
rect = p.Rectangle("rect_air", (-1,0), (a_end,2))
geo_air = p.GeometryDifference('gd0', rect, geo_solid)
interior_air=p.add_interior_subdomain("interior_air", geom=geo_air, compute_sdf_derivatives=True)
inlet = p.add_boundary_subdomain("inlet", geom=geo_air, criteria=Eq(x,-1))
outlet = p.add_boundary_subdomain("outlet", geom=geo_air, criteria=Eq(x,a_end))
noslip = p.add_boundary_subdomain("noslip", geom=geo_air, criteria= (x>-1) & (x<a_end))
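# NOTE: the four constraints below reuse the heat-flux equation only as
# placeholders so that the flow subdomains can be sampled and inspected; the
# actual flow equations for this problem appear in the later solution steps
# (s3p2b / s3p2c).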
p.add_constraint("continuity", enforce(equation=Eq(ny*T.diff(y),1), on_domain=interior_air))
p.add_constraint("inlet", enforce(equation=Eq(ny*T.diff(y),1), on_domain=inlet))
p.add_constraint("outlet", enforce(equation=Eq(ny*T.diff(y),1), on_domain=outlet))
p.add_constraint("noslip", enforce(equation=Eq(ny*T.diff(y),1), on_domain=noslip))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p2a.py |
from cfg import *
import sympy as sp
# HEAT
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
#p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
# Air flow
[x, y], [u, v, pp] = p.add_neural_network(name="NNflow", inputs=["x", "y"], outputs=["u", "v", "p"])
a_end=5
rect = p.Rectangle("rect_air", (-1,0), (a_end,2))
geo_air = p.GeometryDifference('gd0', rect, geo_solid)
interior_air=p.add_interior_subdomain("interior_air", geom=geo_air, compute_sdf_derivatives=True)
inlet = p.add_boundary_subdomain("inlet", geom=geo_air, criteria=Eq(x,-1))
outlet = p.add_boundary_subdomain("outlet", geom=geo_air, criteria=Eq(x,a_end))
noslip = p.add_boundary_subdomain("noslip", geom=geo_air, criteria= (x>-1) & (x<a_end))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=interior_air))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=interior_air))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=interior_air))
# Adding inlet/outlet Mass balance
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=geo_air,
criteria=Eq(x,-1) | Eq(x,a_end))
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
p.add_constraint("mass_balance_io", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
##########
# Advection Diffusion
##########
[x, y], [Ta] = p.add_neural_network(name="NN_Tair", inputs=["x", "y"], outputs=["Tair"])
# Solid/Air heat transfer
p.add_constraint("TsolidTair_compat", enforce(equation=Eq(T,Ta), on_domain=bdry_solid))
fluxcompat_eq = Eq(5*(nx*T.diff(x)+ny*T.diff(y)), nx*Ta.diff(x)+ny*Ta.diff(y))
p.add_constraint("TsolidTair_flux_compat", enforce(equation=fluxcompat_eq, on_domain=bdry_solid))
# Insulating channel wall
chwalls = p.add_boundary_subdomain("chwalls", geom=geo_air, criteria= (y<=0) | (y>=2))
p.add_constraint("chwalls_grad_c", enforce(equation=Eq(ny*Ta.diff(y),0), on_domain=chwalls))
# Advection Diffusion
D = 0.02 # diffusion coefficient
adv_diff_eq = Eq(u*Ta.diff(x)+v*Ta.diff(y), D*(Ta.diff(x,x)+Ta.diff(y,y)))
p.add_constraint("adv_diff", enforce(equation=adv_diff_eq, on_domain=interior_air))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p2d.py |
from cfg import *
[x], [ua, ub] = p.add_neural_network(name="NN", inputs=["x"], outputs=["ua", "ub"])
geom = p.Line1D("geomA", -1,0)
interior = p.add_interior_subdomain("interiorA", geom=geom)
middle = p.add_boundary_subdomain("middle", geom=geom, criteria=Eq(x,0))
bdry = p.add_boundary_subdomain("bdryA", geom=geom, criteria=Eq(x,-1))
diff_eq = Eq(ua.diff(x,2) + 1, 0)
p.add_constraint("diffusionA", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("middleA", enforce(equation=Eq(ua,0), on_domain=middle))
p.add_constraint("bdryA", enforce(equation=Eq(ua.diff(x),0), on_domain=bdry))
geom = p.Line1D("geomB", 0,1)
interior = p.add_interior_subdomain("interiorB", geom=geom)
bdry = p.add_boundary_subdomain("bdryB", geom=geom, criteria=Eq(x,1))
diff_eq = Eq(ub.diff(x,2) - 1, 0)
p.add_constraint("diffusionB", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("middleB", enforce(equation=Eq(ub,0), on_domain=middle))
p.add_constraint("bdryB", enforce(equation=Eq(ub.diff(x),0), on_domain=bdry))
p.set_model(
"f",
[
{"func": ua, "on": x<=0},
{"func": ub, "on": x>0},
],
) | modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/p2c.py |
from cfg import *
[x, y], [u] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0))
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w))
r_interior = p.add_interior_subdomain("r_interior", geom=r1)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("b1", enforce(equation=Eq(x,0), on_domain=inlet))
p.add_constraint("b2", enforce(equation=Eq(x,0), on_domain=outlet))
p.add_constraint("b3", enforce(equation=Eq(x,0), on_domain=noslip))
p.add_constraint("c2", enforce(equation=Eq(x,0), on_domain=ch_interior))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p1a.py |
from cfg import *
[x, y], [u, v, pp] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v", "p"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
subr = p.Rectangle("subr1", (w/2-.1,0), (w/2+.1,h*.8))
ch1 = p.GeometryDifference("gd", ch1, subr)
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0))
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w))
r_interior = p.add_interior_subdomain("r_interior", geom=r1)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=ch_interior))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=ch_interior))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=ch_interior))
# Adding mass balance
import sympy as sp
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
int_rect = p.Rectangle("int_rect", (0,0), (w/2,h))
int_rect = p.GeometryDifference("gd1", ch1, int_rect)
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=r1, criteria=Eq(x,0) | Eq(x,w))
int_constrict = p.add_boundary_subdomain("int_constrict", geom=int_rect,
criteria=Eq(x,0) | Eq(x,w/2))
p.add_constraint("mass_balance_io", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
p.add_constraint("mass_balance_middle", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=int_constrict))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p2e.py |
from cfg import *
[x], [ua, ub] = p.add_neural_network(name="NN", inputs=["x"], outputs=["ua", "ub"])
geom = p.Line1D("geomA", -1,0)
interior = p.add_interior_subdomain("interiorA", geom=geom)
middle = p.add_boundary_subdomain("middle", geom=geom, criteria=Eq(x,0))
bdry = p.add_boundary_subdomain("bdryA", geom=geom, criteria=Eq(x,-1))
diff_eq = Eq(ua.diff(x,2) + 1, 0)
p.add_constraint("diffusionA", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("middleA", enforce(equation=Eq(ua,0), on_domain=middle))
p.add_constraint("bdryA", enforce(equation=Eq(ua.diff(x),0), on_domain=bdry))
geom = p.Line1D("geomB", 0,1)
interior = p.add_interior_subdomain("interiorB", geom=geom)
bdry = p.add_boundary_subdomain("bdryB", geom=geom, criteria=Eq(x,1))
diff_eq = Eq(ub.diff(x,2) - 1, 0)
p.add_constraint("diffusionB", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("middleB", enforce(equation=Eq(ub,0), on_domain=middle))
p.add_constraint("bdryB", enforce(equation=Eq(ub.diff(x),0), on_domain=bdry))
p.set_model(
"f",
[
{"func": ua, "on": x<=0},
{"func": ub, "on": x>0},
],
) | modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/p1c.py |
from cfg import *
[x], [ua, ub] = p.add_neural_network(name="NN", inputs=["x"], outputs=["ua", "ub"])
DA = 1
DB = 1/100
geom = p.Line1D("geomA", -1,0)
interior = p.add_interior_subdomain("interiorA", geom=geom)
middle = p.add_boundary_subdomain("middle", geom=geom, criteria=Eq(x,0))
bdry = p.add_boundary_subdomain("bdryA", geom=geom, criteria=Eq(x,-1))
diff_eq = Eq(DA*ua.diff(x,2) + 1, 0)
p.add_constraint("diffusionA", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("bdryA", enforce(equation=Eq(ua,0), on_domain=bdry))
geom = p.Line1D("geomB", 0,1)
interior = p.add_interior_subdomain("interiorB", geom=geom)
bdry = p.add_boundary_subdomain("bdryB", geom=geom, criteria=Eq(x,1))
diff_eq = Eq(DB*ub.diff(x,2) - 1/2, 0)
p.add_constraint("diffusionB", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("bdryB", enforce(equation=Eq(ub,0), on_domain=bdry))
p.add_constraint("uAuB", enforce(equation=Eq(ub,ua), on_domain=middle))
p.add_constraint("GraduAuB", enforce(equation=Eq(DB*ub.diff(x), DA*ua.diff(x)),
on_domain=middle))
p.set_model(
"f",
[
{"func": ua, "on": x<=0},
{"func": ub, "on": x>0},
],
) | modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/p2b.py |
from cfg import *
[x, y], [u, v, pp] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v", "p"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0))
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w))
r_interior = p.add_interior_subdomain("r_interior", geom=r1)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=ch_interior))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=ch_interior))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=ch_interior))
# Adding mass balance
import sympy as sp
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=r1, criteria=Eq(x,0) | Eq(x,w))
p.add_constraint("mass_balance", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p1c.py |
from cfg import *
[x], [ua, ub] = p.add_neural_network(name="NN", inputs=["x"], outputs=["ua", "ub"])
geom = p.Line1D("geomA", -1,0)
interior = p.add_interior_subdomain("interiorA", geom=geom)
bdry = p.add_boundary_subdomain("bdryA", geom=geom)
diff_eq = Eq(ua.diff(x,2) + 1, 0)
p.add_constraint("diffusionA", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("bdryA", enforce(equation=Eq(ua,0), on_domain=bdry))
geom = p.Line1D("geomB", 0,1)
interior = p.add_interior_subdomain("interiorB", geom=geom)
bdry = p.add_boundary_subdomain("bdryB", geom=geom)
diff_eq = Eq(ub.diff(x,2) - 1, 0)
p.add_constraint("diffusionB", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("bdryB", enforce(equation=Eq(ub,0), on_domain=bdry))
p.set_model(
"f",
[
{"func": ua, "on": x<=0},
{"func": ub, "on": x>0},
],
) | modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/p1a.py |
from cfg import *
[x, v1,v2], [va, vb] = p.add_neural_network(name="NN", inputs=["x", 'v1', 'v2'], outputs=["va", "vb"])
ua = p.add_submodel("ua", va*(x+1)+v1)
ub = p.add_submodel("ub", vb*(x-1)+v2)
params={v1:(-1,1), v2:(-1,1)}
DA = 1
DB = 1/100
geom = p.Line1D("geomA", -1,0)
interior = p.add_interior_subdomain("interiorA", geom=geom, params=params)
middle = p.add_boundary_subdomain("middle", geom=geom, criteria=Eq(x,0), params=params)
bdry = p.add_boundary_subdomain("bdryA", geom=geom, criteria=Eq(x,-1), params=params)
diff_eq = Eq(DA*ua.diff(x,2) + 1, 0)
p.add_constraint("diffusionA", enforce(equation=diff_eq, on_domain=interior))
geom = p.Line1D("geomB", 0,1)
interior = p.add_interior_subdomain("interiorB", geom=geom, params=params)
bdry = p.add_boundary_subdomain("bdryB", geom=geom, criteria=Eq(x,1), params=params)
diff_eq = Eq(DB*ub.diff(x,2) - 1/2, 0)
p.add_constraint("diffusionB", enforce(equation=diff_eq, on_domain=interior))
p.add_constraint("uAuB", enforce(equation=Eq(ub,ua), on_domain=middle))
p.add_constraint("GraduAuB", enforce(equation=Eq(DB*ub.diff(x), DA*ua.diff(x)),
on_domain=middle))
p.set_model(
"f",
[
{"func": ua, "on": x<=0},
{"func": ub, "on": x>0},
],
) | modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/p3a.py |
from cfg import *
[x, y, oh], [u, v, pp] = p.add_neural_network(name="NN", inputs=["x", "y", "oh"], outputs=["u", "v", "p"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
params = {oh: (0,0.95)}
# subr = p.Rectangle("subr1", (w/2-.1,0), (w/2+.1,h*.8))
subr = p.Rectangle("subr1", (w/2-.1,0), (w/2+.1,oh), params=params)
ch1 = p.GeometryDifference("gd", ch1, subr)
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0), params=params)
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w), params=params)
r_interior = p.add_interior_subdomain("r_interior", geom=r1, params=params)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1, params=params)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1, params=params)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=ch_interior))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=ch_interior))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=ch_interior))
# Adding mass balance
import sympy as sp
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
int_rect = p.Rectangle("int_rect", (0,0), (w/2,h))
# int_rect = p.GeometryDifference("gd1", ch1, int_rect)
int_rect = p.GeometryDifference("gd1", ch1, int_rect)
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=r1, criteria=Eq(x,0) | Eq(x,w), params=params)
int_constrict = p.add_boundary_subdomain("int_constrict", geom=int_rect,
criteria=Eq(x,0) | Eq(x,w/2), params=params)
p.add_constraint("mass_balance_io", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
p.add_constraint("mass_balance_middle", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=int_constrict))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p3.py |
from cfg import *
[x, y], [u, v, pp] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v", "p"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
subr = p.Rectangle("subr1", (w/2-.1,0), (w/2+.1,h*.8))
ch1 = p.GeometryDifference("gd", ch1, subr)
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0))
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w))
r_interior = p.add_interior_subdomain("r_interior", geom=r1)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=ch_interior))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=ch_interior))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=ch_interior))
# Adding mass balance
import sympy as sp
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=r1, criteria=Eq(x,0) | Eq(x,w))
p.add_constraint("mass_balance", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p2c.py |
from cfg import *
import sympy as sp
# HEAT
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
#p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
# Air flow
[x, y], [u, v, pp] = p.add_neural_network(name="NNflow", inputs=["x", "y"], outputs=["u", "v", "p"])
a_end=5
rect = p.Rectangle("rect_air", (-1,0), (a_end,2))
geo_air = p.GeometryDifference('gd0', rect, geo_solid)
interior_air=p.add_interior_subdomain("interior_air", geom=geo_air, compute_sdf_derivatives=True)
inlet = p.add_boundary_subdomain("inlet", geom=geo_air, criteria=Eq(x,-1))
outlet = p.add_boundary_subdomain("outlet", geom=geo_air, criteria=Eq(x,a_end))
noslip = p.add_boundary_subdomain("noslip", geom=geo_air, criteria= (x>-1) & (x<a_end))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=interior_air))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=interior_air))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=interior_air))
# Adding inlet/outlet Mass balance
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=geo_air,
criteria=Eq(x,-1) | Eq(x,a_end))
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
p.add_constraint("mass_balance_io", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
##########
# Advection Diffusion
##########
[x, y], [Ta] = p.add_neural_network(name="NN_Tair", inputs=["x", "y"], outputs=["Tair"])
# Solid/Air heat transfer
p.add_constraint("TsolidTair_compat", enforce(equation=Eq(T,Ta), on_domain=bdry_solid))
fluxcompat_eq = Eq(5*(nx*T.diff(x)+ny*T.diff(y)), nx*Ta.diff(x)+ny*Ta.diff(y))
p.add_constraint("TsolidTair_flux_compat", enforce(equation=fluxcompat_eq, on_domain=bdry_solid))
# Insulating channel wall
chwalls = p.add_boundary_subdomain("chwalls", geom=geo_air, criteria= (y<=0) | (y>=2))
p.add_constraint("chwalls_grad_c", enforce(equation=Eq(ny*Ta.diff(y),0), on_domain=chwalls))
# Advection Diffusion
D = 0.02 # diffusion coefficient
adv_diff_eq = Eq(u*Ta.diff(x)+v*Ta.diff(y), D*(Ta.diff(x,x)+Ta.diff(y,y)))
p.add_constraint("adv_diff", enforce(equation=adv_diff_eq, on_domain=interior_air))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p3.py |
from cfg import *
[x, y], [u, v, pp] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v", "p"])
w,h = 2,1
r1 = p.Rectangle("r1", (0,0), (w,h))
ch1 = p.Channel2D("ch1", (0,0), (w,h))
inlet = p.add_boundary_subdomain("inlet", geom=r1, criteria=Eq(x,0))
outlet = p.add_boundary_subdomain("outlet", geom=r1, criteria=Eq(x,w))
r_interior = p.add_interior_subdomain("r_interior", geom=r1)
ch_interior = p.add_interior_subdomain("ch_interior", geom=ch1)
noslip = p.add_boundary_subdomain("no_slip", geom=ch1)
# p.add_constraint("c1", enforce(equation=Eq(x,0), on_domain=r_interior))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=ch_interior))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=ch_interior))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=ch_interior))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s2p1b.py |
from cfg import *
import sympy as sp
# HEAT
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
# Air flow
[x, y], [u, v, pp] = p.add_neural_network(name="NNflow", inputs=["x", "y"], outputs=["u", "v", "p"])
a_end=5
rect = p.Rectangle("rect_air", (-1,0), (a_end,2))
geo_air = p.GeometryDifference('gd0', rect, geo_solid)
interior_air=p.add_interior_subdomain("interior_air", geom=geo_air, compute_sdf_derivatives=True)
inlet = p.add_boundary_subdomain("inlet", geom=geo_air, criteria=Eq(x,-1))
outlet = p.add_boundary_subdomain("outlet", geom=geo_air, criteria=Eq(x,a_end))
noslip = p.add_boundary_subdomain("noslip", geom=geo_air, criteria= (x>-1) & (x<a_end))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=interior_air))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=interior_air))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=interior_air))
# Adding inlet/outlet Mass balance
inlet_outlet = p.add_boundary_subdomain("inlet_outlet", geom=geo_air,
criteria=Eq(x,-1) | Eq(x,a_end))
nx = sp.Symbol("normal_x")
ny = sp.Symbol("normal_y")
p.add_constraint("mass_balance_io", enforce(equation=Eq(Integral(nx*u+ny*v, x,y),0),
on_domain=inlet_outlet))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p2c.py |
from cfg import *
import sympy as sp
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p1b.py |
from cfg import *
import sympy as sp
# HEAT
[x, y], [T] = p.add_neural_network(name="NN_Tsolid", inputs=["x", "y"], outputs=[ "Tsolid"])
geo_solid = p.Rectangle("rect", (0,0), (1,1))
interior_solid = p.add_interior_subdomain("interior_solid", geom=geo_solid)
bdry_solid = p.add_boundary_subdomain("bdry_solid", geom=geo_solid, criteria=y>0)
bdry_heat_src = p.add_boundary_subdomain("bdry_heat_src", geom=geo_solid,
criteria=Eq(y,0) & (Abs(x-0.5)<0.1) )
Dsolid = 0.0625
p.add_constraint("diffusion_solid", enforce(equation=Eq(Dsolid*(T.diff(x,x)+T.diff(y,y)),0), on_domain=interior_solid))
p.add_constraint("diffusion_solid_bc", enforce(equation=Eq(T,0), on_domain=bdry_solid))
ny = sp.Symbol("normal_y")
p.add_constraint("heat_source", enforce(equation=Eq(ny*T.diff(y),1), on_domain=bdry_heat_src))
# Air flow
[x, y], [u, v, pp] = p.add_neural_network(name="NNflow", inputs=["x", "y"], outputs=["u", "v", "p"])
a_end=5
rect = p.Rectangle("rect_air", (-1,0), (a_end,2))
geo_air = p.GeometryDifference('gd0', rect, geo_solid)
interior_air=p.add_interior_subdomain("interior_air", geom=geo_air, compute_sdf_derivatives=True)
inlet = p.add_boundary_subdomain("inlet", geom=geo_air, criteria=Eq(x,-1))
outlet = p.add_boundary_subdomain("outlet", geom=geo_air, criteria=Eq(x,a_end))
noslip = p.add_boundary_subdomain("noslip", geom=geo_air, criteria= (x>-1) & (x<a_end))
p.add_constraint("inlet_u", enforce(equation=Eq(u,1), on_domain=inlet))
p.add_constraint("inlet_v", enforce(equation=Eq(v,0), on_domain=inlet))
p.add_constraint("outlet_p", enforce(equation=Eq(pp,0), on_domain=outlet))
p.add_constraint("noslip_u", enforce(equation=Eq(u,0), on_domain=noslip))
p.add_constraint("noslip_v", enforce(equation=Eq(v,0), on_domain=noslip))
nu = 0.02
p.add_constraint("continuity", enforce(equation=Eq(u.diff(x)+v.diff(y),0),
on_domain=interior_air))
p.add_constraint("momentum_x", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(x),
nu*(u.diff(x,x) + u.diff(y,y))),
on_domain=interior_air))
p.add_constraint("momentum_y", enforce(equation=Eq(u*u.diff(x)+v.diff(y)+pp.diff(y),
nu*(v.diff(x,x) + v.diff(y,y))),
on_domain=interior_air))
| modulus-toolchain-master | examples/PINNs/01-PracticeProblems/solutions/s3p2b.py |
from cfg import *
[x, y], sOuts = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v"])
u, v = sOuts
e_xx = p.add_submodel("epsilon_xx", u.diff(x))
e_yy = p.add_submodel("epsilon_yy", v.diff(y))
e_xy = p.add_submodel("epsilon_xy", 0.50 * u.diff(y) + 0.50 * v.diff(x))
# https://www.mathworks.com/matlabcentral/fileexchange/70183-elastic-constitutive-law-plane-stress
E = 1 # 2e9
nu = 0.3
C = E / (1 - nu**2)  # plane-stress stiffness factor E/(1 - nu^2)
sigma_xx = p.add_submodel("sigma_xx", C * (e_xx + nu * e_yy))
sigma_yy = p.add_submodel("sigma_yy", C * (nu * e_xx + e_yy))
sigma_xy = p.add_submodel("sigma_xy", C * (1 - nu) * e_xy)
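# Plane-stress constitutive relation written out (e_xy above is the tensor
# shear strain, i.e. half the engineering shear gamma_xy):
#   sigma_xx = E/(1 - nu^2) * (e_xx + nu*e_yy)
#   sigma_yy = E/(1 - nu^2) * (nu*e_xx + e_yy)
#   sigma_xy = E/(1 - nu^2) * (1 - nu) * e_xy   (equivalently E/(1 + nu) * e_xy)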
from sympy import Symbol
n_x, n_y, n_z = Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z")
traction_x = n_x * sigma_xx + n_y * sigma_xy
traction_y = n_x * sigma_xy + n_y * sigma_yy
# Geometry
r_sz = 200
g_inner = p.Circle("inner_c", (0, 0), 10)
g_outer = p.Rectangle("outer_c", (-r_sz, -r_sz), (r_sz, r_sz))
geom = p.GeometryDifference("geom", g_outer, g_inner)
pressure = p.add_submodel(
"traction_dot_normal", n_x * (traction_x) + n_y * (traction_y)
)
inner_solid = p.add_interior_subdomain("inner_solid", geom=geom)
equilibrium_x = Eq(sigma_xx.diff(x) + sigma_xy.diff(y), 0)
equilibrium_y = Eq(sigma_xy.diff(x) + sigma_yy.diff(y), 0)
p.add_constraint(
"equilibrium_x", enforce(equation=equilibrium_x, on_domain=inner_solid)
)
p.add_constraint(
"equilibrium_y", enforce(equation=equilibrium_y, on_domain=inner_solid)
)
inner_mesh_bdry = p.add_boundary_subdomain("inner_mesh", geom=g_inner)
# p.add_constraint("inner_traction", enforce(equation=Eq(pressure, 0),
# on_domain=inner_mesh_bdry))
p.add_constraint(
"inner_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=inner_mesh_bdry)
)
p.add_constraint(
"inner_traction_y", enforce(equation=Eq(traction_y, 0), on_domain=inner_mesh_bdry)
)
top_bdry = p.add_boundary_subdomain("top_bdry", geom=g_outer, criteria=Eq(y, r_sz))
p.add_constraint(
"top_traction_y", enforce(equation=Eq(traction_y, 1), on_domain=top_bdry)
)
p.add_constraint(
"top_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=top_bdry)
)
bottom_bdry = p.add_boundary_subdomain(
"bottom_bdry", geom=g_outer, criteria=Eq(y, -r_sz)
)
p.add_constraint(
"bottom_traction_y", enforce(equation=Eq(traction_y, -1), on_domain=bottom_bdry)
)
p.add_constraint(
"bottom_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=bottom_bdry)
)
left_right_bdry = p.add_boundary_subdomain(
"left_right_bdry", geom=g_outer, criteria=Or(Eq(x, r_sz), Eq(x, -r_sz))
)
p.add_constraint(
"left_right_traction_x",
enforce(equation=Eq(traction_x, 0), on_domain=left_right_bdry),
)
p.add_constraint(
"left_right_traction_y",
enforce(equation=Eq(traction_y, 0), on_domain=left_right_bdry),
)
| modulus-toolchain-master | examples/PINNs/02-StructuralAnalysis/struct_2d_problem.py |
from cfg import *
[x, y, z, R], sOuts = p.add_neural_network(
name="NN", inputs=["x", "y", "z", "R"], outputs=["u", "v", "w"]
)
u, v, w = sOuts
params = {R: (10, 20)}
e_xx = p.add_submodel("epsilon_xx", u.diff(x))
e_yy = p.add_submodel("epsilon_yy", v.diff(y))
e_zz = p.add_submodel("epsilon_zz", w.diff(z))
e_xy = p.add_submodel("epsilon_xy", 0.50 * u.diff(y) + 0.50 * v.diff(x))
e_yz = p.add_submodel("epsilon_yz", 0.50 * v.diff(z) + 0.50 * w.diff(y))
e_zx = p.add_submodel("epsilon_zx", 0.50 * w.diff(x) + 0.50 * u.diff(z))
E = 1 # 2e9
nu = 0.3
C = E / (1 - nu**2)  # plane-stress factor (unused below; the 3D law uses C11, C12, mu)
mu = E / ((1 + nu) * 2)
C11 = (E * (1 - nu)) / ((1 + nu) * (1 - 2 * nu))
C12 = (E * nu) / ((1 + nu) * (1 - 2 * nu))
sigma_xx = p.add_submodel("sigma_xx", C11 * e_xx + C12 * e_yy + C12 * e_zz)
sigma_yy = p.add_submodel("sigma_yy", C12 * e_xx + C11 * e_yy + C12 * e_zz)
sigma_zz = p.add_submodel("sigma_zz", C12 * e_xx + C12 * e_yy + C11 * e_zz)
sigma_yz = p.add_submodel("sigma_yz", mu * 2 * e_yz)
sigma_zx = p.add_submodel("sigma_zx", mu * 2 * e_zx)
sigma_xy = p.add_submodel("sigma_xy", mu * 2 * e_xy)
# refer to : https://en.wikiversity.org/wiki/Elasticity/Constitutive_relations
from sympy import Symbol
n_x, n_y, n_z = Symbol("normal_x"), Symbol("normal_y"), Symbol("normal_z")
traction_x = n_x * sigma_xx + n_y * sigma_xy + n_z * sigma_zx
traction_y = n_x * sigma_xy + n_y * sigma_yy + n_z * sigma_yz
traction_z = n_x * sigma_zx + n_y * sigma_yz + n_z * sigma_zz
# Geometry
r_sz = 200 * 1.0
g_inner = p.Sphere("inner_c", (0, 0, 0), R, params=params)
g_outer = p.Box("outer_c", (-r_sz, -r_sz, -r_sz), (r_sz, r_sz, r_sz))
geom = p.GeometryDifference("geom", g_outer, g_inner)
pressure = p.add_submodel(
"traction_dot_normal", n_x * (traction_x) + n_y * (traction_y) + n_z * (traction_z)
)
# ----- Dirichlet
rect = p.Rectangle("rect", (-r_sz, -r_sz), (r_sz, r_sz))
bdry = p.add_boundary_subdomain(
"bdry", geom=rect, params={z: float(r_sz), **params}, criteria=Eq(y, r_sz)
)
p.add_constraint("bdry_w0", enforce(equation=Eq(w, 0), on_domain=bdry))
# ------
inner_solid = p.add_interior_subdomain("inner_solid", geom=geom, params=params)
equilibrium_x = Eq(sigma_xx.diff(x) + sigma_xy.diff(y) + sigma_zx.diff(z), 0)
equilibrium_y = Eq(sigma_xy.diff(x) + sigma_yy.diff(y) + sigma_yz.diff(z), 0)
equilibrium_z = Eq(sigma_zx.diff(x) + sigma_yz.diff(y) + sigma_zz.diff(z), 0)
p.add_constraint(
"equilibrium_x", enforce(equation=equilibrium_x, on_domain=inner_solid)
)
p.add_constraint(
"equilibrium_y", enforce(equation=equilibrium_y, on_domain=inner_solid)
)
p.add_constraint(
"equilibrium_z", enforce(equation=equilibrium_z, on_domain=inner_solid)
)
inner_mesh_bdry = p.add_boundary_subdomain("inner_mesh", geom=g_inner, params=params)
# p.add_constraint("inner_traction", enforce(equation=Eq(pressure, 0),
# on_domain=inner_mesh_bdry))
p.add_constraint(
"inner_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=inner_mesh_bdry)
)
p.add_constraint(
"inner_traction_y", enforce(equation=Eq(traction_y, 0), on_domain=inner_mesh_bdry)
)
p.add_constraint(
"inner_traction_z", enforce(equation=Eq(traction_z, 0), on_domain=inner_mesh_bdry)
)
top_bdry = p.add_boundary_subdomain(
"top_bdry", geom=g_outer, criteria=Eq(y, r_sz), params=params
)
p.add_constraint(
"top_traction_y", enforce(equation=Eq(traction_y, 1), on_domain=top_bdry)
)
p.add_constraint(
"top_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=top_bdry)
)
p.add_constraint(
"top_traction_z", enforce(equation=Eq(traction_z, 0), on_domain=top_bdry)
)
bottom_bdry = p.add_boundary_subdomain(
"bottom_bdry", geom=g_outer, criteria=Eq(y, -r_sz), params=params
)
p.add_constraint(
"bottom_traction_y", enforce(equation=Eq(traction_y, -1), on_domain=bottom_bdry)
)
p.add_constraint(
"bottom_traction_x", enforce(equation=Eq(traction_x, 0), on_domain=bottom_bdry)
)
p.add_constraint(
"bottom_traction_z", enforce(equation=Eq(traction_z, 0), on_domain=bottom_bdry)
)
left_right_bdry = p.add_boundary_subdomain(
"left_right_bdry",
geom=g_outer,
criteria=Or(Eq(x, r_sz), Eq(x, -r_sz), Eq(z, r_sz), Eq(z, -r_sz)),
params=params,
)
p.add_constraint(
"left_right_traction_x",
enforce(equation=Eq(traction_x, 0), on_domain=left_right_bdry),
)
p.add_constraint(
"left_right_traction_y",
enforce(equation=Eq(traction_y, 0), on_domain=left_right_bdry),
)
p.add_constraint(
"left_right_traction_z",
enforce(equation=Eq(traction_z, 0), on_domain=left_right_bdry),
)
| modulus-toolchain-master | examples/PINNs/02-StructuralAnalysis/struct_3d_problem.py |
modulus-toolchain-master | mtc/__init__.py |
|
from sympy import Symbol, Function, Or, And, Eq, Abs, Integral, expand
import sympy
import os, sys
import numpy as np  # used by PDParam sampling and get_infer_fn
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
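# PDParam wraps a geometry so that design parameters (e.g. a radius "R") are appended
# as extra, uniformly sampled columns to every sampled point cloud.
# Sketch of intended use (assuming `geom` is a Modulus geometry object):
#   pd = PDParam(geom, param_ranges={"R": (10, 20)})
#   pts = pd.sample_interior(1000)   # pts["R"] is sampled uniformly in [10, 20]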
class PDParam:
def __init__(self, geom, param_ranges=None):
self._geom = geom
self._params = param_ranges
def sample_interior(self, *args, **kwargs):
d = self._geom.sample_interior(*args, **kwargs)
shp = d["x"].shape
nr = shp[0]
for param, rng in self._params.items():
data = np.random.rand(nr).reshape(shp)
delta = rng[1] - rng[0]
d[param] = data * delta + rng[0]
return d
def sample_boundary(self, *args, **kwargs):
d = self._geom.sample_boundary(*args, **kwargs)
for k, v in d.items():
d[k] = np.vstack([v, v, v])
shp = d["x"].shape
nr = shp[0]
for param, rng in self._params.items():
data = np.random.rand(nr).reshape(shp)
delta = rng[1] - rng[0]
d[param] = data * delta + rng[0]
return d
class InteriorSubdomain:
def __init__(self, geom, criteria):
self._geom = geom
self._criteria = criteria
def sample(self, *args, **kwargs):
self._geom.sample_interior(*args, **kwargs)
class BoundarySubdomain:
def __init__(self, geom, criteria):
self._geom = geom
self._criteria = criteria
def sample(self, *args, **kwargs):
self._geom.sample_boundary(*args, **kwargs)
def get_variables(self):
d = {
"normal_x": 1,
"normal_y": 1,
"normal_z": 1,
} # self._geom.sample_boundary(1)
return {v: Symbol(v) for v in d.keys()}
def enforce(equation=None, on_domain=None):
return {"equation": equation, "on_domain": on_domain}
class PINNProblem:
def __init__(self, name="PINN problem", cfg=None):
self._problem_name = name
self._vars = {} # str -> sympy.Symbol
self._nns = {}
self._nn_outs = set()
self.domain = None # Domain()
self._nodes = []
self._model = []
self._submodels = {}
self._constraints = {}
self._data_constraints = {}
self._geom = {}
self._interior_subdomains = {}
self._boundary_subdomains = {}
self._no_modulus_main = False
def load_conf(self):
import yaml
with open(os.path.join("conf", "config.yaml")) as f:
conf = yaml.safe_load(f)
return conf
def save_conf(self, conf):
import yaml
with open(os.path.join("conf", "config.yaml"), "w") as f:
yaml.safe_dump(conf, f)
def to_hdf(self, hdf_fname, data):
import h5py
with h5py.File(hdf_fname, "w") as f:
for k, v in data.items():
f.create_dataset(k, data=v)
def init_config(self, only1storder=False, max_steps=1000):
if only1storder:
self.compile_to_firstorder()
conf = self.load_conf()
def mkdeqc(dc):
d = {k: str(v) for k, v in dc.items()}
if dc["on_domain"] in self._interior_subdomains:
d["domain_type"] = "interior"
else:
d["domain_type"] = "boundary"
d["batch_size"] = 1000
return d
def make_training_stages():
d = {
"stage-dag": [],
"stages": {
"stage1": {
"description": "Default stage",
"data": {},
},
},
}
return d
# if "modulus_project" not in conf:
nn_type = "fully_connected"
# nn_type = "fourier_net"
nn = load_yaml(os.path.join("..", "mpc", "mpc", "config_types.yaml"))["arch"][
nn_type
]
conf["modulus_project"] = {
"project_name": self._problem_name,
"submodels": {k: str(v) for k, v in self._submodels.items()},
"equation_constraints": {
k: mkdeqc(v) for k, v in self._constraints.items()
},
"neural_networks": {
nn_name: {
"nn_type": nn_type,
"_target_": nn["_target_"],
**{k: v["default"] for k, v in nn.items() if k != "_target_"},
}
for nn_name in self._nns
},
}
conf["modulus_project"]["training"] = make_training_stages()
self.save_conf(conf)
from mtc.config_utils import customize_schema, config2dictV2
conf["modulus_project"]["training"]["stages"]["stage1"]["data"] = config2dictV2(
customize_schema()
)
s1data = conf["modulus_project"]["training"]["stages"]["stage1"]["data"]
s1data["training"]["max_steps"] = max_steps
self.save_conf(conf)
def get_variables(self, name):
assert (
name in self._boundary_subdomains
), "variable must be a boundary subdomain"
d = {
"normal_x": 1,
"normal_y": 1,
"normal_z": 1,
} # self._geom.sample_boundary(1)
return {v: Symbol(v) for v in d.keys()}
def GeometryCustomWarp(self, name, code_str, func, param_list, params=None):
self._custom_warp_code = code_str
assert name not in self._geom
self._geom[name] = {
"type": "GeometryCustomWarp",
"args": param_list,
"func": func,
"params": params,
}
return name
def Rectangle(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Rectangle",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Box(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Box",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Line2D(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Line",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Channel2D(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Channel2D",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Polygon(self, name, line, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Polygon",
"args": (line,),
"params": params,
"rotate": rotate,
}
return name
def Circle(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Circle",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Sphere(self, name, a, b, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Sphere",
"args": (a, b),
"params": params,
"rotate": rotate,
}
return name
def Cylinder(self, name, center, radius, height, params=None, rotate=None):
assert name not in self._geom
self._geom[name] = {
"type": "Cylinder",
"args": (center, radius, height),
"params": params,
"rotate": rotate,
}
return name
def Line1D(self, name, a, b, params=None):
assert name not in self._geom
self._geom[name] = {"type": "Line1D", "args": (a, b), "params": params}
return name
def Point1D(self, name, a, params=None):
assert name not in self._geom
self._geom[name] = {"type": "Point1D", "args": (a,), "params": params}
return name
def GeometryFromSTL(self, name, fname, airtight=False):
assert name not in self._geom
self._geom[name] = {"type": "STL", "args": (fname, airtight)}
return name
def GeometryDifference(self, name, g, og):
assert g in self._geom
assert og in self._geom
self._geom[name] = {"type": "GeometryDifference", "g": g, "og": og}
return name
def GeometryUnion(self, name, g, og):
assert g in self._geom
assert og in self._geom
self._geom[name] = {"type": "GeometryUnion", "g": g, "og": og}
return name
def GeometryIntersection(self, name, g, og):
assert g in self._geom
assert og in self._geom
self._geom[name] = {"type": "GeometryIntersection", "g": g, "og": og}
return name
def CustomGeometry(
self, name, module_path, module_name, class_name, *args, **kwargs
):
self._geom[name] = {
"type": "CustomGeometry",
"module_path": module_path,
"module_name": module_name,
"class_name": class_name,
"args": args,
"kwargs": kwargs,
}
return name
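# Geometry construction sketch (CSG-style), mirroring the bundled examples:
#   outer = p.Rectangle("outer", (-1, -1), (1, 1))
#   hole  = p.Circle("hole", (0, 0), 0.25)
#   geom  = p.GeometryDifference("geom", outer, hole)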
def compile_to_firstorder(self):
print("compile_to_firstorder")
# collect the unknown functions for which second and higher
# order derivatives are used
# collect all unknown functions across all NN definitions
u_vars = [u for d in self._nns.values() for u in d["invars"]]
u_fns = [u for d in self._nns.values() for u in d["outvars"]]
# print(u_fns)
ufn2order = {u: {v: 0 for v in u_vars} for u in u_fns}
expanded_constraints = {}
for cname, eq_constraint in self._constraints.items():
eq = eq_constraint["equation"]
eq = eq.lhs - eq.rhs
# expand submodels in equation
for sname, expr in self._submodels.items():
sm_fn = Function(sname)(*self._vars.values())
eq = sympy.simplify(eq.replace(sm_fn, expr))
expanded_constraints[cname] = {
"equation": Eq(eq, 0),
"on_domain": eq_constraint["on_domain"],
}
for u in u_fns:
if len(eq.find(u)) > 0:
for o in range(1, 5):
for v in ufn2order[u]:
if eq.find(u.diff(v, o)):
ufn2order[u][v] = max(o, ufn2order[u][v])
# print("\n", expanded_constraints)
# print()
# print(self._submodels)
# print()
# for u, uo in ufn2order.items():
# print(u, ":", uo)
# collect all required auxiliary unknown functions
aux_ufns = []
current_fn2aux_fn = {}
aux_compat = {} # compatibility constraints; e.g., d_dx1_u = u.diff(x)
for u, v2o in ufn2order.items():
for v, o in v2o.items():
if o > 1:
fnname = str(type(u))
new_ufns = [f"d_d{v}{i+1}_{fnname}" for i in range(o - 1)]
aux_ufns += new_ufns
current_fn2aux_fn[u.diff(v)] = Function(new_ufns[0])(
*self._vars.values()
)
for io in range(o - 1):
e = u.diff(v, io + 1 + 1)
sname = new_ufns[io]
current_fn2aux_fn[e] = Function(sname)(
*self._vars.values()
).diff(v)
ufn = u
new_u = Function(new_ufns[0])(*self._vars.values())
aux_compat[new_u] = Eq(ufn.diff(v), new_u)
for sname in new_ufns:
new_u = Function(sname)(*self._vars.values())
aux_compat[new_u.diff(v)] = Eq(ufn.diff(v), new_u)
ufn = new_u
# print("\nNeed new aux u funs", aux_ufns)
# print(current_fn2aux_fn)
# print(aux_compat)
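# Worked example of this first-order reduction: if a constraint uses u.diff(x, 2),
# an auxiliary unknown function d_dx1_u is introduced, u.diff(x, 2) is replaced by
# d_dx1_u.diff(x), and the compatibility constraint Eq(u.diff(x), d_dx1_u) is added
# on the same subdomain, so only first-order derivatives of each NN output remain.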
# add new ufuncs
for aufunc in aux_ufns:
self.add_neural_network(
name=f"{aufunc}NN", inputs=[v for v in self._vars], outputs=[aufunc]
)
# now rewrite the equations using the new auxiliary unknown functions
rewritten_constraints = {}
new_constraints = set()
ci = 0
handled = []
for cname, eq_c in expanded_constraints.items():
eq = eq_c["equation"]
for cur_fn, new_fn in current_fn2aux_fn.items():
if eq.find(cur_fn):
eq = eq.replace(cur_fn, new_fn)
new_constraints.add(aux_compat[new_fn])
rewritten_constraints[cname] = {
"equation": eq,
"on_domain": eq_c["on_domain"],
}
eqc_str = str(aux_compat[new_fn]) + str(eq_c["on_domain"])
if eqc_str not in handled:
handled += [eqc_str]
ci += 1
rewritten_constraints[cname + f"{ci}"] = {
"equation": aux_compat[new_fn],
"on_domain": eq_c["on_domain"],
}
from pprint import pprint
# print("rewritten_constraints")
# pprint(rewritten_constraints)
for k, v in rewritten_constraints.items():
self._constraints[k] = v
def compile_symbols(self):
sl = ["", "# " + "-" * 40, "# Symbols for variables", ""]
for v in self._vars:
sl += [f"{v} = Symbol('{v}')"]
return "\n".join(sl)
def compile_neural_networks(self):
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
# conf = load_yaml("../mpc/mpc/config_types_v2.yaml")
conf = load_yaml("config_types_v2.yaml")
sl = ["", "# " + "-" * 40, "# Neural Networks", ""]
pvars = ",".join(self._vars.keys())
for nnname, nn in self._nns.items():
for ov in nn["outputs"]:
sl += [f"{ov} = Function('{ov}')({pvars})"]
sl += [""]
for nnname, nn in self._nns.items():
ins = ",".join(f"Key('{v}')" for v in nn["inputs"])
outs = ",".join(f"Key('{v}')" for v in nn["outputs"])
# nn_conf = self._conf["modulus_project"]["neural_networks"][nnname]
nn_conf = self._conf["modulus_project"]["training"]["stages"][
self._stage_id
]["data"][nnname]
nn_target = nn_conf["_target_"].split(".")
NNArch = nn_target[-1]
sl += ["from " + ".".join(nn_target[:-1]) + " import " + NNArch]
def get_hint(k):
try:
return conf["arch"]["choices"][nn_conf["__selected__"]][k]["hint"]
except:
return ""
other_nn_args = "\n ".join(
[
f"{k}={v}, # {get_hint(k)}"
for k, v in nn_conf.items()
if k not in ["_target_", "nn_type"] and not k.startswith("__")
]
)
s = f"""net = {NNArch}(
input_keys= [{ins}],
output_keys=[{outs}],
{other_nn_args}
)"""
sl += [s]
nntrainable = self._conf["modulus_project"]["training"]["stages"][
self._stage_id
]["data"]["Neural Networks"][f"{nnname} Trainable"]
sl += [
f"nodes += [net.make_node(name='{nnname}', jit=cfg.jit, optimize={nntrainable})]"
]
sl += [""]
return "\n".join(sl)
def compile_submodels(self):
sl = ["", "# " + "-" * 40, "# SubModels", ""]
for smname, expr in self._submodels.items():
sl += [f"{smname} = sympy.sympify('{expr}')"]
sl += [f"nodes += [Node.from_sympy({smname}, '{smname}')]", ""]
return "\n".join(sl)
def compile_geometries(self):
sl = ["", "# " + "-" * 40, "# Geometries", ""]
for gname, g in self._geom.items():
if g["type"] == "CustomGeometry":
# HACK -- Move module code to training/stageX
import os
path = os.path.join("training", self._stage_id)
orig_module = os.path.join(g["module_path"], g["module_name"] + ".py")
os.system(f"cp {orig_module} {path}")
# End HACK
sl += [f"from {g['module_name']} import {g['class_name']}"]
print(g["args"])
args = "(" + ",".join(list(g["args"])) + ")"
sl += [f"{gname} = {g['class_name']}{args}"]
continue
if g["type"] == "GeometryDifference":
sl += [f"{gname}={g['g']}-{g['og']}"]
elif g["type"] == "GeometryUnion":
sl += [f"{gname}={g['g']}+{g['og']}"]
elif g["type"] == "GeometryIntersection":
sl += [f"{gname}={g['g']} & {g['og']}"]
elif g["type"] == "STL":
fname, airtight = g["args"]
sl += [
"from modulus.sym.geometry.tessellation import Tessellation",
f"{gname}=Tessellation.from_stl('{fname}', airtight={airtight}) ",
]
else: # CSG like Line1D, Circle, Box, etc.
rotate_str = ""
if "rotate" in g and g["rotate"] is not None:
rotate_str = f".rotate{g['rotate']}"
if "params" in g and g["params"] is not None:
sl += [
"from modulus.sym.geometry.parameterization import Parameterization, Parameter"
]
# pstr = "Parameterization({"
# for k, v in g["params"].items():
# pstr += f'Parameter("{k}"): {v}, '
# pstr += "})"
# # pstr = f"Parameterization({g['params']})"
# rotate_str = rotate_str[:-1] + f", parameterization={pstr})"
if "params" in g and g["params"] is not None:
sl += [
"from modulus.sym.geometry.parameterization import Parameterization"
]
pstr = f"Parameterization({g['params']})"
args = (
"("
+ ",".join([str(e) for e in g["args"]])
+ f", parameterization={pstr})"
)
sl += [f"{gname}={g['type']}{args}{rotate_str}"]
else:
sl += [f"{gname}={g['type']}{g['args']}{rotate_str}"]
# if g["type"] != "GeometryDifference":
# sl += [f"{gname}={g['type']}{g['args']}"]
# else:
# sl += [f"{gname}={g['g']}-{g['og']}"]
return "\n".join(sl)
def compile_interior_subdomains(self):
sl = ["", "# " + "-" * 40, "# Interior SubDomains", ""]
return "\n".join(sl)
def compile_boundary_subdomains(self):
sl = ["", "# " + "-" * 40, "# Boundary SubDomains", ""]
return "\n".join(sl)
def compile_equations(self):
sl = ["", "# " + "-" * 40, "# Equations", ""]
for cname, c in self._constraints.items():
if isinstance(c["equation"].lhs, Integral):
eqs = str(c["equation"].lhs.args[0])
sl += [f"eq=sympy.sympify('{eqs}')"]
sl += [f"{cname} = eq"]
sl += [f"nodes += [Node.from_sympy({cname}, '{cname}')]"]
else:
eqs = str(c["equation"])
sl += [f"eq=sympy.sympify('{eqs}')"]
sl += [f"{cname} = eq.rhs - eq.lhs"]
sl += [f"nodes += [Node.from_sympy({cname}, '{cname}')]"]
# self._nodes += [Node.from_sympy(eq.lhs - eq.rhs, str(c_name))]
sl += [""]
sl += [""]
return "\n".join(sl)
def compile_equation_constraints(self):
sl = ["", "# " + "-" * 40, "# Equation Constraints", ""]
from pprint import pprint
eq_cstr = self._conf["modulus_project"]["training"]["stages"][self._stage_id][
"data"
]["Equation Constraints"]
def validate_lambda_weighting_interior(cname, c, ls):
import sys
unavailable = ["normal_x", "normal_y", "normal_z"]
for v in unavailable:
if v in ls:
print(
f"[error][constraint: {cname}] variable `{v}` not available in Interior sub-domain, only in Boundary subdomains."
)
sys.exit(1)
def validate_lambda_weighting_bdry(cname, c, ls):
import sys
if "sdf" in ls:
print(
f"[error][constraint: {cname}] variable `sdf` not available in Boundary sub-domain"
)
sys.exit(1)
for cname, c in self._constraints.items():
if not eq_cstr[cname]["include"]:
continue
domain = c["on_domain"]
if c["on_domain"] in self._interior_subdomains:
sd = self._interior_subdomains[domain]
geom = sd["geom"]
ps = "None"
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
geom = f"PDParam(geom={geom}, param_ranges={ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
outvar = "{" + f"'{cname}': 0" + "}"
validate_lambda_weighting_interior(
cname, c, eq_cstr[cname]["lambda_weighting"]
)
s = f"""
pic = PointwiseInteriorConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
compute_sdf_derivatives={sd["compute_sdf_derivatives"]},
lambda_weighting={{'{cname}': sympy.sympify('{eq_cstr[cname]["lambda_weighting"]}')}}
)
domain.add_constraint(pic, '{cname}')
"""
sl += [s]
if c["on_domain"] in self._boundary_subdomains:
sd = self._boundary_subdomains[domain]
geom = sd["geom"]
ps = "None"
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
geom = f"PDParam(geom={geom}, param_ranges={ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
outvar = "{" + f"'{cname}': 0" + "}"
# check for errors in lambda_weightint expression
validate_lambda_weighting_bdry(
cname, c, eq_cstr[cname]["lambda_weighting"]
)
if isinstance(c["equation"].lhs, Integral):
eq = c["equation"]
print("Integral constraint")
print(c)
lambda_exp = eq_cstr[cname]["lambda_weighting"]
lambda_w = "{" + f"'{cname}': sympy.sympify('{lambda_exp}'), " + "}"
outvar = "{" + f"'{cname}': {eq.rhs}" + "}"
s = f"""
ibc = IntegralBoundaryConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size=1,
integral_batch_size={eq_cstr[cname]["batch_size"]},
outvar={outvar},
lambda_weighting={lambda_w}
)
domain.add_constraint(ibc, '{cname}')
"""
sl += [s]
continue
s = f"""
pbc = PointwiseBoundaryConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
lambda_weighting={{'{cname}': sympy.sympify('{eq_cstr[cname]["lambda_weighting"]}')}}
)
domain.add_constraint(pbc, '{cname}')
"""
sl += [s]
return "\n".join(sl)
def compile_equation_constraints_opt(self):
sl = ["", "# " + "-" * 40, "# Equation Constraints", ""]
from pprint import pprint
print("compile_equation_constraints_opt")
eq_cstr = self._conf["modulus_project"]["training"]["stages"][self._stage_id][
"data"
]["Equation Constraints"]
def validate_lambda_weighting_interior(cname, c, ls):
import sys
unavailable = ["normal_x", "normal_y", "normal_z"]
for v in unavailable:
if v in ls:
print(
f"[error][constraint: {cname}] variable `{v}` not available in Interior sub-domain, only in Boundary subdomains."
)
sys.exit(1)
def validate_lambda_weighting_bdry(cname, c, ls):
import sys
if "sdf" in ls:
print(
f"[error][constraint: {cname}] variable `sdf` not available in Boundary sub-domain"
)
sys.exit(1)
## group constraints
c_dict = {}
for cname, c in self._constraints.items():
if not eq_cstr[cname]["include"]:
continue
domain = c["on_domain"]
batch_size = eq_cstr[cname]["batch_size"]
k = (domain, batch_size)
if k not in c_dict:
c_dict[k] = []
c_dict[k] += [(cname, c)]
pprint({k: len(c_dict[k]) for k in c_dict.keys()})
##
for (domain, batch_size), constraints in c_dict.items():
# for cname, c in self._constraints.items():
# if not eq_cstr[cname]["include"]:
# continue
parameterization = ""
if domain in self._interior_subdomains:
sd = self._interior_subdomains[domain]
geom = sd["geom"]
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
# geom = f"PDParam(geom={geom}, param_ranges={ps})"
ps = (
"{"
+ ",".join(
[f"Parameter('{k}'):{v}" for k, v in sd["params"].items()]
)
+ "}"
)
parameterization = f", parameterization=Parameterization({ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
outvar = "{" + f"'{cname}': 0" + "}"
outvar = "{"
lambda_w = "{"
for cname, c in constraints:
lambda_exp = eq_cstr[cname]["lambda_weighting"]
validate_lambda_weighting_interior(cname, c, lambda_exp)
outvar += f"'{cname}': 0, "
lambda_w += f"'{cname}': sympy.sympify('{lambda_exp}'), "
outvar += "}"
lambda_w += "}"
s = f"""
pic = PointwiseInteriorConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
compute_sdf_derivatives={sd["compute_sdf_derivatives"]},
lambda_weighting={lambda_w}{parameterization}
)
domain.add_constraint(pic, '{cname}')
"""
sl += [s]
if domain in self._boundary_subdomains:
sd = self._boundary_subdomains[domain]
geom = sd["geom"]
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
# geom = f"PDParam(geom={geom}, param_ranges={ps})"
ps = (
"{"
+ ",".join(
[f"Parameter('{k}'):{v}" for k, v in sd["params"].items()]
)
+ "}"
)
parameterization = f", parameterization=Parameterization({ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
cname, c = constraints[0]
if isinstance(c["equation"].lhs, Integral):
eq = c["equation"]
print("Integral constraint")
print(c)
lambda_exp = eq_cstr[cname]["lambda_weighting"]
lambda_w = "{" + f"'{cname}': sympy.sympify('{lambda_exp}'), " + "}"
outvar = "{" + f"'{cname}': {eq.rhs}" + "}"
s = f"""
ibc = IntegralBoundaryConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size=1,
integral_batch_size={eq_cstr[cname]["batch_size"]},
outvar={outvar},
lambda_weighting={lambda_w}{parameterization}
)
domain.add_constraint(ibc, '{cname}')
"""
sl += [s]
continue
outvar = "{" + f"'{cname}': 0" + "}"
outvar = "{"
lambda_w = "{"
for cname, c in constraints:
lambda_exp = eq_cstr[cname]["lambda_weighting"]
validate_lambda_weighting_bdry(cname, c, lambda_exp)  # boundary constraints may use normal_x/normal_y but not sdf
outvar += f"'{cname}': 0, "
lambda_w += f"'{cname}': sympy.sympify('{lambda_exp}'), "
outvar += "}"
lambda_w += "}"
s = f"""
pbc = PointwiseBoundaryConstraint(
nodes=nodes,
geometry={geom},
criteria={criteria},
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
lambda_weighting={lambda_w}{parameterization}
)
domain.add_constraint(pbc, '{cname}')
"""
sl += [s]
return "\n".join(sl)
def compile_equation_constraints_sampled(self):
sl = ["", "# " + "-" * 40, "# Equation Constraints Sampled", ""]
sl += ["import h5py # to load HDF5 samples"]
from pprint import pprint
eq_cstr = self._conf["modulus_project"]["training"]["stages"][self._stage_id][
"data"
]["Equation Constraints"]
for cname, c in self._constraints.items():
if not eq_cstr[cname]["include"]:
continue
domain = c["on_domain"]
if c["on_domain"] in self._interior_subdomains:
sd = self._interior_subdomains[domain]
geom = sd["geom"]
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
geom = f"PDParam(geom={geom}, param_ranges={ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
# vlist = list(self._vars) + ["sdf"]
# invar = "{" + ", ".join([f"'{v}': dv['{v}'][:]" for v in vlist]) + "}"
outvar = "{" + f"'{cname}': np.zeros_like(dv['x'])" + "}"
fname = f"../samples/{domain}.hdf5"
# fname = f"../samples/{cname}.hdf5"
s = f"""
dv = dict()
with h5py.File('{fname}', 'r') as f:
for k in f.keys():
dv[k] = f[k][:]
if k == 'area':
dv[k] *= 1000 # HACK
pic = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=dv,
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
)
domain.add_constraint(pic, '{cname}')
"""
sl += [s]
if c["on_domain"] in self._boundary_subdomains:
sd = self._boundary_subdomains[domain]
geom = sd["geom"]
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
geom = f"PDParam(geom={geom}, param_ranges={ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
# invar = "{" + ", ".join([f"'{v}': dv['{v}'][:]" for v in vlist]) + "}"
outvar = "{" + f"'{cname}': np.zeros_like(dv['x'])" + "}"
fname = f"../samples/{domain}.hdf5"
# fname = f"../samples/{cname}.hdf5"
s = f"""
dv = dict()
with h5py.File('{fname}', 'r') as f:
for k in f.keys():
dv[k] = f[k][:]
if k == 'area':
dv[k] *= 1000 # HACK
pic = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=dv,
batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
outvar={outvar},
)
domain.add_constraint(pic, '{cname}')
"""
# s = f"""
# pbc = PointwiseBoundaryConstraint(
# nodes=nodes,
# geometry={geom},
# criteria={criteria},
# batch_size={eq_cstr[cname]["batch_size"]}, #cfg.modulus_project.equation_constraints.{cname}.batch_size,
# outvar={outvar},
# )
# domain.add_constraint(pbc, '{cname}')
# """
sl += [s]
return "\n".join(sl)
def compile_equation_constraints_sampler(self):
sl = ["", "# " + "-" * 40, "# Equation Constraints Sampler", ""]
sl += ["import time, os", "ecs_t0 = time.time()"]
sl += ["if not os.path.exists('samples'):", " os.system('mkdir samples')"]
from pprint import pprint
eq_cstr = self._conf["modulus_project"]["training"]["stages"][self._stage_id][
"data"
]["Equation Constraints"]
for cname, c in self._constraints.items():
if not eq_cstr[cname]["include"]:
continue
domain = c["on_domain"]
if c["on_domain"] in self._interior_subdomains:
sd = self._interior_subdomains[domain]
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
geom = sd["geom"]
parameterization = ""
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
# geom = f"PDParam(geom={geom}, param_ranges={ps})"
ps = (
"{"
+ ",".join(
[f"Parameter('{k}'):{v}" for k, v in sd["params"].items()]
)
+ "}"
)
parameterization = f", parameterization=Parameterization({ps})"
outvar = "{" + f"'{cname}': 0" + "}"
nr = eq_cstr[cname]["batch_size"] * 1000
# fname = f"samples/{cname}.hdf5"
fname = f"samples/{domain}.hdf5"
s = f"""
import h5py
from modulus.sym.geometry.parameterization import Parameterization, Parameter
subdomain = {geom}
samples = subdomain.sample_interior({nr}, criteria={criteria}, compute_sdf_derivatives={sd["compute_sdf_derivatives"]}{parameterization})
with h5py.File("{fname}", "w") as f:
for k,v in samples.items():
f.create_dataset(k, data = v)
print(f'wrote {fname}')
"""
sl += [s]
if c["on_domain"] in self._boundary_subdomains:
sd = self._boundary_subdomains[domain]
geom = sd["geom"]
parameterization = ""
if sd["params"] is not None:
ps = (
"{"
+ ",".join([f"'{k}':{v}" for k, v in sd["params"].items()])
+ "}"
)
# geom = f"PDParam(geom={geom}, param_ranges={ps})"
ps = (
"{"
+ ",".join(
[f"Parameter('{k}'):{v}" for k, v in sd["params"].items()]
)
+ "}"
)
parameterization = f", parameterization=Parameterization({ps})"
criteria = sd["criteria"]
if criteria is not None:
criteria = f"sympy.sympify('{str(criteria)}')"
outvar = "{" + f"'{cname}': 0" + "}"
nr = eq_cstr[cname]["batch_size"] * 1000
fname = f"samples/{domain}.hdf5"
# fname = f"samples/{cname}.hdf5"
s = f"""
import h5py
from modulus.sym.geometry.parameterization import Parameterization, Parameter
subdomain = {geom}
t0=time.time()
samples = subdomain.sample_boundary({nr}, criteria={criteria}{parameterization})
t1=time.time()
print(f"sampled {cname} in", t1-t0, "s")
with h5py.File("{fname}", "w") as f:
for k,v in samples.items():
f.create_dataset(k, data = v)
print(f'wrote {fname}')
"""
sl += [s]
sl += [
"ecs_t1 = time.time()",
"print('sampled in ', f'{ecs_t1-ecs_t0:.3f}s')",
]
return "\n".join(sl)
def compile_data_constraints(self):
sl = ["", "# " + "-" * 40, "# Data Constraints", ""]
for cname, v in self._data_constraints.items():
data_cstr = self._conf["modulus_project"]["training"]["stages"][
self._stage_id
]["data"]["dconstraint"]
invars = ",".join([f"'{v}': f['{v}']" for v in self._vars])
invars = "{" + invars + "}"
s = f"""
import h5py
with h5py.File("{v['data_fname']}", 'r') as f:
pbc = PointwiseConstraint.from_numpy(
nodes=nodes,
invar={invars},
batch_size=int(np.min([f['x'].shape[0], {data_cstr['batch_size']}])),
outvar={{'{v['outvar']}':f["{v['outvar']}"]}},
)
domain.add_constraint(pbc, '{v["data_fname"]}')
"""
sl += [s]
return "\n".join(sl)
def compile_target_training(self, constraint_opt):
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
sl = ["", "# " + "-" * 40, "# General Variables", ""]
sl += ["nodes = []", "domain=Domain()"]
sl += [self.compile_symbols()]
sl += [self.compile_neural_networks()]
sl += [self.compile_submodels()]
sl += [self.compile_geometries()]
sl += [self.compile_interior_subdomains()]
sl += [self.compile_boundary_subdomains()]
sl += [self.compile_equations()]
if constraint_opt:
sl += [self.compile_equation_constraints_opt()]
else:
sl += [self.compile_equation_constraints()]
sl += [self.compile_data_constraints()]
sl += ["", "# " + "-" * 40, "# Start Training Loop", ""]
sl += ["slv = Solver(cfg, domain)"]
sl += ["slv.solve()"]
preamble = "# Generated by `mtc compile inference`\n\n"
t = env.from_string(
load_template(os.path.join("templates", "train-imports.py"))
)
preamble += t.render(no_modulus_main=self._no_modulus_main, conf_path=".")
body = "\n".join(sl)
body = "\n".join([" " * 4 + line for line in body.split("\n")])
comp_str = preamble + "\n" + body + "\nrun()"
return comp_str
def compile_target_training_sampled(self):
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
sl = ["", "# " + "-" * 40, "# General Variables", ""]
sl += ["nodes = []", "domain=Domain()"]
sl += [self.compile_symbols()]
sl += [self.compile_neural_networks()]
sl += [self.compile_submodels()]
# sl += [self.compile_geometries()]
# sl += [self.compile_interior_subdomains()]
# sl += [self.compile_boundary_subdomains()]
sl += [self.compile_equations()]
sl += [self.compile_equation_constraints_sampled()]
sl += [self.compile_data_constraints()]
sl += ["", "# " + "-" * 40, "# Start Training Loop", ""]
sl += ["slv = Solver(cfg, domain)"]
sl += ["slv.solve()"]
preamble = "# Generated by `mtc compile inference`\n\n"
t = env.from_string(
load_template(os.path.join("templates", "train-imports.py"))
)
preamble += t.render(no_modulus_main=self._no_modulus_main, conf_path=".")
body = "\n".join(sl)
body = "\n".join([" " * 4 + line for line in body.split("\n")])
comp_str = preamble + "\n" + body + "\nrun()"
return comp_str
def compile_inference_section(self, stageid):
from jinja2 import Environment, PackageLoader, select_autoescape
template = load_template(os.path.join("templates", "inference_section.py"))
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
t = env.from_string(template)
outvars = [e for e in self._nn_outs]
# if len(self._submodels) > 0:
# outvars += list(self._submodels.keys())
possible_outvars = outvars + list(self._constraints.keys())
coll_models = []
try:
if len(self._model["name"]):
coll_models = [self._model["name"]]
outvars += coll_models
except:
pass
return t.render(
stageid=stageid,
_vars=self._vars,
_submodels=outvars,
self_model=self._model,
possible_outvars=possible_outvars,
coll_models=coll_models,
)
def make_infer_info(self, stage):
template = """
info = {
"stage": "{{stage}}",
"__file__":__file__,
"inputs": [{% for item in _vars %}'{{ item }}',{% endfor %}],
"default-outputs": [{% for item in outputs %}'{{ item }}',{% endfor %}],
"possible-outputs": [{% for item in possible_outvars %}'{{ item}}', {% endfor %} ]
}
"""
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
t = env.from_string(template)
outvars = [e for e in self._nn_outs]
if len(self._submodels) > 0:
outvars += list(self._submodels.keys())
possible_outvars = outvars + list(self._constraints.keys())
try:
if len(self._model["name"]):
outvars.append(self._model["name"])
except:
pass
print(outvars)
return t.render(
stage=stage,
_vars=self._vars,
outputs=outvars,
possible_outvars=possible_outvars,
)
def compile_target_inference(self, stageid):
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
sl = ["", "# " + "-" * 40, "# General Variables", ""]
sl += ["nodes = []", "domain=Domain()"]
sl += [self.compile_symbols()]
sl += [self.compile_neural_networks()]
sl += [self.compile_submodels()]
sl += [self.compile_geometries()]
sl += [self.compile_interior_subdomains()]
sl += [self.compile_boundary_subdomains()]
sl += [self.compile_equations()]
sl += ["", "# " + "-" * 40, "# Inference", "#" + "-" * 40, ""]
sl += [self.compile_inference_section(stageid)]
# no need to include constraints in inference
sl += [self.compile_equation_constraints_opt()]
sl += [self.compile_data_constraints()]
sl += ["", "# " + "-" * 40, "# Start Training Loop", ""]
sl += ["slv = Solver(cfg, domain)"]
sl += ["slv._eval()"]
preamble = "# Generated by `mtc compile inference`\n\n"
t = env.from_string(
load_template(os.path.join("templates", "train-imports.py"))
)
conf_path = os.path.join("training", stageid)
preamble += t.render(no_modulus_main=self._no_modulus_main, conf_path=conf_path)
body = "\n".join(sl)
body = "\n".join([" " * 4 + line for line in body.split("\n")])
comp_str = preamble + "\n" + body + "\nrun()\n"
comp_str += self.make_infer_info(stageid)
return comp_str
def compile_target_sampler(self, stageid):
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
sl = ["", "# " + "-" * 40, "# General Variables", ""]
sl += ["nodes = []", "domain=Domain()"]
sl += [self.compile_symbols()]
# sl += [self.compile_neural_networks()]
sl += [self.compile_submodels()]
sl += [self.compile_geometries()]
sl += [self.compile_interior_subdomains()]
sl += [self.compile_boundary_subdomains()]
# sl += [self.compile_equations()]
# sl += ["", "# " + "-" * 40, "# Inference", "#" + "-" * 40, ""]
# sl += [self.compile_inference_section(stageid)]
# no need to include constraints in inference
# sl += [self.compile_equation_constraints()]
# sl += [self.compile_data_constraints()]
sl += [self.compile_equation_constraints_sampler()]
# sl += ["", "# " + "-" * 40, "# Start Training Loop", ""]
# sl += ["slv = Solver(cfg, domain)"]
# sl += ["slv._eval()"]
preamble = "# Generated by `mtc compile --target sampler`\n\n"
t = env.from_string(
load_template(os.path.join("templates", "train-imports.py"))
)
conf_path = os.path.join("training", stageid)
preamble += t.render(no_modulus_main=self._no_modulus_main, conf_path=conf_path)
body = "\n".join(sl)
body = "\n".join([" " * 4 + line for line in body.split("\n")])
comp_str = preamble + "\n" + body + "\nrun()\n"
# comp_str += self.make_infer_info(stageid)
return comp_str
def compile(
self,
compile_type="training",
stageid="stage1",
only1storder=False,
constraint_opt=False,
):
if only1storder:
self.compile_to_firstorder()
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
compile_types = ["training", "inference", "sampler", "geometry"]
assert (
compile_type in compile_types
), f"Got {compile_type}... Allowed compiling tragets: {compile_types}"
self._conf = self.load_conf()
self._stage_id = stageid
if not os.path.exists("training"):
os.makedirs("training")
stagedir = os.path.join("training", stageid)
if not os.path.exists(stagedir):
os.makedirs(stagedir)
if compile_type == "training":
target_file = os.path.join(stagedir, "train.py")
print(f"[mtc] compiling {target_file}")
trainpy = self.compile_target_training(constraint_opt)
with open(target_file, "w") as f:
f.write(trainpy)
print(f"[mtc] wrote {target_file}")
if compile_type == "inference":
self._no_modulus_main = True
target_file = os.path.join(stagedir, "infer.py")
print(f"[mtc] compiling {target_file}")
inferpy = self.compile_target_inference(stageid)
with open(target_file, "w") as f:
f.write(inferpy)
print(f"[mtc] wrote {target_file}")
if compile_type == "geometry":
from .compile.target_geometry import compile
self._no_modulus_main = True
target_file = os.path.join(stagedir, "geometry.py")
print(f"[mtc] compiling {target_file}")
inferpy = compile(self) # self.compile_target_sampler(stageid)
with open(target_file, "w") as f:
f.write(inferpy)
print(f"[mtc] wrote {target_file}")
if compile_type == "sampler":
self._no_modulus_main = True
target_file = os.path.join(stagedir, "sample.py")
print(f"[mtc] compiling {target_file}")
inferpy = self.compile_target_sampler(stageid)
with open(target_file, "w") as f:
f.write(inferpy)
print(f"[mtc] wrote {target_file}")
self._no_modulus_main = False
target_file = os.path.join(stagedir, "train_sampled.py")
print(f"[mtc] compiling {target_file}")
trainpy = self.compile_target_training_sampled()
with open(target_file, "w") as f:
f.write(trainpy)
print(f"[mtc] wrote {target_file}")
def get_infer_fn(self):
# load NNs
slv = Solver(_cfg, self.domain)
slv._eval()
# create Inference object
invals = {str(v): np.array([0]).reshape(-1, 1) for v in self._vars.keys()}
inferencer = PointwiseInferencer(
invar=invals,
output_names=[submodel for submodel in self._submodels],
nodes=self._nodes,
batch_size=256 * 4 * 4 * 4,
)
self.domain.add_inferencer(inferencer)
# create inference function
def infer_fn(*args, **kargs):
from modulus.sym.domain.constraint import Constraint
invals = {str(v): kargs[v].reshape(-1, 1) for v in self._vars.keys()}
invar0 = invals
invar = Constraint._set_device(
invar0, requires_grad=False, device=inferencer.device
)
pred_outvar = inferencer.forward(invar)
result = {}
for submodel in self._submodels:
ret = pred_outvar[submodel].cpu().detach().numpy()
ret_val = np.array([v for v in ret[:, 0]])
result[submodel] = ret_val
# now build the main model
model = self._model
main_result = ret_val.copy().reshape(-1, 1)
invars, invals = [], []
for varn, varval in invar0.items():
invars.append(Symbol(varn))
invals.append(varval)
for smodel in model["conditions"]:
func = smodel["func"]
cond = smodel["on"]
submodel_result = result[str(func)].reshape(-1, 1)
from sympy import lambdify
sel = lambdify(invars, cond)(*invals)
main_result[sel] = submodel_result[sel]
result[model["name"]] = main_result
return result
return infer_fn
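# Usage sketch for the returned function (assumes the modulus runtime objects
# `Solver` and `_cfg` are in scope and trained checkpoints are available):
#   infer = p.get_infer_fn()
#   out = infer(x=np.linspace(0, 1, 100), y=np.zeros(100))  # keyword args keyed by input variables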
def train(self):
self.pprint()
slv = Solver(_cfg, self.domain)
# start solver
slv.solve()
def set_model(self, name, func_on_list):
self._model = {"name": name, "conditions": func_on_list}
def add_submodel(self, name, expr):
self._submodels[name] = expr
## CREATE compile_submodels()
# self._nodes += [Node.from_sympy(expr, str(name))]
return Function(name)(*self._vars.values())
def add_neural_network(self, name="", nn_type="", inputs=[], outputs=[]):
assert len(name) > 0, "must provide a network name"
assert name not in self._nns, f"{name} NN already exists"
assert len(inputs) > 0, "must include a non-empty list of input vars"
assert len(outputs) > 0, "must include a non-empty list of output vars"
# assert nn_type in allowed_NN_types, f"{nn_type} not in list {allowed_NN_types}"
# if len(self._vars) > 0:
# assert set(self._vars.keys()) == set(
# inputs
# ), "New NN definition does not match the input variables already defined"
for v in inputs:
self._vars[v] = Symbol(v)
# self._vars = {v: Symbol(v) for v in inputs}
ocheck = self._nn_outs.intersection(set(outputs))
assert len(ocheck) == 0, f"Redefining output variables {ocheck}"
self._nn_outs = self._nn_outs.union(set(outputs))
## CREATE a compile_neural_networks()
# net = instantiate_arch(
# input_keys=[Key(v) for v in inputs],
# output_keys=[Key(v) for v in outputs],
# cfg=allowed_NN_types[nn_type],
# )
# self._nodes += [net.make_node(name=name, jit=_cfg.jit)]
net = None
invars = [self._vars[v] for v in inputs]
outvars = [Function(v)(*invars) for v in outputs]
self._nns[name] = {
"net": net,
"inputs": inputs,
"outputs": outputs,
"nn_type": nn_type,
"invars": invars,
"outvars": outvars,
}
return invars, outvars
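# Usage sketch (as in the bundled examples):
#   [x, y], [u, v] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u", "v"])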
def add_interior_subdomain(
self, name, geom=None, params=None, criteria=None, compute_sdf_derivatives=False
):
# geom is required, the other two are optional
assert geom is not None, "geom argument is required"
assert (
name not in self._interior_subdomains
and name not in self._boundary_subdomains
), "subdomains must have unique names"
self._interior_subdomains[name] = {
"geom": geom,
"params": params,
"criteria": criteria,
"compute_sdf_derivatives": compute_sdf_derivatives,
}
return name
def add_boundary_subdomain(self, name, geom=None, params=None, criteria=None):
# geom is required, the other two are optional
assert geom is not None, "geom argument is required"
assert (
name not in self._interior_subdomains
and name not in self._boundary_subdomains
), "subdomains must have unique names"
self._boundary_subdomains[name] = {
"geom": geom,
"params": params,
"criteria": criteria,
}
return name
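# Usage sketch:
#   interior = p.add_interior_subdomain("interior", geom=geom)
#   inlet = p.add_boundary_subdomain("inlet", geom=geom, criteria=Eq(x, -1))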
def add_data_constraint(self, name=None, model=None, data_fname=None):
import h5py
with h5py.File(data_fname) as f:
vset = set(self._vars.keys())
r = vset.difference(set(f.keys()))
assert len(r) == 0, f"some variables are not represented in dataset: {r}"
m = str(model)
m = m[: m.find("(")]
r = set([m]).difference(set(f.keys()))
assert len(r) == 0, f"model not represented in dataset {r}"
assert (
str(type(model)) in self._submodels or str(m) in self._nn_outs
), f"undefined model {model}"
# now add the constraint
self._data_constraints[name] = {
"model": model,
"data_fname": data_fname,
"outvar": m,
}
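# The HDF5 file must contain one dataset per input variable (e.g. 'x', 'y') plus one
# dataset named after the constrained model output; both requirements are asserted above.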
def add_constraint(self, cname, eq_constraint):
"cname -- name of constraint"
assert (
cname not in self._constraints
), f"\n\n[{self.__class__.__name__}.add_constraint] '{cname}' constraint already defined\n\n"
self._constraints[cname] = eq_constraint
def set_constraints(self, cdict):
self._constraints = cdict
variables_in_constraints = []
# create the nodes first
for c_name, c_v in cdict.items():
eq = c_v["equation"]
eq0 = str(eq.lhs - eq.rhs)
for v in self._nn_outs:
if v in eq0:
variables_in_constraints.append(v)
# self._nodes += [Node.from_sympy(eq.lhs - eq.rhs, str(c_name))]
nodes = self._nodes
def pprint_constraints(self):
# print("=" * 80)
print("Constraints")
print("-" * 80)
varstr = ", ".join(self._vars)
varstr = f"({varstr})"
if len(varstr) > 3:
print("| ", f"(.) = {varstr}\n|")
for c_name, c_v in self._constraints.items():
eq = c_v["equation"]
eq0 = str(eq.lhs - eq.rhs)
pre = f"| {c_name}: "
eq = f"{eq.lhs} = {eq.rhs}"
if len(varstr) > 3:
eq = eq.replace(varstr, "(.)")
domain_type = (
"int." if c_v["on_domain"] in self._interior_subdomains else "bdry"
)
domain_str = f"[on {domain_type} subdomain]"
print(pre, eq)
print("| " + domain_str, c_v["on_domain"])
print("|")
print("| Data", "-" * 40)
for dc_name in self._data_constraints:
pre = f" {dc_name}: "
cd = self._data_constraints[dc_name]
print("|", pre, f"[model]", cd["model"])
print("|", " " * len(pre), f" [file]", cd["data_fname"])
import h5py
with h5py.File(cd["data_fname"]) as f:
k = list(f.keys())[0]
n = f[k].shape[0]
print(
"|", " " * len(pre), f" [info] {n:,} pts | keys: {list(f.keys())}"
)
print("-" * 80, "\n")
def pprint_models(self):
# print("=" * 80)
print("Models")
print("-" * 80)
for mname, model in self._submodels.items():
print("| ", mname, ":", model)
model = self._model
print("|---")
if len(model) > 0:
vars_str = ",".join(self._vars.keys())
print(f"| {model['name']}({vars_str}) = ")
for cond in model["conditions"]:
print(f"|", " " * 10, f"{cond['func']} if {cond['on']}")
print("-" * 80, "\n")
def pprint_nns(self):
# print("=" * 80)
print("Neural Networks")
print("-" * 80)
fmt = lambda l: [Symbol(v) for v in l]
for nnn, nn in self._nns.items():
# spref = f"{nnn} = {nn['nn_type']}("
spref = f"{nnn} = NeuralNetwork("
s = spref + f"inputs={fmt(nn['inputs'])},"
s += "\n" + " " * len(spref) + f"outputs={fmt(nn['outputs'])})"
nns = s
nns = "\n".join(["| " + line for line in nns.split("\n")])
print(nns)
print("-" * 80, "\n")
def pprint(self):
print("------------")
print("PINN Problem")
print("------------")
print()
self.pprint_nns()
self.pprint_models()
self.pprint_constraints()
| modulus-toolchain-master | mtc/problem.py |
import click
import os, sys
# def load_template(filename):
# with open(os.path.join(os.path.dirname(__file__), filename)) as f:
# return f.read()
MTC_ROOT = os.path.dirname(__file__)
@click.group()
def cli():
pass
@cli.command()
@click.argument("project-name")
def create(project_name):
"""Create new Modulus project"""
import os, shutil
basedir = os.path.split(__file__)[0]
newdir = os.path.join(os.path.curdir, project_name)
if not os.path.exists(newdir):
os.makedirs(newdir)
os.makedirs(os.path.join(newdir, "conf"))
src = os.path.join(basedir, "templates", "conf", "config.yaml")
dst = os.path.join(newdir, "conf", "config.yaml")
shutil.copyfile(src, dst)
src = os.path.join(basedir, "templates", "conf", "config_PINO.yaml")
dst = os.path.join(newdir, "conf", "config_PINO.yaml")
shutil.copyfile(src, dst)
src = os.path.join(basedir, "templates", "conf", "__init__.py")
dst = os.path.join(newdir, "conf", "__init__.py")
shutil.copyfile(src, dst)
src = os.path.join(basedir, "templates", "configurator.ipynb")
dst = os.path.join(newdir, "configurator.ipynb")
shutil.copyfile(src, dst)
os.system(f"cp -r {basedir}/templates/docs {newdir}")
probtpl = os.path.join(basedir, "templates", "problem.py")
with open(probtpl) as f:
probstr = f.read().replace("%%projname%%", project_name)
with open(os.path.join(newdir, "problem.py"), "w") as f:
f.write(probstr)
with open(os.path.join(basedir, "templates", "cfg.py")) as f:
probstr = f.read().replace("%%projname%%", project_name)
with open(os.path.join(newdir, "cfg.py"), "w") as f:
f.write(probstr)
else:
print(f"error: {project_name} already exists")
def run_system_cmd(s):
import sys, os
from click import ClickException
r = os.system(s)
if r != 0:
raise ClickException(f"system call [{s}] failed with exit code {r}")
@cli.command()
@click.argument("fname")
def hdf5_info(fname):
"Prints HDF5 top-level keys and array size"
import h5py
with h5py.File(fname) as f:
keys = list(f.keys())
print("HDF5 file", fname)
print("keys", keys)
print("shape", f[keys[0]].shape)
@cli.command()
@click.argument("info_type")
@click.option(
"--only-first-order-ufunc/--not-only-first-order-ufunc",
default=False,
help="ensure that only first order derivatives are taken for all unknown functions (will introduce auxiliary ufuncs if necessary)",
)
def show(info_type, only_first_order_ufunc):
"""Show information info_type= problem | training"""
import os
subcmds = ["problem", "training"]
assert info_type in subcmds, f"allowed commands {subcmds}"
if info_type in ["problem"]:
if only_first_order_ufunc:
run_system_cmd(
'python -c "from problem import p; p.compile_to_firstorder(); p.pprint()"'
)
else:
run_system_cmd('python -c "from problem import p; p.pprint()"')
elif info_type == "training":
import yaml
def printinfo(pre, s):
n = len("Optimize NNs")
desc = s.split("\n")
desc = desc[0] + "\n" + "\n".join([" " * (3 + n) + l for l in desc[1:]])
if desc[-1] == "\n":
desc = desc[:-1]
print(f"[{pre.rjust(n)}]", desc)
with open(os.path.join("conf", "config.yaml")) as f:
conf = yaml.safe_load(f)
tr = conf["modulus_project"]["training"]
print("Stage DAG:", ", ".join([f"{a}->{b}" for a, b in tr["stage-dag"]]))
print()
for stageid, stage in tr["stages"].items():
sdata = stage["data"]
print(stageid, "-" * 40)
# desc = stage["description"].split("\n")
# print(pre, desc)
printinfo("description", stage["description"])
optinfo = f', lr = {sdata["optimizer"]["lr"]}'
optinfo += ", steps < " + str(sdata["training"]["max_steps"])
sel_opt = sdata["optimizer"]["__selected__"]
if sel_opt == "lbfgs":
optinfo = f', lr = {sdata["optimizer"]["lr"]}, max_iter = {sdata["optimizer"]["max_iter"]}'
printinfo("optimizer", sel_opt + optinfo)
nnopt = []
for nn_name, optimizing in sdata["Neural Networks"].items():
nnopt += [f"{nn_name}={optimizing}"]
nntype = []
for nn in conf["modulus_project"]["neural_networks"].keys():
nntype += [
f"[{'training' if sdata['Neural Networks'][nn+' Trainable'] else 'not training'}] {nn}: {sdata[nn]['__selected__']}"
]
printinfo("NNs", "\n".join(nntype))
import sympy
cs = [
f"{c} | bs={v['batch_size']:,} | weight = {sympy.sympify(v['lambda_weighting'])}"
for c, v in sdata["Equation Constraints"].items()
if v["include"]
]
printinfo("constraints", "\n".join(cs))
print("=" * 40)
else:
print("error: allowed subcommands are", subcmds)
@cli.command()
@click.option(
"--target",
default="training",
help="one of: training (default), inference, sampler",
)
@click.option(
"--stage", default="stage1", help="default=stage1 (ignored if target=sampler)"
)
@click.option(
"--only-first-order-ufunc/--not-only-first-order-ufunc",
default=False,
help="ensure that only first order derivatives are taken for all unknown functions (will introduce auxiliary ufuncs if necessary)",
)
@click.option(
"--constraint-opt/--no-constraint-opt",
default=False,
help="Optimize constraints by grouping like constraints",
)
def compile(target, stage, only_first_order_ufunc, constraint_opt):
"""Compile problem into a sampler.py, train.py, or infer.py"""
print(f"[compile] {target} {stage}")
s = f"'{target}'"
sid = f"'{stage}'"
run_system_cmd(
f'python -c "from problem import p; p.compile(compile_type={s},stageid={sid}, only1storder={only_first_order_ufunc}, constraint_opt={constraint_opt})"'
)
@cli.command()
@click.option("--stage", default="stage1")
@click.option(
"--compile/--no-compile",
default=True,
help="problem.py is compiled into train.py by default, use this to avoid compilation",
)
def sample(stage, compile):
"""Sample and save the point cloud for each domain in an HDF5 file in the training/stageX/samples/ dir. Splits problem into sample.py and train_sampled.py. Use `mtc train --sampled` to train."""
print(f"[train] {stage}")
if compile:
os.system(f"mtc compile --target sampler --stage {stage}")
# create stage conf subdir if needed
stagedir = os.path.join("training", stage)
# if not os.path.exists(os.path.join({stagedir}, "conf")):
if True:
os.system(f"cp -r conf {stagedir}")
os.system(f"touch {stagedir}/__init__.py")
os.system(f"touch training/__init__.py")
print(f"[mtc] running {stagedir}/sample.py ")
os.system(f"cd {stagedir}; python sample.py")
@cli.command()
@click.option("--stage", default="stage1")
@click.option(
"--compile/--no-compile",
default=True,
help="problem.py is compiled into train.py by default, use this to avoid compilation",
)
@click.option(
"--sampled/--no-sampled",
default=False,
help="run the pre-sampled (load from disk) domains (train_sampled.py) or sample on the fly (train.py)",
)
@click.option(
"--only-first-order-ufunc/--not-only-first-order-ufunc",
default=False,
help="ensure that only first order derivatives are taken for all unknown functions (will introduce auxiliary ufuncs if necessary)",
)
@click.option(
"--constraint-opt/--no-constraint-opt",
default=True,
help="Optimize constraints by grouping like constraints",
)
@click.option(
"--ngpus",
default=1,
help="Multi-gpu training (default=1)",
)
def train(stage, compile, sampled, only_first_order_ufunc, constraint_opt, ngpus):
"""Train models"""
print(f"[train] {stage}")
copt = "--constraint-opt" if constraint_opt else "--no-constraint-opt"
if compile:
if sampled:
run_system_cmd(f"mtc compile --target sampler --stage {stage}")
else:
if only_first_order_ufunc:
run_system_cmd(
f"mtc compile --target training {copt} --stage {stage} --only-first-order-ufunc"
)
run_system_cmd(
f"mtc compile --target inference --stage {stage} --only-first-order-ufunc"
)
else:
run_system_cmd(f"mtc compile --target training {copt} --stage {stage}")
run_system_cmd(f"mtc compile --target inference --stage {stage}")
# create stage conf subdir if needed
stagedir = os.path.join("training", stage)
# if not os.path.exists(os.path.join({stagedir}, "conf")):
if True:
run_system_cmd(f"cp -r conf {stagedir}")
run_system_cmd(f"touch {stagedir}/__init__.py")
run_system_cmd(f"touch training/__init__.py")
# update the target conf
import yaml
with open(os.path.join("conf", "config.yaml")) as f:
conf = yaml.safe_load(f)
stage_conf = {k: v for k, v in conf.items()}
for k, v in conf["modulus_project"]["training"]["stages"][stage]["data"].items():
if "__selected__" in v:
del v["__selected__"]
stage_conf[k] = v
src = ""
for s, e in conf["modulus_project"]["training"]["stage-dag"]:
if e == stage:
src = os.path.join("training", s, "outputs")
run_system_cmd(f"mkdir training/{stage}/outputs")
print("copying NN models from", s)
run_system_cmd(
f"cp {src}/*pth training/{stage}/outputs; rm training/{stage}/outputs/optim*"
)
with open(os.path.join("training", stage, "conf", "config.yaml"), "w") as f:
yaml.safe_dump(stage_conf, f)
python_start_str = "python"
if ngpus > 1:
python_start_str = f"mpirun --allow-run-as-root -np {ngpus} python"
if sampled:
print(f"[mtc] starting pre-sampled training session in: {stagedir}")
if not os.path.exists(os.path.join(stagedir, "samples")):
print("Need to sample first, run:\n mtc sample --stage", stage)
else:
run_system_cmd(f"cd {stagedir}; {python_start_str} train_sampled.py")
else:
run_cmd = f"cd {stagedir}; {python_start_str} train.py"
print(f"[mtc] starting training session in: {stagedir} | {run_cmd}")
run_system_cmd(run_cmd)
@cli.command()
@click.option(
"--only-first-order-ufunc/--not-only-first-order-ufunc",
default=False,
help="ensure that only first order derivatives are taken for all unknown functions (will introduce auxiliary ufuncs if necessary)",
)
@click.option("--max-steps", default=1000, help="Max training steps (default=1000)")
def init_conf(only_first_order_ufunc, max_steps):
"""DESTRUCTIVE! Initialize configuration file for problem. Run every time a new constraint, sub-model, or neural network is introduced."""
os.system(
f'python -c "from problem import p; p.init_config(only1storder={only_first_order_ufunc}, max_steps={max_steps})"'
)
# @cli.command()
# @click.option("--port", default=7777, help="default=7777")
# def configurator(port):
# """Start the Modulus Project Configurator server"""
# os.system(f"sh $MPC_PATH/start-app.sh `pwd` {port}")
# @cli.command()
# @click.option(
# "--stage",
# default="stage1",
# help="Use a stage ID (like 'stage1') to target a specific stage",
# )
# @click.option("--port", default=7777, help="default=7777")
# @click.option(
# "--compile/--no-compile",
# default=True,
# help="problem.py is compiled into infer.py by default, use this to avoid compilation",
# )
# def inference_server(stage, port, compile):
# "start an inference server"
# if compile:
# os.system(f"mtc compile --target inference --stage {stage}")
# # start server
# os.system(f"cd training/{stage}; python -m mpc.rest_server.start {stage}")
@cli.command()
@click.option(
"--static-doc-dir",
default=os.path.join(MTC_ROOT, "docs", "static"),
    help=f"Location to start serving docs (default={MTC_ROOT}/docs/static)",
)
@click.option("--port", default=7777, help="default=7777")
def docs(static_doc_dir, port):
"Modulus Simplified API Docs Server"
print("[mtc] serving docs from", static_doc_dir)
os.system(f"cd {static_doc_dir}; python -m http.server {port}")
@cli.command()
@click.option(
"--stage",
default="all",
help="Use a stage ID (like 'stage1') to target a specific stage. Default=all and removes all training.",
)
def clean(stage):
"DESTRUCTIVE! Remove training data for all stages (default) or for a specific stage using the optional --stage stageX"
if stage == "all":
os.system("rm -rf training")
else:
path = f"training/{stage}"
if os.path.exists(path):
os.system(f"rm -rf {path}")
else:
print(f"Stage [{stage}] does not exist or has not been used in training.")
@cli.command(help="Prints the version string")
def version():
import os
fname = os.path.join(os.environ["MTC_PATH"], "MTC_VERSION")
with open(fname, "r") as f:
print(f.read())
# @cli.command()
# def problem_to_first_order():
# "Transforms problem to ensure that only first order derivatives of the unknown functions are used"
# os.system(f'python -c "from problem import p; p.compile_to_firstorder()"')
if __name__ == "__main__":
cli()
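# Hedged usage sketch (not part of the CLI itself) -- a typical session, run
# from a project directory containing problem.py and conf/config.yaml:
#
#   mtc version                                   # print the toolchain version
#   mtc compile --target training --stage stage1  # emit training/stage1/train.py
#   mtc train --stage stage1                      # compile problem.py and launch training
#   mtc sample --stage stage1                     # pre-sample point clouds to HDF5 ...
#   mtc train --stage stage1 --sampled            # ... then train from the sampled data
#   mtc clean --stage stage1                      # remove that stage's training artifacts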
| modulus-toolchain-master | mtc/mtc.py |
import os
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
def load_config(path="./"):
import yaml
conf_path = os.path.join(path, "conf", "config.yaml")
with open(conf_path) as f:
return yaml.safe_load(f)
def customize_schema(path="./"):
fname = os.path.join(os.path.dirname(__file__), "config_types_v2.yaml")
schema = load_yaml(fname)
config = load_config(path=path)
# customize schema to reflect project
arch = schema["arch"].copy()
del schema["arch"]
for nn_var in config["modulus_project"]["neural_networks"]:
schema[nn_var] = arch.copy()
schema[nn_var]["label"] = f"[{nn_var}] NN Architecture"
# equation constraints
constraints = schema["constraints"].copy()
del schema["constraints"]
cstr = {}
eqc = {"type": "group", "default": {}}
import json
for eqn in config["modulus_project"]["equation_constraints"].keys():
eqc["default"][eqn] = json.loads(json.dumps(constraints))
eqc["default"][eqn]["label"] = f"{eqn}"
schema["Equation Constraints"] = eqc
eqc = {"type": "group", "default": {}}
for nn_var in config["modulus_project"]["neural_networks"]:
eqc["default"][f"{nn_var} Trainable"] = {"type": "bool", "default": True}
schema["Neural Networks"] = eqc
return schema
def config2dictV2(ctype):
d = {}
for k, v in ctype.items():
if v["type"] == "option":
assert v["default"] in v["choices"], f"wrong default in {k}"
d[k] = config2dictV2(v["choices"][v["default"]])
d[k]["__selected__"] = v["default"]
elif v["type"] == "group":
d[k] = config2dictV2(v["default"])
else:
d[k] = v["default"]
return d
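# Hedged usage sketch (not part of the original module): customize_schema()
# specialises the generic schema to this project's networks and constraints,
# and config2dictV2() expands that schema into a nested dict of defaults.
# Assumes the working directory is a project root containing conf/config.yaml.
#
#   schema = customize_schema(path="./")
#   defaults = config2dictV2(schema)
#   defaults["training"]["max_steps"] = 5000  # the same knob `mtc init_conf` sets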
| modulus-toolchain-master | mtc/config_utils.py |
from sympy import Symbol, Function, Or, And, Eq, Abs, Integral, expand
import sympy
import sympy as sp
from jinja2 import Template
import os, sys
import h5py
def load_template(filename):
import jinja2 as j
path = os.path.join(os.path.dirname(__file__), "templates", "fno-problem")
env = j.Environment(loader=j.FileSystemLoader(path))
return env.get_template(filename)
def load_yaml(filename):
"""Loads a YAML file using a path relative to where this module resides"""
import yaml
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return yaml.safe_load(f)
class FNOProblem:
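    """Declarative description of an FNO/PINO problem: holds the discrete grid,
    the FNO networks, the input/output data distributions (HDF5 files) and the
    data / interior / boundary constraints, and compiles them into Modulus
    training and inference scripts via compile()."""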
def __init__(self, name="FNO problem", cfg=None):
self._problem_name = name
self._nns = {}
self._nn_outs = set()
self._vars = {}
self._interior_subdomains = {}
self._constraints = {}
self._distributions = {}
self._grid = {}
def to_hdf(self, hdf_fname, data):
import h5py
with h5py.File(hdf_fname, "w") as f:
for k, v in data.items():
f.create_dataset(k, data=v)
def load_conf(self):
import yaml
with open(os.path.join("conf", "config.yaml")) as f:
conf = yaml.safe_load(f)
return conf
def save_conf(self, conf):
import yaml
with open(os.path.join("conf", "config.yaml"), "w") as f:
yaml.safe_dump(conf, f)
def add_neural_network(self, name="", nn_type="", inputs=[], outputs=[]):
assert "vars" in self._grid, "Must set grid before defining NNs"
assert len(name) > 0, "must provide a network name"
assert name not in self._nns, f"{name} NN already exists"
assert len(inputs) > 0, "must include a non-empty list of input vars"
assert len(outputs) > 0, "must include a non-empty list of output vars"
for v in inputs:
self._vars[v] = Symbol(v)
# self._vars = {v: Symbol(v) for v in inputs}
ocheck = self._nn_outs.intersection(set(outputs))
assert len(ocheck) == 0, f"Redefining output variables {ocheck}"
self._nn_outs = self._nn_outs.union(set(outputs))
gridvars = self._grid["vars"].keys()
invars = [Function(v)(*gridvars) for v in inputs]
outvars = [Function(v)(*gridvars) for v in outputs]
net = None
self._nns[name] = {
"net": net,
"inputs": inputs,
"outputs": outputs,
"nn_type": nn_type,
"invars": invars,
"outvars": outvars,
}
gridvarfuncs = [Function(str(v).upper())(*gridvars) for v in gridvars]
return invars, gridvarfuncs, outvars
def set_grid_domain(self, N: int, extent):
assert self._grid == {}, "Grid may be defined only once!"
assert len(extent) == 2, "Exactly 2 dimensions are required for the grid"
N = int(N)
self._grid["N"] = N
self._grid["vars"] = {
sp.Symbol(vn): {
"var_id": vid,
"extent": (float(ve[0]), float(ve[1])),
"delta": (float(ve[1]) - float(ve[0])) / float(N - 1),
}
for vid, (vn, ve) in enumerate(extent.items())
}
vars = list(self._grid["vars"].keys())
return vars + [self._grid["vars"][v]["delta"] for v in vars]
def add_distribution(self, name, hdf5_file=""):
assert "vars" in self._grid, "Must set grid before defining NNs"
assert (
name not in self._distributions
), f"[{name}] already defined (change name?)"
assert hdf5_file != "", "Must provide full-path HDF5 file with data"
# check file for valid data
with h5py.File(hdf5_file, "r") as f:
keys = list(f.keys())
N = self._grid["N"]
for k in f.keys():
sh = f[k].shape
assert (
sh[-2] == N and sh[-1] == N
), f"{hdf5_file}\nWrong dimensions of data -- last two must be ({N}, {N}), found {sh[-2:]} instead.\n"
self._distributions[name] = {"file": hdf5_file, "nsamples": sh[0], "keys": keys}
return name
def add_data_constraint(self, name, over=None):
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
assert (
over is not None
), "Must provide distribution that includes input/output data"
self._constraints[name] = {"type": "data", "distribution": over}
def add_interior_constraint(
self, name, equation, on=None, over=None, criteria=None
):
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
assert (
over is not None
), "Must provide distribution that includes input/output data"
if criteria is not None:
srepr_criteria = sp.srepr(criteria)
else:
srepr_criteria = ""
self._constraints[name] = {
"type": "interior",
"equation": equation,
"eq_srepr": sp.srepr(equation.lhs - equation.rhs),
"distribution": over,
"srepr_criteria": srepr_criteria,
"criteria": criteria,
"onFunc": str(on.func),
}
def add_dirichlet_gen_constraint(self, name, on=None, equation=None, at=None):
assert on is not None and equation is not None and at is not None
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
# varinfo = self._grid["vars"][at.lhs]
# if float(at.rhs) == float(varinfo["extent"][0]):
# var_offset = 0
# elif float(at.rhs) == float(varinfo["extent"][1]):
# var_offset = -1
# else:
# assert (
# False
# ), f"The 'at' value must be either end of the extent {varinfo['extent']}"
self._constraints[name] = {
"type": "boundary-dirichlet-gen",
"equation": equation,
"expr_srepr": sp.srepr(equation.rhs),
"at": sp.srepr(at),
"condition": at,
"onFunc": str(on.func),
}
def add_dirichlet_constraint(self, name, on=None, equation=None, at=None):
assert on is not None and equation is not None and at is not None
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
varinfo = self._grid["vars"][at.lhs]
if float(at.rhs) == float(varinfo["extent"][0]):
var_offset = 0
elif float(at.rhs) == float(varinfo["extent"][1]):
var_offset = -1
else:
assert (
False
), f"The 'at' value must be either end of the extent {varinfo['extent']}"
self._constraints[name] = {
"type": "boundary-dirichlet",
"equation": equation,
"expr_srepr": sp.srepr(equation.rhs),
"at": at,
"onFunc": str(on.func),
"var_id": varinfo["var_id"],
"var_offset": var_offset,
}
def add_neumann_constraint(self, name, on=None, equation=None, at=None):
assert on is not None and equation is not None and at is not None
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
varinfo = self._grid["vars"][at.lhs]
if float(at.rhs) == float(varinfo["extent"][0]):
var_offset = 1
elif float(at.rhs) == float(varinfo["extent"][1]):
var_offset = self._grid["N"] - 2
else:
assert (
False
), f"The 'at' value must be either end of the extent {varinfo['extent']}"
self._constraints[name] = {
"type": "boundary-neumann",
"equation": equation,
"expr_srepr": sp.srepr(equation.rhs),
"at": at,
"onFunc": on.func,
"var_id": varinfo["var_id"],
"var_offset": var_offset,
}
def add_boundary_constraint(self, name, equation, over=None, criteria=None):
assert (
name not in self._constraints
), f"Constraint named [{name}] already exists, choose a different name"
assert (
over is not None
), "Must provide distribution that includes input/output data"
if criteria is not None:
srepr_criteria = sp.srepr(criteria)
else:
srepr_criteria = ""
self._constraints[name] = {
"type": "boundary",
"equation": equation,
"eq_srepr": sp.srepr(equation.lhs - equation.rhs),
"distribution": over,
"srepr_criteria": srepr_criteria,
"criteria": criteria,
}
def init_config(self, only1storder=False, max_steps=1000):
if only1storder:
self.compile_to_firstorder()
conf = self.load_conf()
def mkdeqc(dc):
d = {k: str(v) for k, v in dc.items()}
# if dc["on_domain"] in self._interior_subdomains:
# d["domain_type"] = "interior"
# else:
# d["domain_type"] = "boundary"
d["batch_size"] = 1000
return d
def make_training_stages():
d = {
"stage-dag": [],
"stages": {
"stage1": {
"description": "Default stage",
"data": {},
},
},
}
return d
# if "modulus_project" not in conf:
nn_type = "fully_connected"
# nn_type = "fourier_net"
nn = load_yaml(os.path.join("..", "mpc", "mpc", "config_types.yaml"))["arch"][
nn_type
]
conf["modulus_project"] = {
"project_name": self._problem_name,
# "submodels": {k: str(v) for k, v in self._submodels.items()},
"equation_constraints": {
k: mkdeqc(v) for k, v in self._constraints.items()
},
"neural_networks": {
nn_name: {
"nn_type": nn_type,
"_target_": nn["_target_"],
**{k: v["default"] for k, v in nn.items() if k != "_target_"},
}
for nn_name in self._nns
},
}
conf["modulus_project"]["training"] = make_training_stages()
self.save_conf(conf)
from mtc.config_utils import customize_schema, config2dictV2
conf["modulus_project"]["training"]["stages"]["stage1"]["data"] = config2dictV2(
customize_schema()
)
s1data = conf["modulus_project"]["training"]["stages"]["stage1"]["data"]
s1data["training"]["max_steps"] = max_steps
self.save_conf(conf)
def compile_target_inference(self, stageid):
T = load_template("infer.py")
data = {
"problem_name": self._problem_name,
"nns": self._nns,
"distributions": self._distributions,
"constraints": self._constraints,
"grid": self._grid,
"nn_ins": self._vars,
"nn_outs": self._nn_outs,
"stageid": stageid,
}
return T.render(data)
def compile_target_training(self):
T = load_template("train.py")
data = {
"problem_name": self._problem_name,
"nns": self._nns,
"distributions": self._distributions,
"constraints": self._constraints,
"grid": self._grid,
"nn_ins": self._vars,
"nn_outs": self._nn_outs,
}
return T.render(data)
def compile(
self,
compile_type="training",
stageid="stage1",
only1storder=False,
constraint_opt=False,
):
from jinja2.nativetypes import NativeEnvironment
env = NativeEnvironment()
compile_types = ["training", "inference"]
assert (
compile_type in compile_types
        ), f"Got {compile_type}... Allowed compiling targets: {compile_types}"
self._conf = self.load_conf()
self._stage_id = stageid
if not os.path.exists("training"):
os.makedirs("training")
stagedir = os.path.join("training", stageid)
if not os.path.exists(stagedir):
os.makedirs(stagedir)
if compile_type == "training":
target_file = os.path.join(stagedir, "train.py")
trainpy = self.compile_target_training()
with open(target_file, "w") as f:
f.write(trainpy)
print(f"[mtc] wrote {target_file}")
elif compile_type == "inference":
target_file = os.path.join(stagedir, "infer.py")
print(f"[mtc] compiling {target_file}")
inferpy = self.compile_target_inference(stageid)
with open(target_file, "w") as f:
f.write(inferpy)
print(f"[mtc] wrote {target_file}")
def pprint_nns(self):
# print("=" * 80)
print("Neural Networks")
print("-" * 80)
fmt = lambda l: [Symbol(v) for v in l]
for nnn, nn in self._nns.items():
# spref = f"{nnn} = {nn['nn_type']}("
spref = f"{nnn} = FNO("
s = spref + f"input_funcs={fmt(nn['inputs'])},"
s += "\n" + " " * len(spref) + f"output_funcs={fmt(nn['outputs'])})"
nns = s
nns = "\n".join(["| " + line for line in nns.split("\n")])
print(nns)
print("-" * 80, "\n")
def pprint_discrete_domain(self):
print("Discrete Domain (Grid)")
print("-" * 80)
gvars = self._grid["vars"]
vars = ",".join([str(v) for v in gvars.keys()])
dtype = "x".join([f"{gvars[v]['extent']}" for v in gvars.keys()])
gdelta = ", ".join([f"d{v}={gvars[v]['delta']}" for v in gvars.keys()])
gsize = ", ".join([f"N_{v}={self._grid['N']}" for v in gvars.keys()])
print("| " + f"({vars})" + " in " + dtype)
print("| " + gdelta + " | " + gsize)
print("-" * 80, "\n")
def pprint_distributions(self):
print("Distributions")
print("-" * 80)
for dname, d in self._distributions.items():
print(
f"| {dname}: #samples = {d['nsamples']} | entries = {', '.join(d['keys'])}"
)
print("-" * 80, "\n")
def pprint_constraints(self):
print("Constraints")
print("-" * 80)
for cn, c in self._constraints.items():
more = ""
if c["type"] != "data":
eq = c["equation"]
more = f"\n| {eq.lhs} = {eq.rhs}"
print(f"| {cn} [{c['type']}] over '{c['distribution']}'" + more + "\n|")
print("-" * 80, "\n")
def pprint(self):
print("----------------")
print("FNO/PINO Problem")
print("----------------")
print()
self.pprint_discrete_domain()
self.pprint_nns()
self.pprint_distributions()
self.pprint_constraints()
def latex_constraints(self):
s = ""
for cn, c in self._constraints.items():
            s += f"\\text{{{cn} }} : \\quad & "
if c["type"] == "boundary-dirichlet-gen":
eq = c["equation"]
s += sp.latex(eq.lhs) + " = " + sp.latex(eq.rhs)
s += ", \\quad " + sp.latex(c["condition"]) + " "
elif c["type"] == "boundary-dirichlet":
eq = c["equation"]
s += sp.latex(eq.lhs) + " = " + sp.latex(eq.rhs)
s += ", \\quad " + sp.latex(c["at"]) + " "
elif c["type"] == "boundary-neumann":
eq = c["equation"]
s += sp.latex(eq.lhs) + " = " + sp.latex(eq.rhs)
s += ", \\quad " + sp.latex(c["at"]) + " "
elif c["type"] != "data":
eq = c["equation"]
s += sp.latex(eq.lhs) + " = " + sp.latex(eq.rhs)
if "criteria" in c and c["criteria"] is not None:
                    s += " ; \\quad " + sp.latex(c["criteria"])
typestr = c["type"].replace("boundary-", "")
s += f"& \\text{{ {typestr} }} \\\\ \\\\\n"
r = "\\begin{split}\\begin{aligned}\n" + s + "\n\\end{aligned}\\end{split}"
return r
def _repr_latex_(self):
s = ""
s += f"\\text{{ Project: }} & \\text{{ {self._problem_name} }} \\\\\n"
svars = ", ".join([str(v) for v in self._grid["vars"]])
ss = "\\times".join(
f"{v['extent']}^{{ {self._grid['N']} }}"
for vs, v in self._grid["vars"].items()
)
deltas = ", ".join(
f"d{vs}={v['delta']}" for vs, v in self._grid["vars"].items()
)
        s += f"\\text{{Grid:}}& \\quad ({svars}) \\in {ss} \\quad {deltas} \\\\\n"
for nnn, nn in self._nns.items():
domain = ", ".join([sp.latex(e) for e in nn["invars"]])
image = ", ".join([sp.latex(e) for e in nn["outvars"]])
            ss = f"\\text{{ {nnn} [FNO]:}}& \\quad ({domain}) \\mapsto {image} \\\\ \n"
s += ss
s += "\\\\ \n"
s = "\\begin{split}\\begin{aligned}\n" + s + "\n\\end{aligned}\\end{split}"
s += self.latex_constraints()
return s
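# Hedged usage sketch (mirrors the FNO workflow documented in templates/problem.py;
# the HDF5 path below is illustrative):
#
#   p = FNOProblem(name="darcy")
#   x, y, dx, dy = p.set_grid_domain(N=100, extent={"x": (0.0, 1.0), "y": (0.0, 1.0)})
#   [K], [X, Y], [U] = p.add_neural_network(name="NN", inputs=["K"], outputs=["U"])
#   dist = p.add_distribution("Dinputs", hdf5_file="inputs.hdf5")
#   p.add_interior_constraint("poisson", equation=Eq(U.diff(x, x) + U.diff(y, y), 1),
#                             on=U, over=dist)
#   p.add_boundary_constraint("bdry", equation=Eq(U, 0), over=dist)
#   p.init_config()
#   p.compile(compile_type="training", stageid="stage1")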
| modulus-toolchain-master | mtc/fno_problem.py |
modulus-toolchain-master | mtc/docs/__init__.py |
|
import os
from jinja2 import Template
def load_template(filename):
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def compile(problem):
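    """Render the warp geometry module for `problem` and return its source as a
    string. Picks the 2D or 3D Jinja template depending on whether the problem
    defines a `z` variable; geometry parameters become extra keyword arguments
    of the generated Geometry methods."""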
assert (
"x" in problem._vars and "y" in problem._vars
), "Problem geometry must be 2D or 3D"
t_file = "warp_geometry_2d.py"
if "z" in problem._vars:
print("3D geometry detected")
t_file = "warp_geometry.py"
T = Template(load_template(os.path.join("templates", t_file)))
data = {}
if "_custom_warp_code" in dir(problem):
data["custom_warp_code"] = problem._custom_warp_code
data["geometries"] = [{"name": name, "g": g} for name, g in problem._geom.items()]
params = {}
for name, g in problem._geom.items():
p = g.get("params", {})
if p is not None:
for k, v in p.items():
try:
if len(v) > 0:
v = v[0]
except:
v = float(v)
params[k] = v
print(params)
data["params"] = params
paramstr = ", ".join([f"{k}={v}" for k, v in params.items()])
if len(paramstr) > 0:
paramstr = ", " + paramstr
data["paramstr"] = paramstr
paramcallstr = ", ".join([f"{k}" for k, v in params.items()])
if len(paramcallstr) > 0:
paramcallstr = ", " + paramcallstr
data["paramcallstr"] = paramcallstr
return T.render(data)
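# Hedged usage sketch (not part of the original module): `problem` stands for a
# problem object (from problem.py) whose geometry has already been defined; the
# output filename is illustrative.
#
#   from mtc.compile import target_geometry
#   src = target_geometry.compile(problem)  # rendered warp geometry module (str)
#   with open("geometry.py", "w") as f:
#       f.write(src)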
| modulus-toolchain-master | mtc/compile/target_geometry.py |
modulus-toolchain-master | mtc/compile/__init__.py |
|
import warp as wp
wp.init()
## SDF Helpers
# subtraction
@wp.func
def op_subtract(d1: float, d2: float):
return -wp.min(-d1, d2)
# intersection
@wp.func
def op_intersect(d1: float, d2: float):
return wp.max(d1, d2)
# union
@wp.func
def op_union(d1: float, d2: float):
return wp.min(d1, d2)
# signed sphere
@wp.func
def sdf_sphere(p: wp.vec3, r: float):
return wp.length(p) - r
# signed box
@wp.func
def sdf_box(upper: wp.vec3, p: wp.vec3):
qx = wp.abs(p[0])-upper[0]
qy = wp.abs(p[1])-upper[1]
qz = wp.abs(p[2])-upper[2]
e = wp.vec3(wp.max(qx, 0.0), wp.max(qy, 0.0), wp.max(qz, 0.0))
return wp.length(e) + wp.min(wp.max(qx, wp.max(qy, qz)), 0.0)
@wp.func
def v3abs(v: wp.vec3):
return wp.vec3(wp.abs(v[0]), wp.abs(v[1]),wp.abs(v[2]))
@wp.func
def v3max(v: wp.vec3):
return wp.max(wp.max(v[0], v[1]),v[2])
@wp.func
def v3min(v: wp.vec3):
return wp.min(wp.min(v[0], v[1]),v[2])
##########
#
########## -- Geometry SDF wp.func's
{% for geom in geometries %}
@wp.func
def _sdf_{{geom.name}}(p: wp.vec3, {% for k,v in params.items() %}{{k}}: float, {% endfor %}):
# type: {{geom.g.type}}
{% if geom.g.type=="GeometryDifference" %}
return op_subtract(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="GeometryUnion" %}
return op_union(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="GeometryIntersection" %}
return op_intersect(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="Cylinder" %}
center = wp.vec3({% for v in geom.g.args[0] %}float({{v}}),{% endfor %})
radius = {{geom.g.args[1]}}
height = {{geom.g.args[2]}}
{% if geom.g.rotate %}
angle = {{geom.g.rotate[0]}}
c = wp.cos(angle)
s = wp.sin(angle)
x = p[0]-center[0]
y = p[1]-center[1]
z = p[2]-center[2]
{% if geom.g.rotate[1]=='x' %}
p = wp.vec3(p[0], c*y +s*z+center[1], -s*y +c*z+center[2])
{% elif geom.g.rotate[1]=='y' %}
p = wp.vec3(c*x +s*z+center[0], p[1], -s*x +c*z+center[2])
{% else %}
p = wp.vec3(c*x +s*y+center[0], -s*x +c*y+center[1], p[2])
{% endif %}
{% endif %}
r_dist = wp.sqrt((p[0] - center[0])*(p[0] - center[0]) + (p[1] - center[1])*(p[1] - center[1]))
z_dist = wp.abs(p[2] - center[2])
outside = wp.sqrt(wp.min(0.0, radius-r_dist)**2.0 + wp.min(0.0, 0.5*height-z_dist)**2.0)
inside = -wp.min(wp.abs(wp.min(0.0, r_dist-radius)), wp.abs(wp.min(0.0, z_dist-0.5*height)))
sdf = (outside+inside)
return sdf
{% elif geom.g.type=="ModBox" %}
p1 = wp.vec3({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
p2 = wp.vec3({% for v in geom.g.args[1]%}float({{v}}),{% endfor %})
side = p2-p1
center = (p1+p2)/2.0
{% if geom.g.rotate %}
angle = {{geom.g.rotate[0]}}
c = wp.cos(angle)
s = wp.sin(angle)
x = p[0]-center[0]
y = p[1]-center[1]
z = p[2]-center[2]
{% if geom.g.rotate[1]=='x' %}
p = wp.vec3(p[0], c*y +s*z+center[1], -s*y +c*z+center[2])
{% elif geom.g.rotate[1]=='y' %}
p = wp.vec3(c*x +s*z+center[0], p[1], -s*x +c*z+center[2])
{% else %}
p = wp.vec3(c*x +s*y+center[0], -s*x +c*y+center[1], p[2])
{% endif %}
{% endif %}
c_dist = v3abs(p - center) - 0.5 * side
ov = wp.vec3(wp.max(c_dist[0], 0.), wp.max(c_dist[1], 0.), wp.max(c_dist[2], 0.))
# outside = wp.min(c_dist[0], 0.)**2. + wp.min(c_dist[1], 0.)**2. + wp.min(c_dist[2], 0.)**2.
outside = wp.length(ov) #wp.sqrt(outside)
inside = wp.min(v3max(c_dist), 0.0)
sdf = (outside + inside)
# sdf = inside #outside
return sdf
# upper = (p2-p1)*0.5
# return sdf_box(upper, p-upper-p1)
{% elif geom.g.type=="Box" %}
p1 = wp.vec3({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
p2 = wp.vec3({% for v in geom.g.args[1]%}float({{v}}),{% endfor %})
side = p2-p1
center = (p1+p2)*0.5
{% if geom.g.rotate %}
angle = {{geom.g.rotate[0]}}
c = wp.cos(angle)
s = wp.sin(angle)
x = p[0]-center[0]
y = p[1]-center[1]
z = p[2]-center[2]
{% if geom.g.rotate[1]=='x' %}
p = wp.vec3(p[0], c*y +s*z+center[1], -s*y +c*z+center[2])
{% elif geom.g.rotate[1]=='y' %}
p = wp.vec3(c*x +s*z+center[0], p[1], -s*x +c*z+center[2])
{% else %}
p = wp.vec3(c*x +s*y+center[0], -s*x +c*y+center[1], p[2])
{% endif %}
{% endif %}
sdf = sdf_box((p2-p1)/2.0, p-center)
return sdf
# upper = (p2-p1)*0.5
# return sdf_box(upper, p-upper-p1)
{% elif geom.g.type=="Sphere" %}
c = wp.vec3({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
r = float({{geom.g.args[1]}})
return sdf_sphere(p-c, r)
{% else %}
return 0.0
{% endif %}
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_{{geom.name}}(field: wp.array3d(dtype=float),
dim: float,
scale: float,
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
i, j, k = wp.tid()
p = wp.vec3(float(i), float(j), float(k))
p = (p/dim - wp.vec3(1.0,1.0,1.0)*0.5)*scale
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
field[i,j,k] = sdf
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_adjust_points_{{geom.name}}(points: wp.array(dtype=wp.vec3),
dim: float,
scale: float,
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
i = wp.tid()
p = points[i]
p = (p/dim - wp.vec3(1.0,1.0,1.0)*0.5)*scale
eps = 1.e-5
# compute gradient of the SDF using finite differences
dx = _sdf_{{geom.name}}(p + wp.vec3(eps, 0.0, 0.0){{paramcallstr}}) - _sdf_{{geom.name}}(p - wp.vec3(eps, 0.0, 0.0){{paramcallstr}})
dy = _sdf_{{geom.name}}(p + wp.vec3(0.0, eps, 0.0){{paramcallstr}}) - _sdf_{{geom.name}}(p - wp.vec3(0.0, eps, 0.0){{paramcallstr}})
dz = _sdf_{{geom.name}}(p + wp.vec3(0.0, 0.0, eps){{paramcallstr}}) - _sdf_{{geom.name}}(p - wp.vec3(0.0, 0.0, eps){{paramcallstr}})
normal = wp.normalize(wp.vec3(dx, dy, dz))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
points[i] = p - normal*sdf
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_sdf_{{geom.name}}(points: wp.array(dtype=wp.vec3),
sdf: wp.array(dtype=float), # return values
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
i = wp.tid()
p = points[i]
sdf[i] = _sdf_{{geom.name}}(p{{paramcallstr}})
{% endfor %}
class Geometry:
def __init__(self, dim=128):
self.dim = dim
self.max_verts = 10**6
self.max_tris = 10**6
self.time = 0.0
self.field = wp.zeros(shape=(self.dim, self.dim, self.dim), dtype=float)
self.sdf_return = wp.zeros(shape=(dim*10,), dtype=float)
self.iso = wp.MarchingCubes(nx=self.dim,
ny=self.dim,
nz=self.dim,
max_verts=self.max_verts,
max_tris=self.max_tris)
self._geoms = [{% for geom in geometries %}"{{geom.name}}",{% endfor %}]
def list_geometries(self):
return self._geoms.copy()
def update(self):
pass
def render_sdf(self, geom_name, scale=3.0{{paramstr}}):
assert geom_name in self._geoms
kernel = globals()["_kernel_"+geom_name]
self.scale = scale
with wp.ScopedTimer(f"Updated SDF volume with {geom_name}"):
wp.launch(kernel,
dim=self.field.shape, inputs=[self.field, self.dim, scale, {% for k,v in params.items() %}{{k}}, {% endfor %}])
self._last_kernel = geom_name
def adjust_points(self{{paramstr}}):
kernel = globals()["_kernel_adjust_points_"+self._last_kernel]
with wp.ScopedTimer(f"Adjusted mesh points from {self._last_kernel}"):
wp.launch(kernel,
dim=self.iso.verts.shape,
inputs=[self.iso.verts, self.dim, self.scale, {% for k,v in params.items() %}{{k}}, {% endfor %}])
verts = self.iso.verts.numpy()#(self.iso.verts.numpy()/float(self.dim) - 0.5)*self.scale
indices=self.iso.indices.numpy()
print(f"geometry: {self._last_kernel} | {verts.shape[0]:,} verts | {indices.shape[0]:,} tris")
return {"verts": verts, "indices":indices}
def sdf(self, geom_name, xyz{{paramstr}}):
assert geom_name in self._geoms
dim = xyz.shape[0]
if self.sdf_return.shape[0] < xyz.shape[0]:
self.sdf_return = wp.zeros(shape=(xyz.shape[0],), dtype=float)
kernel = globals()["_kernel_sdf_"+geom_name]
with wp.ScopedTimer(f"SDF compute for {geom_name}"):
wp.launch(kernel,
dim=dim, inputs=[wp.array(xyz, dtype=wp.vec3), self.sdf_return, {% for k,v in params.items() %}{{k}}, {% endfor %}])
return self.sdf_return.numpy()[:dim]
def get_mesh_data(self):
with wp.ScopedTimer(f"get_mesh_data [geometry: {self._last_kernel}]"):
self.iso.surface(field=self.field, threshold=0)
verts = (self.iso.verts.numpy()/float(self.dim) - 0.5)*self.scale
indices=self.iso.indices.numpy()
print(f"geometry: {self._last_kernel} | {verts.shape[0]:,} verts | {indices.shape[0]:,} tris")
return {"verts": verts, "indices":indices}
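# Hedged usage sketch for the generated module (geometry names come from the
# problem definition; "geom" below is illustrative, and extra keyword arguments
# correspond to the problem's geometry parameters):
#
#   import numpy as np
#   g = Geometry(dim=128)
#   print(g.list_geometries())
#   g.render_sdf("geom", scale=3.0)        # fill the SDF volume for "geom"
#   mesh = g.get_mesh_data()               # marching-cubes {"verts", "indices"}
#   d = g.sdf("geom", np.zeros((10, 3), dtype=np.float32))  # SDF at query points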
| modulus-toolchain-master | mtc/compile/templates/warp_geometry.py |
import warp as wp
wp.init()
## Custom Code (from problem.py)
{{custom_warp_code}}
## SDF Helpers
# subtraction
@wp.func
def op_subtract(d1: float, d2: float):
return -wp.min(-d1, d2)
# intersection
@wp.func
def op_intersect(d1: float, d2: float):
return wp.max(d1, d2)
# union
@wp.func
def op_union(d1: float, d2: float):
return wp.min(d1, d2)
# signed sphere
@wp.func
def sdf_circle(p: wp.vec2, r: float):
return wp.length(p) - r
# signed box
@wp.func
def sdf_rectangle(upper: wp.vec2, p: wp.vec2):
qx = wp.abs(p[0])-upper[0]
qy = wp.abs(p[1])-upper[1]
e = wp.vec2(wp.max(qx, 0.0), wp.max(qy, 0.0))
return wp.length(e) + wp.min( wp.max(qy, qx), 0.0)
@wp.func
def v3abs(v: wp.vec2):
return wp.vec2(wp.abs(v[0]), wp.abs(v[1]))
##########
#
########## -- Geometry SDF wp.func's
{% for geom in geometries %}
@wp.func
def _sdf_{{geom.name}}(p: wp.vec2, {% for k,v in params.items() %}{{k}}: float, {% endfor %}):
# type: {{geom.g.type}}
{% if geom.g.type=="GeometryDifference" %}
return op_subtract(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="GeometryUnion" %}
return op_union(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="GeometryIntersection" %}
return op_intersect(_sdf_{{geom.g.g}}(p{{paramcallstr}}), _sdf_{{geom.g.og}}(p{{paramcallstr}}))
{% elif geom.g.type=="Rectangle" %}
p1 = wp.vec2({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
p2 = wp.vec2({% for v in geom.g.args[1]%}float({{v}}),{% endfor %})
side = p2-p1
center = (p1+p2)*0.5
{% if geom.g.rotate %}
angle = {{geom.g.rotate[0]}}
c = wp.cos(angle)
s = wp.sin(angle)
x = p[0]-center[0]
y = p[1]-center[1]
    p = wp.vec2(c*x + s*y + center[0], -s*x + c*y + center[1])
{% endif %}
sdf = sdf_rectangle((p2-p1)/2.0, p-center)
return sdf
{% elif geom.g.type=="Circle" %}
c = wp.vec2({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
r = float({{geom.g.args[1]}})
return sdf_circle(p-c, r)
{% elif geom.g.type=="Channel2D" %}
point_1 = wp.vec2({% for v in geom.g.args[0]%}float({{v}}),{% endfor %})
point_2 = wp.vec2({% for v in geom.g.args[1]%}float({{v}}),{% endfor %})
dist_x = point_2[0] - point_1[0]
dist_y = point_2[1] - point_1[1]
center_y = point_1[1] + (dist_y) / 2.0
y_diff = wp.abs(p[1] - center_y) - (point_2[1] - center_y)
outside_distance = wp.sqrt(wp.max(y_diff, 0.0) ** 2.0)
inside_distance = wp.min(y_diff, 0.0)
sdf = (outside_distance + inside_distance)
return sdf
{% elif geom.g.type=="Polygon" %}
{% if geom.g.rotate %}
angle = {{geom.g.rotate[0]}}
c = wp.cos(angle)
s = wp.sin(angle)
center = wp.vec2(0.0,0.0)
x = p[0]#-center[0]
y = p[1]#-center[1]
p = wp.vec2(c*x +s*y+center[0], -s*x +c*y+center[1])
{% endif %}
{% for pp in geom.g.args[0] %}
p{{loop.index0}} = wp.vec2({% for v in pp %}float({{v}}),{% endfor %}){% endfor %}
# Distance to line segment involves distance to line from a point
# t = (normalized) tangent between p0 and p1
# n = normal (perp to t)
#
# Solve [ t_x -n_x ] [s] = p_x - p0_x
# [ t_y -n_y ] [t] = p_y - p0_y
# inverting the matrix
sdf = wp.length(p0-p)
{% for pp in geom.g.args[0] %}
sdf = wp.min(wp.length(p{{loop.index0}}-p), sdf){% endfor %}
sign = 1.0{% for pp in geom.g.args[0] %}
{% if loop.last %}po = p0{% else %}
po = p{{loop.index}}{% endif %}
sdf = wp.min(wp.length(p{{loop.index0}}-p), sdf)
tangent = po-p{{loop.index0}}
t = wp.normalize(tangent)
n = wp.vec2(t[1], -t[0])
det = 1.0 #/ (-t[0]*n[1]+t[1]*n[0])
vx = p[0] - p{{loop.index0}}[0]
vy = p[1] - p{{loop.index0}}[1]
s = det * ((-n[1])*vx+n[0]*vy)
d = det * ((-t[1])*vx+t[0]*vy)
if s>=0. and s <= wp.length(tangent):
sdf = wp.min(sdf, wp.abs(d))
if sdf == wp.abs(d):
sign = wp.sign(d)
{% endfor %}
return sign*sdf
{% elif geom.g.type=="GeometryCustomWarp" %}
sdf = {{geom.g.func}}(p, {% for v in geom.g.args %}float({{v}}),{% endfor %})
return sdf
{% else %}
return 0.0
{% endif %}
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_sdf_{{geom.name}}(points: wp.array(dtype=wp.vec2),
sdf: wp.array(dtype=float), # return values
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
i = wp.tid()
p = points[i]
sdf[i] = _sdf_{{geom.name}}(p{{paramcallstr}})
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_sample_interior_{{geom.name}}(rand_seed: int,
points: wp.array(dtype=wp.vec2),
bbox: wp.vec4,
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
tid = wp.tid()
rstate = wp.rand_init(rand_seed, tid)
p = wp.vec2(wp.randf(rstate, bbox[0], bbox[1]), wp.randf(rstate, bbox[2], bbox[3]))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
count = int(0)
while count < 1_000_000 and sdf>0.0:
p = wp.vec2(wp.randf(rstate, bbox[0], bbox[1]), wp.randf(rstate, bbox[2], bbox[3]))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
count += 1
points[tid] = p
{% endfor %}
{% for geom in geometries %}
@wp.kernel
def _kernel_sample_boundary_{{geom.name}}(rand_seed: int,
points: wp.array(dtype=wp.vec2),
bbox: wp.vec4,
tol: float,
{% for k,v in params.items() %}{{k}}: float, {% endfor %}):
tid = wp.tid()
rstate = wp.rand_init(rand_seed, tid)
p = wp.vec2(wp.randf(rstate, bbox[0], bbox[1]), wp.randf(rstate, bbox[2], bbox[3]))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
count = int(0)
while count < 1_000_000 and (sdf<-tol or sdf>0):
p = wp.vec2(wp.randf(rstate, bbox[0], bbox[1]), wp.randf(rstate, bbox[2], bbox[3]))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
count += 1
# compute gradient of the SDF using finite differences
eps = 1.e-4
dx = _sdf_{{geom.name}}(p + wp.vec2(eps, 0.0){{paramcallstr}}) - _sdf_{{geom.name}}(p - wp.vec2(eps, 0.0){{paramcallstr}})
dy = _sdf_{{geom.name}}(p + wp.vec2(0.0, eps){{paramcallstr}}) - _sdf_{{geom.name}}(p - wp.vec2(0.0, eps){{paramcallstr}})
normal = wp.normalize(wp.vec2(dx, dy))
sdf = _sdf_{{geom.name}}(p{{paramcallstr}})
points[tid] = p- normal*sdf
{% endfor %}
class Geometry:
def __init__(self, dim=128):
self.dim = dim
self.max_verts = 10**6
self.max_tris = 10**6
self.sdf_return = wp.zeros(shape=(dim*10,), dtype=float)
self.sample_points = wp.zeros(shape=(10,), dtype=wp.vec2)
self._geoms = [{% for geom in geometries %}"{{geom.name}}",{% endfor %}]
def list_geometries(self):
return self._geoms.copy()
def sdf(self, geom_name, xy{{paramstr}}):
assert geom_name in self._geoms
dim = xy.shape[0]
if self.sdf_return.shape[0] < xy.shape[0]:
self.sdf_return = wp.zeros(shape=(xy.shape[0],), dtype=float)
kernel = globals()["_kernel_sdf_"+geom_name]
with wp.ScopedTimer(f"SDF compute for {geom_name}"):
wp.launch(kernel,
dim=dim, inputs=[wp.array(xy, dtype=wp.vec2), self.sdf_return, {% for k,v in params.items() %}{{k}}, {% endfor %}])
wp.synchronize()
return self.sdf_return.numpy()[:dim]
def sample_interior(self, geom_name, n, bbox=[-2.0, 2.0, -2.0, 2.0]{{paramstr}}):
if self.sample_points.shape[0] < n:
self.sample_points = wp.zeros(shape=(n,), dtype=wp.vec2)
kernel = globals()["_kernel_sample_interior_"+geom_name]
with wp.ScopedTimer(f"Sampling interior of {geom_name}"):
import numpy as np
rand_seed = np.random.randint(1_000_000_000)
bbox = wp.vec4(*bbox)
wp.launch(kernel,
dim=n, inputs=[rand_seed, self.sample_points, bbox,
{% for k,v in params.items() %}{{k}}, {% endfor %}])
wp.synchronize()
return self.sample_points.numpy()[:n]
def sample_boundary(self, geom_name, n, bbox=[-2.0, 2.0, -2.0, 2.0], tol=1e-2{{paramstr}}):
if self.sample_points.shape[0] < n:
self.sample_points = wp.zeros(shape=(n,), dtype=wp.vec2)
kernel = globals()["_kernel_sample_boundary_"+geom_name]
with wp.ScopedTimer(f"Sampling boundary of {geom_name}"):
import numpy as np
rand_seed = np.random.randint(1_000_000_000)
bbox = wp.vec4(*bbox)
wp.launch(kernel,
dim=n, inputs=[rand_seed, self.sample_points, bbox, tol,
{% for k,v in params.items() %}{{k}}, {% endfor %}])
wp.synchronize()
return self.sample_points.numpy()[:n]
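# Hedged usage sketch for the generated 2D module ("geom" below is illustrative;
# extra keyword arguments correspond to the problem's geometry parameters):
#
#   import numpy as np
#   g = Geometry()
#   interior = g.sample_interior("geom", 1000, bbox=[-2.0, 2.0, -2.0, 2.0])
#   boundary = g.sample_boundary("geom", 1000, tol=1e-2)
#   d = g.sdf("geom", np.asarray(interior, dtype=np.float32))  # signed distances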
| modulus-toolchain-master | mtc/compile/templates/warp_geometry_2d.py |
def make_infer_fn(outputs=[{% for item in _submodels %}'{{ item }}',{% endfor %}]):
coll_models=[{% for item in coll_models %}'{{ item }}',{% endfor %}]
invals = {str(v): np.array([0]).reshape(-1, 1) for v in [{% for item in _vars %}'{{ item }}',{% endfor %}]}
# requires_grad = False
requires_grad = True
for v in invals:
for o in outputs:
if f"__{v}" in o:
requires_grad=True
output_names = set(outputs).difference(set(coll_models))
inferencer = PointwiseInferencer(
invar=invals,
output_names=output_names, #[submodel for submodel in self._submodels],
nodes=nodes,
batch_size=256 * 4 * 4 * 4,
requires_grad=requires_grad
)
domain.add_inferencer(inferencer)
# create inference function
def infer_fn(*args, **kargs):
"[{{ stageid }}] infer: ({% for item in _vars %}{{ item }},{% endfor %}) -> ({% for item in _submodels %}{{ item }}, {% endfor %}{{self_model['name']}})"
from modulus.sym.domain.constraint import Constraint
invals = {str(v): kargs[v].reshape(-1, 1) for v in [{% for item in _vars %}'{{ item }}',{% endfor %}]}
invar0 = invals
invar = Constraint._set_device(
invar0, requires_grad=requires_grad, device=inferencer.device
)
pred_outvar = inferencer.forward(invar)
result = {}
for submodel in output_names: #[{% for item in _submodels %}'{{ item }}',{% endfor %}]:
ret = pred_outvar[submodel].cpu().detach().numpy()
ret_val = np.array([v for v in ret[:, 0]])
result[submodel] = ret_val
# now build the main model
model = {"name": '{{ self_model["name"] }}', "conditions": [ {% for item in self_model['conditions'] %} { "func":sympy.sympify('{{item['func']}}'),"on":sympy.sympify('{{item['on']}}')}, {% endfor %} ]}
main_result = ret_val.copy().reshape(-1, 1)
invars, invals = [], []
for varn, varval in invar0.items():
invars.append(Symbol(varn))
invals.append(varval)
if model['name'] in outputs:
for smodel in model["conditions"]:
func = smodel["func"]
cond = smodel["on"]
submodel_result = result[str(func)].reshape(-1, 1)
from sympy import lambdify
sel = lambdify(invars, cond)(*invals)
main_result[sel] = submodel_result[sel]
result[model["name"]] = main_result
return result
return infer_fn
infer_fn=make_infer_fn()
def infer_dispatch(*args, **kargs):
if "outputs" not in kargs:
return infer_fn(*args, **kargs)
std = ({% for item in _submodels %}'{{ item }}', {% endfor %}"{{self_model['name']}}")
o2fn = {std: infer_fn}
not_found=True
for k, fn in o2fn.items():
if len(set(kargs['outputs']).difference(set(k))) == 0:
return fn(*args, **kargs)
fn = make_infer_fn(kargs['outputs'])
o2fn[tuple(kargs['outputs'])] = fn
return fn(*args, **kargs)
global infer
infer = infer_dispatch | modulus-toolchain-master | mtc/templates/inference_section.py |
from mtc.problem import PINNProblem
from mtc.fno_problem import FNOProblem
from mtc.problem import *
PINN = PINNProblem("[PINN] %%projname%%")
FNO = FNOProblem("[PINO] %%projname%%")
PINO = FNO
p = PINN
| modulus-toolchain-master | mtc/templates/cfg.py |
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_1d import Line1D, Point1D
from modulus.sym.geometry.primitives_2d import Rectangle, Circle, Polygon, Line, Channel2D
from modulus.sym.geometry.primitives_3d import Box, Sphere, Cylinder
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
PointwiseConstraint,
IntegralBoundaryConstraint
)
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.geometry.parameterization import Parameterization, Parameter
from modulus.sym.key import Key
from modulus.sym.node import Node
import modulus
from modulus.sym.hydra import to_absolute_path, to_yaml, instantiate_arch
from modulus.sym.hydra.config import ModulusConfig
import numpy as np
import os
# from modulus.sym.geometry.tessellation import Tessellation
###############
from sympy import Symbol, Eq, Or, And, Function
import sympy
import torch
class PDParam:
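    """Wrap a Modulus geometry so that sampled points also carry extra,
    non-geometric parameters: `param_ranges` maps a parameter name to a
    (low, high) range, and each call to sample_interior()/sample_boundary()
    appends one uniformly sampled column per parameter to the result."""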
def __init__(self, geom, param_ranges=None):
self._geom = geom
self._params = param_ranges
def sample_interior(self, *args, **kwargs):
d = self._geom.sample_interior(*args, **kwargs)
shp = d["x"].shape
nr = shp[0]
for param, rng in self._params.items():
data = np.random.rand(nr).reshape(shp)
delta = rng[1] - rng[0]
d[param] = data * delta + rng[0]
return d
def sample_boundary(self, *args, **kwargs):
d = self._geom.sample_boundary(*args, **kwargs)
for k, v in d.items():
d[k] = np.vstack([v, v, v])
shp = d["x"].shape
nr = shp[0]
for param, rng in self._params.items():
data = np.random.rand(nr).reshape(shp)
delta = rng[1] - rng[0]
d[param] = data * delta + rng[0]
return d
{% if no_modulus_main %}
from modulus.sym.hydra.utils import compose
cfg = compose(config_path="conf", config_name="config")
cfg.network_dir = "{{conf_path}}/outputs"
def run(cfg=cfg) -> None:
{% else %}
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg) -> None:
{% endif %} | modulus-toolchain-master | mtc/templates/train-imports.py |
from cfg import *
# Select Problem Type
p = PINN
# p = FNO # uncomment to select a PINO/FNO problem type
# -------------------
# Suggested structure (PINN)
#
# 1. Define problem variables and unknown functions; e.g.,
# [x, y], [u] = p.add_neural_network(name="NN", inputs=["x", "y"], outputs=["u"])
#
# 2. Define geometries; e.g.,
# geo = p.Line1D("geom", 0,1)
#
# 3. Define sub-domains (geometries plus non-geometric variables go here); e.g.
#
# interior = p.add_interior_subdomain("interior",
# geom=geo,
# params={t:(0,2*L)})
# boundary = p.add_boundary_subdomain("boundary",
# geom=geo,
# params={t:(0,2*L)})
#
# 4. (Optionally) Sub-models; e.g,
# g_air = p.add_submodel("g_air", u_air * zf + airT)
#
#
# 5. Define constraints; e.g.,
#
# wave_eq = Eq(u.diff(t, 2), (c**2 * u.diff(x)).diff(x))
# p.add_constraint("wave_equation", enforce(equation=wave_eq, on_domain=interior))
# p.add_data_constraint(name="calculated", model=u, data_fname=hdf_fname)
#
#
# 6. (optionally) Define piecewise-models (collecting submodels)
#
# p.set_model(
# "T",
# [
# {"func": g_metal, "on": And(y > mny, y < mxy)},
# {"func": g_air, "on": ~And(y > mny, y < mxy)},
# ],
# )
# Suggested structure (FNO)
#
# 1. Define the discrete spatial domain (the extent, the grid, and grid spacing)
# x, y, dx, dy = p.set_grid_domain(N=100, extent={"x": (0.0, 1.0), "y": (0.0, 1.0)})
#
# Note that the spatial variables are defined in this way (x and y are sympy.Symbol) and the
# grid spacing is now computed based on the number of grid points N and returned for each variable.
#
# 2. Define problem input and output functions; e.g.,
# [K, ],[X,Y], [U] = p.add_neural_network(name="NN", inputs=["K"], outputs=["U"])
#
# 3. If a data driven FNO, add input/output distribution
#    ddata = p.add_distribution(name="Ddata", hdf5_file="K_and_U_data.hdf5")
#
# 4. If PINO, an input distribution may be sufficient (i.e. with no output values)
#    dinput = p.add_distribution(name="Dinputs", hdf5_file="K_only_data.hdf5")
#
# 5. If data-driven, add a data constraint
# p.add_data_constraint("data", over=ddata) # input is a distribution that has input/output entries
#
# 6. If PINO, add an interior and boundary equation constraint (interior/boundary grid points)
#    p.add_interior_constraint("heat eq", equation=Eq(U.diff(x,x)+U.diff(y, y), 1), on=U, over=dinput)
#    p.add_boundary_constraint("bdry", equation=Eq(U, 0), over=dinput)
| modulus-toolchain-master | mtc/templates/problem.py |
from typing import Dict
import modulus
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
import torch
import h5py
import numpy as np
#from utilities import download_FNO_dataset, load_FNO_dataset
{% include "constraints.py" %}
@modulus.sym.main(config_path="conf", config_name="config_PINO")
def run(cfg: ModulusConfig) -> None:
"{{problem_name}}"
{% filter indent(width=4) %}
domain = Domain()
nodes = []
{% for nn_name, nn in nns.items() %}
from modulus.sym.models.fully_connected import FullyConnectedArch
decoder_net = FullyConnectedArch(
input_keys=[Key("z")], # input to decoder
output_keys=[{% for v in nn.outputs %}Key("{{v}}"),{% endfor %}], # output keys of decoder
nr_layers=1, # number of layers
layer_size=32 # layer size
)
from modulus.sym.models.fno import FNOArch
model{{nn_name}} = FNOArch(
input_keys=[{% for v in nn.inputs %}Key("{{v}}"),{% endfor %}],
decoder_net=decoder_net,
dimension=2
)
nodes += [model{{nn_name}}.make_node(name="{{nn_name}}_FNO")]
{% endfor %}
####################################################
# Define constraints
####################################################
ins_outs = [{% for v in nn_ins %}"{{v}}",{% endfor %}]+[{% for v in nn_outs %}"{{v}}"{% endfor %}]
_constraints = {}
dirichlet_conds = []
dirichlet_gen_conds = []
neumann_conds = []
{% for cn, c in constraints.items() %}
{% if c.type == "boundary-dirichlet" %}
var_id={{c.var_id}}
offset = {{c.var_offset}}
expr_srepr= "{{c.expr_srepr}}"
dirichlet_conds += [(var_id, offset, expr_srepr)]
{% elif c.type == "boundary-dirichlet-gen" %}
at_srepr = "{{c.at}}"
expr_srepr= "{{c.expr_srepr}}"
dirichlet_gen_conds += [(expr_srepr, at_srepr)]
{% elif c.type == "boundary-neumann" %}
var_id={{c.var_id}}
offset = {{c.var_offset}}
expr_srepr= "{{c.expr_srepr}}"
neumann_conds += [(var_id, offset, expr_srepr)]
{% endif %}
{% endfor %}
{% for cn, c in constraints.items() %}
# Constraint: {{cn}} [{{c.type}}]
{% if c.type == "interior" %}
srepr = "{{c.eq_srepr}}"
ctype = "interior"
gridvarfuncs={ {% for gv in grid.vars %}"{{gv|upper}}":{{grid.vars[gv]}},{% endfor %} }
_constraints["{{cn}}"] = FNOEquationConstraint(ins_outs, gridvarfuncs, srepr, "{{cn}}", "{{c.onFunc}}", dirichlet_gen_conds, dirichlet_conds, neumann_conds,
ctype=ctype, criteria="{{c.srepr_criteria}}")
# distribution = {{c.distribution}}
inputs = [{% for v in nn_ins %}"{{v}}",{% endfor %}]
_node = Node(
inputs=ins_outs,
outputs=["{{cn}}"],
evaluate=_constraints["{{cn}}"],
name="[{{cn}}] Node",
)
# add constraints to domain
with h5py.File("{{distributions[c.distribution].file}}", "r") as f:
invars = {}
for k in f.keys():
invars[k] = f[k][:]
sh = invars[k].shape
invars[k]= invars[k].reshape(sh[0], 1, sh[1], sh[2])
print(k, invars[k].shape, invars.keys())
train_dataset = DictGridDataset(invars, {"{{cn}}": np.zeros_like(invars[k])})
supervised = SupervisedGridConstraint(
nodes=nodes + [_node],
dataset=train_dataset,
batch_size=16,
)
domain.add_constraint(supervised, "supervised{{cn}}")
#nodes += [_node]
{% endif %}
{% endfor %}
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
{% endfilter %}
if __name__ == "__main__":
run()
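# Note (hedged): this file is a Jinja template; `mtc compile --target training`
# renders it into training/<stage>/train.py, and `mtc train --stage <stage>`
# runs the result (under mpirun when --ngpus > 1). See mtc/mtc.py.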
| modulus-toolchain-master | mtc/templates/fno-problem/train.py |
from typing import Dict
# import modulus
from modulus.sym.hydra import instantiate_arch, ModulusConfig
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.graph import Graph
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.domain.constraint import SupervisedGridConstraint
from modulus.sym.domain.validator import GridValidator
from modulus.sym.dataset import DictGridDataset
from modulus.sym.utils.io.plotter import GridValidatorPlotter
import torch
import h5py
import numpy as np
global struct
from modulus.sym.hydra.utils import compose
cfg = compose(config_path="conf", config_name="config_PINO")
cfg.network_dir = "outputs/darcy_PINO"
def run(cfg=cfg) -> None:{% filter indent(width=4) %}
{% for nn_name, nn in nns.items() %}
nodes=[]
from modulus.sym.models.fully_connected import FullyConnectedArch
decoder_net = FullyConnectedArch(
input_keys=[Key("z")], # input to decoder
output_keys=[{% for v in nn.outputs %}Key("{{v}}"),{% endfor %}], # output keys of decoder
nr_layers=1, # number of layers
layer_size=32 # layer size
)
from modulus.sym.models.fno import FNOArch
model{{nn_name}} = FNOArch(
input_keys=[{% for v in nn.inputs %}Key("{{v}}"),{% endfor %}],
decoder_net=decoder_net,
dimension=2
)
nodes += [model{{nn_name}}.make_node(name="{{nn_name}}_FNO")]
for node in nodes:
node.evaluate.load("training/{{stageid}}/outputs")
gmodel = Graph(nodes,
[{% for v in nn.inputs %}Key("{{v}}"),{% endfor %}],
[{% for v in nn.outputs %}Key("{{v}}"),{% endfor %}])
global struct
struct = {"graph": gmodel}
{% endfor %}{% endfilter %}
run()
info = {"__file__":__file__,
"grid": {"N": {{grid.N}}, "vars": { {% for vn,v in grid.vars.items() %}"{{vn}}":{{v}}, {% endfor %} } },
"input_grids": [{% for v in nn_ins %}Key("{{v}}"),{% endfor %}],
"output_grids": [{% for v in nn_outs %}Key("{{v}}"),{% endfor %}]}
def infer({% for v in nn_ins %}{{v}},{% endfor %}):
{% for v in nn_ins %}sh={{v}}.shape;{% endfor %}
g =struct['graph']
def arr2_tensor(a):
sh = a.shape
return torch.Tensor(a.reshape(sh[0], 1, sh[1], sh[2]))
result = g.forward({ {% for v in nn_ins %}"{{v}}":arr2_tensor({{v}}),{% endfor %} })
return {k: r.cpu().detach().numpy().reshape(sh) for k,r in result.items()} | modulus-toolchain-master | mtc/templates/fno-problem/infer.py |
class FNOEquationConstraint(torch.nn.Module):
"Custom Equation Constraint"
def __init__(self, vars, gridvarfuncs, eq_srepr, outvar, onFunc, dirichlet_gen_conds, dirichlet_conds, neumann_conds, ctype="interior", criteria=None):
"ctype in ['interior', 'boundary']"
ctypes=['interior', 'boundary']
assert ctype in ctypes, f"Invalid ctype={ctype} -- Constraint type must be one of {ctypes}"
super().__init__()
self.vars = vars
self.gridvarfuncs = gridvarfuncs
self.eq_srepr = eq_srepr
self.outvar = outvar
self.ctype=ctype
self.onFunc = onFunc
# the format of the Dirichlet and Neumann conditions is a list of:
# (var_id, offset, expr_srepr)
# var_id: the variable number (e.g. 0 for x, ...)
# offset: in grid point id units (usually 0, 1 or -1, -2)
# expr_srepr: the string to evaluate to get the value
self.dirichlet_conds=dirichlet_conds
self.dirichlet_gen_conds=dirichlet_gen_conds
self.neumann_conds=neumann_conds
self.criteria=criteria
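    # forward() below evaluates the constraint residual on the grid. It builds a
    # small evaluator for the sympy srepr strings over torch tensors: grid
    # variables become meshgrid tensors, Derivative() applies a 3-point
    # central/second-difference stencil via F.conv2d, and Integral() is a
    # Riemann-style sum along one grid axis scaled by the grid spacing.
    # Dirichlet/Neumann conditions are written into the target tensor before the
    # residual is computed, and the outermost rows/columns of the residual are
    # zeroed because the 3-point stencils are not valid there.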
def forward(self, input_var: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
_vars = {
"_dh4id": { {% for vn,v in grid.vars.items() %} {{v.var_id}} :{{v.delta}}, {% endfor %} }, #{"x": 1.0, "y": 1.0},
"_dh": { {% for vn,v in grid.vars.items() %}"{{vn}}":{{v.delta}}, {% endfor %} }, #{"x": 1.0, "y": 1.0},
"_symbol2order": { {% for vn,v in grid.vars.items() %}"{{vn}}":{{v.var_id}}, {% endfor %} },
}
for v in self.vars:
_vars[v] = input_var[v].clone()
inT = input_var[v]
# mgargs = [ np.arange(gv['extent'][0], gv['extent'][1], gv['delta']) for gvn, gv in self.gridvarfuncs.items()]
mgargs = [ np.linspace(gv['extent'][0], gv['extent'][1], inT.shape[-1]) for gvn, gv in self.gridvarfuncs.items()]
# Set up the grid variable functions to allow for, e.g., sin(X)
r = np.meshgrid(*mgargs)
for gvn, _V in zip(self.gridvarfuncs, r):
_V = torch.Tensor( _V.reshape([1,1]+list(_V.shape)) ).to(inT.device)
_V = torch.repeat_interleave(_V, inT.shape[0], dim=0)
_vars[gvn] = _V
import torch.nn.functional as F
# requirements
# 1. need to pas a _vars object
# 2. _vars["_symbol2order"] is a dict mapping grid variables (like x,y,t) to dimension
# 3. _vars should include the tensors for each function; e.g. _vars["u"] = tensor
# 4. _vars["_dh"] is a dict mapping grid vars to grid size for numerical differentiation
class TorchEvalExp:
def ln(x):
                return torch.log(x)
def log(x):
return torch.log(x)
def Pow(x, p):
return torch.pow(x, p)
def Abs(x):
return torch.abs(x)
def exp(x):
return torch.exp(x)
def sin(x):
return torch.sin(x)
def cos(x):
return torch.cos(x)
def LessThan(a,b):
return a<=b
def StrictLessThan(a,b):
return a<b
def StrictGreaterThan(a,b):
return a>b
def GreaterThan(a,b):
return a>=b
def Not(a):
return ~a
def Or(*args):
r = args[0]
for a in args[1:]:
r = r|a
return r
def And(*args):
r = args[0]
for a in args[1:]:
r = r&a
return r
def Equality(a,b):
return a==b
def Unequality(a,b):
return a!=b
def Integer(v):
return float(v)
def Float(v, precision=53):
return float(v)
def Tuple(*args):
return args
def Add(*args):
r = args[0]
for a in args[1:]:
r = r + a
return r
def Mul(*args):
r = args[0]
for a in args[1:]:
r = r * a
return r
def Symbol(s):
return str(s)
class Function:
def __init__(self, name):
self.name = name
self.args = []
def __call__(self, *args):
for a in args:
assert isinstance(a, str)
self.args = [a for a in args]
return _vars[self.name]
def Derivative(t, p):
vnum = _vars["_symbol2order"][p[0]]
deriv_order = p[1]
dh = _vars["_dh"][p[0]]
if deriv_order == 1:
stencil = torch.Tensor([-0.5, 0.0, 0.5]).to(t.device)
elif deriv_order == 2:
stencil = torch.Tensor([1.0, -2.0, 1.0]).to(t.device)
dh = dh * dh
else:
print("ERROR: only derivatives up to order 2 are supported")
dim = (vnum+1)%2 # len(_vars["_symbol2order"])
stencilD = torch.reshape(stencil, [1, 1] + dim * [1] + [-1] + (1 - dim) * [1])
# var = F.pad(t, 4 * [(stencil.shape[0] - 1) // 2], "replicate") # "constant", 0)
var=t
output = F.conv2d(var, stencilD, padding="same") / dh
return output
def Integral(t, p):
dim = int(_vars["_symbol2order"][p[0]])
dh = _vars["_dh"][p[0]]
# nT=torch.zeros_like(T)
if dim == 0:
sT = dh*torch.sum(t,2+dim, keepdim=True)#.shape
# nT[0,0,:,:]=sT[0,0,0,:]
return sT.repeat(1,1,t.shape[2],1)
elif dim == 1:
sT = dh*torch.sum(t,2+dim, keepdim=True)#.shape
return sT.repeat(1,1,1,t.shape[3])
return nT
fns = [e for e in dir(TorchEvalExp) if not e.startswith("__")]
ctxt = {fn: getattr(TorchEvalExp, fn) for fn in fns}
# Select the target tensor to prepare with boundary info
T = _vars[self.onFunc]
# First, evaluate all Dirichlet General conditions
for expr_srepr, at_srepr in self.dirichlet_gen_conds:
Tc = eval(expr_srepr,{}, ctxt)
if isinstance(Tc, float):
Tc = 0*T + Tc
atCond = eval(at_srepr,{}, ctxt)
T[atCond] = Tc[atCond]
# First, evaluate all Dirichlet conditions
for var_id, offset, expr_srepr in self.dirichlet_conds:
Tc = eval(expr_srepr,{}, ctxt)
if isinstance(Tc, float):
Tc = 0*T + Tc
if var_id == 0:
T[:,:,:, offset] = Tc[:,:,:, offset]
else:
T[:,:,offset,:] = Tc[:,:,offset,:]
# Then, evaluate all Neumann conditions
for var_id, offset, expr_srepr in self.neumann_conds:
dh = _vars["_dh4id"][var_id]
Tc = -eval(expr_srepr,{}, ctxt)
if isinstance(Tc, float):
Tc = 0*T + Tc
off = -1
if offset > 1:
off = 1
dh = -dh
if var_id == 0:
T[:,:,1:-1, offset] = T[:,:,1:-1, offset+off] + off*Tc[:,:,1:-1, offset]*dh
else:
T[:,:, offset,1:-1] = T[:,:,offset+off,1:-1] + off*Tc[:,:,offset,1:-1]*dh
result = eval(self.eq_srepr,{}, ctxt)
## must ignore boundary in loss function when using 3-point stencils
result[:,:,0,:]=0
result[:,:,-1,:]=0
result[:,:,:,0]=0
result[:,:,:,-1]=0
return {self.outvar: result}
## ----------------
| modulus-toolchain-master | mtc/templates/fno-problem/constraints.py |
modulus-toolchain-master | mtc/templates/conf/__init__.py |
|
from __future__ import print_function
from setuptools import setup, find_packages, Command
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
from setuptools.command.egg_info import egg_info
from subprocess import check_call
import os
import sys
import platform
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'js')
is_repo = os.path.exists(os.path.join(here, '.git'))
npm_path = os.pathsep.join([
os.path.join(node_root, 'node_modules', '.bin'),
os.environ.get('PATH', os.defpath),
])
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'A widget for interactive server-side ParaView rendering'
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(os.path.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not os.path.exists(t)]
if strict or missing:
log.warn('rebuilding js and css failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('rebuilding js and css failed (not a problem)')
log.warn(str(e))
command.run(self)
update_package_data(self.distribution)
return DecoratedCommand
def update_package_data(distribution):
"""update package_data to catch changes during setup"""
build_py = distribution.get_command_obj('build_py')
# distribution.package_data = find_package_data()
# re-init build_py options which load package_data
build_py.finalize_options()
class NPM(Command):
description = 'install package.json dependencies using npm'
user_options = []
node_modules = os.path.join(node_root, 'node_modules')
targets = [
os.path.join(here, 'ipyparaview', 'static', 'extension.js'),
os.path.join(here, 'ipyparaview', 'static', 'index.js')
]
def initialize_options(self):
pass
def finalize_options(self):
pass
    def get_npm_name(self):
        npmName = 'npm'
        if platform.system() == 'Windows':
            npmName = 'npm.cmd'
        return npmName

    def has_npm(self):
        npmName = self.get_npm_name()
        try:
            check_call([npmName, '--version'])
            return True
        except Exception:
            return False
def should_run_npm_install(self):
package_json = os.path.join(node_root, 'package.json')
node_modules_exists = os.path.exists(self.node_modules)
return self.has_npm()
def run(self):
has_npm = self.has_npm()
if not has_npm:
log.error("`npm` unavailable. If you're running this command using sudo, make sure `npm` is available to sudo")
env = os.environ.copy()
env['PATH'] = npm_path
if self.should_run_npm_install():
log.info("Installing build dependencies with npm. This may take a while...")
            npmName = self.get_npm_name()
# NOTE: this is a dirty hack to get around permissions issues with npm in docker
# It's not portable, or reliable. It may kill your dog without warning.
import getpass
if getpass.getuser() == 'root':
check_call([npmName, 'install', '--unsafe-perm'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
else:
check_call([npmName, 'install'], cwd=node_root, stdout=sys.stdout, stderr=sys.stderr)
os.utime(self.node_modules, None)
for t in self.targets:
if not os.path.exists(t):
msg = 'Missing file: %s' % t
if not has_npm:
msg += '\nnpm is required to build a development version of a widget extension'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
version_ns = {}
with open(os.path.join(here, 'ipyparaview', '_version.py')) as f:
exec(f.read(), {}, version_ns)
setup_args = {
'name': 'ipyparaview',
'version': version_ns['__version__'],
'description': 'A widget for interactive server-side ParaView rendering',
'long_description': LONG_DESCRIPTION,
'include_package_data': True,
'data_files': [
('share/jupyter/nbextensions/ipyparaview', [
'ipyparaview/static/extension.js',
'ipyparaview/static/index.js',
'ipyparaview/static/index.js.map',
],),
('etc/jupyter/nbconfig/notebook.d' ,['ipyparaview.json'])
],
'install_requires': [
'ipywidgets>=7.0.0',
'pillow>=7.0.0'
],
'packages': find_packages(),
'zip_safe': False,
'cmdclass': {
'build_py': js_prerelease(build_py),
'egg_info': js_prerelease(egg_info),
'sdist': js_prerelease(sdist, strict=True),
'jsdeps': NPM,
},
'author': 'Nick Leaf',
'author_email': '[email protected]',
'url': 'https://github.com/NVIDIA/ipyparaview',
'keywords': [
'ipython',
'jupyter',
'widgets',
],
'classifiers': [
'Development Status :: 4 - Beta',
'Framework :: IPython',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Multimedia :: Graphics',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
}
setup(**setup_args)
| ipyparaview-master | setup.py |
# Module version
version_info = (0, 1, 2, 'beta', 0)
# Module version stage suffix map
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
# Module version accessible using ipyparaview.__version__
__version__ = '%s.%s.%s%s'%(version_info[0], version_info[1], version_info[2],
'' if version_info[3]=='final' else _specifier_[version_info[3]]+str(version_info[4]))
| ipyparaview-master | ipyparaview/_version.py |
import math
import numpy as np
__all__ = ['rotateCameraTurntable', 'panCameraTurntable', 'zoomCameraTurntable']
def _normalize(v):
return v/np.linalg.norm(v)
def _cartToSphr(p):
#cartesian position into spherical
r = np.linalg.norm(p)
return np.array([r,
math.atan2(p[0], p[2]),
math.asin(p[1]/r)])
def _sphrToCart(p):
#spherical coordinate position into cartesian
return np.array([p[0]*math.sin(p[1])*math.cos(p[2]),
p[0]*math.sin(p[2]),
p[0]*math.cos(p[1])*math.cos(p[2])])
def rotateCameraTurntable(d, p, f, u, scale, phiLimit):
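    """Orbit the camera around the focus f (turntable model). d is a mouse-delta
    dict with 'x'/'y' entries, p the camera position, u the up vector, scale the
    rotation speed, and phiLimit the maximum elevation in radians; returns
    (position, focus, up)."""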
f = np.array(f)
p = np.array(p) - f
#compute orthonormal basis corresponding to current view and up vectors
b1 = _normalize(np.array(u))
b0 = _normalize(np.cross(b1, p))
b2 = np.cross(b0, b1)
#compute matrices to convert to and from the up-vector basis
fromU = np.column_stack([b0,b1,b2])
toU = np.linalg.inv(fromU)
#rotate around the focus in spherical:
# - convert focus-relative camera pos to up vector basis, then spherical
# - apply mouse deltas as movements in spherical
# - convert back to cartesian, then to standard basis, then to absolute position
cp = _cartToSphr( np.matmul(toU,p) )
cp[1] -= scale*d['x']
cp[2] = max(-phiLimit, min(phiLimit, cp[2]-scale*d['y']))
p = np.matmul( fromU, _sphrToCart(cp) )
#self.render()
return (p+f, f, u)
def panCameraTurntable(d, p, f, u, angle):
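    """Pan the camera and focus together in the view plane. d is a mouse-delta
    dict, p/f/u the camera position, focus, and up vector, and angle the camera
    view angle in degrees; returns (position, focus, up)."""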
#translates pan delta into a translation vector at the focal point
f = np.array(f)
p = np.array(p)-f
u = np.array(u)
h = _normalize(np.cross(p, u))
v = _normalize(np.cross(p, h))
f += (d['x']*h + d['y']*v)*np.linalg.norm(p)*2*math.tan(math.pi*angle/360)
#self.render()
return (p+f, f, u)
def zoomCameraTurntable(d, p, f, u, rlimit):
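    """Zoom by scaling the camera-to-focus distance by the factor d, clamped so
    the distance never drops below rlimit; returns (position, focus, up)."""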
#zooms by scaling the distance between camera and focus
f = np.array(f)
p = np.array(p)-f
r = np.linalg.norm(p)
p *= max(rlimit, r*d)/r
return (p+f, f, u)
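# Minimal usage sketch (hypothetical, not part of the library): one orbit step
# followed by a zoom, using a dummy camera state. Only the functions above and
# numpy/math are required.
def _example_camera_step():
    pos, focus, up = [0.0, 0.0, 5.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]
    # drag right by 10% of the canvas width
    pos, focus, up = rotateCameraTurntable({'x': 0.1, 'y': 0.0}, pos, focus, up,
                                           scale=5.0, phiLimit=1.5175)
    # zoom in by scaling the camera-focus distance to 90%
    pos, focus, up = zoomCameraTurntable(0.9, pos, focus, up, rlimit=1e-5)
    return pos, focus, up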
| ipyparaview-master | ipyparaview/camera_models.py |
###############################################################################
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from ._version import version_info, __version__
from .widgets import *
from .camera_models import *
def _jupyter_nbextension_paths():
"""Called by Jupyter Notebook Server to detect if it is a valid nbextension and
to install the widget
Returns
=======
section: The section of the Jupyter Notebook Server to change.
Must be 'notebook' for widget extensions
src: Source directory name to copy files from. Webpack outputs generated files
into this directory and Jupyter Notebook copies from this directory during
widget installation
dest: Destination directory name to install widget files to. Jupyter Notebook copies
from `src` directory into <jupyter path>/nbextensions/<dest> directory
during widget installation
require: Path to importable AMD Javascript module inside the
<jupyter path>/nbextensions/<dest> directory
"""
return [{
'section': 'notebook',
'src': 'static',
'dest': 'ipyparaview',
'require': 'ipyparaview/extension'
}]
class PVRenderActor:
"""A class for rendering data using ParaView as a Dask Actor"""
framenum = 0
frametime = 0 #time to render the latest frame
rank,size = 0, 1
def __init__(self, x):
#NOTE: 'x' is required in order to instantiate an actor across all nodes by passing
#a sequence of variables
import paraview
paraview.options.batch = True
paraview.options.symmetric = True
import paraview.simple as pvs
self.pvs = pvs
from mpi4py import MPI
self.rank,self.size = MPI.COMM_WORLD.Get_rank(), MPI.COMM_WORLD.Get_size()
import vtk
from vtk import vtkWindowToImageFilter
# Create render view and image transfer filter objects
self.renv = pvs.CreateRenderView()
self.w2i = vtkWindowToImageFilter()
self.w2i.ReadFrontBufferOff()
self.w2i.ShouldRerenderOff()
self.w2i.SetInput(self.renv.SMProxy.GetRenderWindow())
# Make sure all ranks have initialized
MPI.COMM_WORLD.Barrier()
if self.rank == 0:
print("All ranks ready for rendering")
def rotateCam(self, mouseDelta, rotateScale, phiLim):
"""Rotates the camera using the given mouse delta"""
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = rotateCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
rotateScale,
phiLim)
def panCam(self, mouseDelta):
"""Pans the camera using the given mouse delta"""
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = panCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
self.renv.CameraViewAngle)
def zoomCam(self, mouseDelta, rlim):
"""Zooms the camera using the given mouse delta"""
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = zoomCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
rlim)
def render(self):
"""Render a frame and return it as a numpy array"""
import time
ts = time.time()
self.pvs.Render(view=self.renv)
if self.rank == 0:
self.frametime = time.time()-ts
self.framenum += 1
def fetchFrame(self):
# Mathias's magic frame fetching snippet
self.w2i.Modified()
self.w2i.Update()
imagedata = self.w2i.GetOutput()
w,h,_ = imagedata.GetDimensions()
import numpy as np
from vtk.util.numpy_support import vtk_to_numpy
imagedata_np = vtk_to_numpy(imagedata.GetPointData().GetScalars()).reshape((h,w,3))
return np.flipud(np.pad(imagedata_np, ((0,0),(0,0),(0,1)), mode='constant', constant_values=255))
def run(self, fun, args):
"""Run the given function on the Actor's worker node"""
return fun(self, *args)
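# Hypothetical usage sketch for the Dask path: create one PVRenderActor per worker
# and pass the resulting list of actors to PVDisplay. Assumes a running
# dask.distributed cluster; pinning one actor per worker is just one possible layout.
def _example_dask_actors(client):
    workers = list(client.scheduler_info()['workers'])
    futures = [client.submit(PVRenderActor, 0, workers=[w], actor=True)
               for w in workers]
    return [f.result() for f in futures]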
| ipyparaview-master | ipyparaview/__init__.py |
###############################################################################
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
#Functions for handling camera interaction
from .camera_models import *
import ipywidgets as widgets
from traitlets import Unicode, Int, Float, Bytes, Tuple, validate
import time
import numpy as np
import threading
# for jpeg / png transfer ("compress frames"):
import base64
from io import BytesIO
from PIL import Image
@widgets.register
class PVDisplay(widgets.DOMWidget):
"""A ParaView interactive render widget"""
_view_name = Unicode('PVDisplayView').tag(sync=True)
_model_name = Unicode('PVDisplayModel').tag(sync=True)
_view_module = Unicode('ipyparaview').tag(sync=True)
_model_module = Unicode('ipyparaview').tag(sync=True)
_view_module_version = Unicode('^0.1.2').tag(sync=True)
_model_module_version = Unicode('^0.1.2').tag(sync=True)
# traitlets -- variables synchronized with front end
frame = Bytes().tag(sync=True)
compressedFrame = Bytes().tag(sync=True)
resolution = Tuple((800,500)).tag(sync=True) #canvas resolution; w,h
fpsLimit = Float(60.0).tag(sync=True) #maximum render rate
maxEventRate = Float(20.0).tag(sync=True) #maximum number of mouse events/s
# class variables
instances = dict()
rotateScale = 5.0
zoomScale = 0.05
@classmethod
def GetOrCreate(cls, ren, runAsync=True, **kwargs):
"""
Check if a PVDisplay instance already exists for the renderer. If yes, return that instance; otherwise, create a new one.
"""
instance = cls.instances.get(ren, None)
if instance is None:
instance = PVDisplay(ren, runAsync, **kwargs)
cls.instances.update({ ren : instance })
return instance
def __init__(self, ren, runAsync=True, compressFrames=False, **kwargs):
# see if we can import Dask.distributed, then try guessing the render
# mode based on the type of ren. Fallback to regular Jupyter rendering
# otherwise
try:
import dask.distributed as distributed
if(type(ren) == list and type(ren[0]) == distributed.actor.Actor):
self.mode = 'Dask'
else:
self.mode = 'Jupyter'
except ImportError:
self.mode = 'Jupyter'
if self.mode == 'Jupyter' and ren in PVDisplay.instances:
raise RuntimeError(f"A PVDisplay instance already exists for this renderer. Use PVDisplay.GetOrCreate() to avoid this error.")
super(PVDisplay, self).__init__(**kwargs) #must call super class init
# regular vars
self.compressFrames = compressFrames
self.pvs, self.renv, self.w2i = None,None,None #used for Jupyter kernel rendering
self.master, self.renderers = None,[] #used for Dask rendering
self.tp = time.time() #time of latest render
self.fps = 10.0
self.fpsOut = [] #FPS output ipywidgets; passed in from Jupyter
self.intyld = [0.05, 0.01] #interaction yield--period and duration
self.tiy = time.time() #time of last interaction yield
if self.mode == 'Dask':
self.renderers = ren
self.master = [r for r in self.renderers if r.rank == 0][0]
self.resolution = tuple(self.master.run(
lambda self : list(self.renv.ViewSize),
[]).result())
cf = self.master.run(
lambda self : list(self.renv.CameraFocalPoint),
[]).result()
cp = self.master.run(
lambda self : list(self.renv.CameraPosition),
[]).result()
self.camf = (cf[0], cf[1], cf[2])
self.camp = (cp[0], cp[1], cp[2])
else:
import paraview.simple as pvs
self.pvs = pvs
self.renv = ren
self.resolution = tuple(self.renv.ViewSize)
cf = self.renv.CameraFocalPoint
cp = self.renv.CameraPosition
self.camf = (cf[0], cf[1], cf[2])
self.camp = (cp[0], cp[1], cp[2])
import vtk
from vtk import vtkWindowToImageFilter
self.w2i = vtkWindowToImageFilter()
self.w2i.ReadFrontBufferOff()
self.w2i.ShouldRerenderOff()
self.w2i.SetInput(self.renv.SMProxy.GetRenderWindow())
self.frameNum = 0
self.FRBufSz = 10
        self.FRBuf = np.zeros(self.FRBufSz, dtype=np.float32)
self.runAsync = runAsync
if runAsync:
self.renderThread = threading.Thread(target=self.__renderLoop)
self.renderThread.start()
#FIXME: starting the render loop thread outside of __init__ seems to create
# a copy of the paraview.simple object, rather than using the one that's
# part of the PVDisplay state; this causes PV to crash
#def setAsync(self, on):
# if on and not self.runAsync:
# self.runAsync = on
# self.renderThread = threading.Thread(target=self.__renderLoop)
# self.renderThread.start()
# elif not on and self.runAsync:
# self.runAsync = False
def addFPSDisplay(self, *w):
"""Add a widget to write FPS to"""
for o in w:
self.fpsOut.append(o)
def updateCam(self):
self.render()
def render(self):
if self.runAsync:
return
else:
tc = time.time()
if(1.0/(tc-self.tp) < self.fpsLimit):
self.__renderFrame()
def fetchFrame(self):
if self.mode == 'Dask':
return self.master.fetchFrame().result()
else:
# Mathias's magic frame fetching snippet
self.w2i.Modified()
self.w2i.Update()
imagedata = self.w2i.GetOutput()
w,h,_ = imagedata.GetDimensions()
from vtk.util.numpy_support import vtk_to_numpy
imagedata_np = vtk_to_numpy(
imagedata.GetPointData().GetScalars()).reshape((h,w,3))
return np.flipud(np.pad(imagedata_np, ((0,0),(0,0),(0,1)),
mode='constant', constant_values=255))
def _handle_custom_msg(self, content, buffers):
self.content = content
if content['event'] == 'updateCam':
self.updateCam()
if content['event'] == 'rotate':
self.__rotateCam(content['data'])
if content['event'] == 'pan':
self.__panCam(content['data'])
if content['event'] == 'zoom':
self.__zoomCam(content['data'])
def __rotateCam(self, mouseDelta):
#rotates the camera around the focus in spherical
phiLim = 1.5175
if self.mode == 'Dask':
from dask.distributed import wait
wait([r.rotateCam(mouseDelta,self.rotateScale,phiLim) for r in self.renderers])
else:
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = rotateCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
self.rotateScale,
phiLim)
self.render()
def __panCam(self, mouseDelta):
#moves the camera with a 1:1 relation to current focal point
if self.mode == 'Dask':
from dask.distributed import wait
wait([r.panCam(mouseDelta) for r in self.renderers])
else:
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = panCameraTurntable(
mouseDelta,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
self.renv.CameraViewAngle)
self.render()
def __zoomCam(self, mouseDelta):
#zooms by scaling the distance between camera and focus
rlim = 0.00001 #minimum allowable radius
d = (1.0+self.zoomScale)**mouseDelta
if self.mode == 'Dask':
from dask.distributed import wait
wait([r.zoomCam(d,rlim) for r in self.renderers])
else:
(self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp) = zoomCameraTurntable(
d,
self.renv.CameraPosition,
self.renv.CameraFocalPoint,
self.renv.CameraViewUp,
rlim)
self.render()
def __compressFrame(self, frame):
img = Image.fromarray(frame[:,:,:3])
bytesIO = BytesIO()
img.save(bytesIO, format='jpeg', quality=50)
img_str = base64.b64encode(bytesIO.getvalue())
return img_str
def __renderFrame(self):
tc = time.time()
self.FRBuf[self.frameNum % self.FRBufSz] = 1.0/(tc - self.tp)
self.tp = tc
#set the camera position, render, and get the output frame
if self.mode == 'Dask':
from dask.distributed import wait
wait([r.render() for r in self.renderers])
else:
self.pvs.Render(view=self.renv)
uncompressedFrameNp = self.fetchFrame()
if self.compressFrames:
self.compressedFrame = self.__compressFrame(uncompressedFrameNp)
else:
            self.frame = uncompressedFrameNp.tobytes()  # tobytes(): tostring() is deprecated in NumPy
self.frameNum += 1
self.fps = np.average(self.FRBuf)
if self.fpsOut is not None:
for fo in self.fpsOut:
fo.value = self.fps
def __renderLoop(self):
while self.runAsync:
#check if it's time for an interaction yield; if so, do it
if time.time() - self.tiy > self.intyld[0]:
time.sleep(self.intyld[1])
self.tiy = time.time()
#sleep to keep FPS to fpsLimit
time.sleep(max(0, 1.0/self.fpsLimit - (time.time() - self.tp)))
self.__renderFrame()
@widgets.register
class VStream(widgets.DOMWidget):
"""A WebSocket-based video stream widget with interaction."""
_view_name = Unicode('VStreamView').tag(sync=True)
_model_name = Unicode('VStreamModel').tag(sync=True)
_view_module = Unicode('ipyparaview').tag(sync=True)
_model_module = Unicode('ipyparaview').tag(sync=True)
_view_module_version = Unicode('^0.1.2').tag(sync=True)
_model_module_version = Unicode('^0.1.2').tag(sync=True)
url = Unicode('ws://localhost:9002').tag(sync=True)
state = Unicode('').tag(sync=True)
def connect(self):
self.state = 'connect'
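# Minimal notebook usage sketch (hypothetical): assumes a ParaView-enabled Jupyter
# kernel and a pipeline built with paraview.simple.
def _example_notebook_usage():
    import paraview.simple as pvs
    sphere = pvs.Sphere()
    pvs.Show(sphere)
    renv = pvs.GetActiveViewOrCreate('RenderView')
    pvs.Render(view=renv)
    # GetOrCreate avoids the duplicate-instance error raised by __init__
    return PVDisplay.GetOrCreate(renv, runAsync=True)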
| ipyparaview-master | ipyparaview/widgets.py |
#!/usr/bin/env python
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from setuptools import find_packages, setup
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
author="MLPerf",
author_email="mlperf@local", # TODO
python_requires=">=3.8",
description="MLPerf utilities for NVIDIA related submissions",
license="Apache Software License 2.0",
long_description=readme,
long_description_content_type="text/markdown",
name="mlperf-common",
packages=find_packages(where="."),
scripts=['client/bindpcie','client/slurm2pytorch','client/mgpurun'],
url="https://github.com/NVIDIA/mlperf-common",
version="0.3",
)
| mlperf-common-main | setup.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants
class MLLoggerWrapper:
def __init__(self, comm_handler, mllogger=None, default_stack_offset=3, **default_log_kwargs):
"""
Args:
comm_handler (CommunicationHandler): framework specific handler for barrier
and determining process rank
mllogger: MLPerf official logger instance
default_stack_offset: number of stack calls added by this wrapper and the wrapped logger
when using `start`, `end` and `event` methods. Other helper methods automatically
increment stack_offset by 1.
**default_log_kwargs: kwargs appended to each logging call
"""
self.comm_handler = comm_handler
if mllogger is None:
mllogger = mllog.get_mllogger()
mllogger.default_stack_offset = default_stack_offset
self.default_stack_offset = default_stack_offset
self.mllogger = mllogger
self.default_log_kwargs = default_log_kwargs
# reducing imports convenience
self.constants = constants
def start(self, *args, **kwargs):
""" Primitive for logging a time interval start. """
self._log(self.mllogger.start, *args, **kwargs)
def end(self, *args, **kwargs):
""" Primitive for logging a time interval end. """
self._log(self.mllogger.end, *args, **kwargs)
def event(self, *args, **kwargs):
""" Primitive for logging a point in time event. """
self._log(self.mllogger.event, *args, **kwargs)
def _log(self, log_fn, *args, unique=True, unique_log_rank=0,
sync=False, sync_group=None, internal_call=False, **kwargs):
""" Main logging function.
Args:
log_fn: actual logging function
*args: passed to `log_fn`
unique: if True, only `unique_log_rank` process will log
unique_log_rank: which process should log a unique log
sync: calls a barrier *before* logging
sync_group: group passed to barrier function
internal_call: set to True for logs made by this wrapper.
Increments default stack_offset to correctly trace log call location.
**kwargs: passed to `log_fn` along with `self.default_log_kwargs`
"""
kwargs = {**self.default_log_kwargs, **kwargs}
if kwargs.get('stack_offset') is None and internal_call:
kwargs['stack_offset'] = self.default_stack_offset + 1
if sync:
self.comm_handler.barrier(sync_group)
if not unique or self.comm_handler.global_rank() == unique_log_rank:
log_fn(*args, **kwargs)
def log_init_stop_run_start(self):
""" Logs INIT_STOP and RUN_START with correct synchronization across workers. """
self.end(key=constants.INIT_STOP, unique=True, sync=True, internal_call=True)
self.start(key=constants.RUN_START, unique=True, sync=True, internal_call=True)
self.comm_handler.barrier()
def log_run_stop(self, status, sync=True, unique=True, unique_log_rank=0, **kwargs):
""" Logs RUN_STOP with correct synchronization across workers. """
self.end(key=constants.RUN_STOP, unique=unique, unique_log_rank=unique_log_rank, sync=sync,
metadata=dict(status=status, **kwargs), internal_call=True)
def log_weights_initialization(self, tensor_name):
""" Helper for weights initialization. """
self.event(key=constants.WEIGHTS_INITIALIZATION,
metadata=dict(tensor=tensor_name), internal_call=True)
def mlperf_submission_log(self, benchmark, num_nodes=None, org=None,
platform=None):
""" Helper for logging submission entry. """
if num_nodes is None:
num_nodes = os.environ.get('SLURM_JOB_NUM_NODES', 1)
if org is None:
org = os.environ.get('MLPERF_SUBMISSION_ORG',
'SUBMISSION_ORG_PLACEHOLDER')
if platform is None:
platform = os.environ.get('MLPERF_SUBMISSION_PLATFORM',
'SUBMISSION_PLATFORM_PLACEHOLDER')
self.event(
key=constants.SUBMISSION_BENCHMARK,
value=benchmark,
internal_call=True)
self.event(
key=constants.SUBMISSION_ORG,
value=org,
internal_call=True)
self.event(
key=constants.SUBMISSION_DIVISION,
value=constants.CLOSED,
internal_call=True)
self.event(
key=constants.SUBMISSION_STATUS,
value=constants.ONPREM,
internal_call=True)
self.event(
key=constants.SUBMISSION_PLATFORM,
value=f'{num_nodes}x{platform}',
internal_call=True)
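# Illustrative sketch (not part of the library): wiring the wrapper to the PyTorch
# communication handler from mlperf_common.frameworks.pyt. Benchmark/status keys are
# the standard mlperf_logging constants.
def _example_logging():
    from mlperf_common.frameworks.pyt import PyTCommunicationHandler
    mllogger = MLLoggerWrapper(PyTCommunicationHandler(), value=None)
    mllogger.mlperf_submission_log(benchmark=constants.RESNET)
    mllogger.log_init_stop_run_start()
    # ... training happens here ...
    mllogger.log_run_stop(status=constants.SUCCESS)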
| mlperf-common-main | mlperf_common/logging.py |
mlperf-common-main | mlperf_common/__init__.py |
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from time import time
from collections import defaultdict
from sys import exit
class Metricstats:
def __init__(self):
self.total = 0
self.count = 0
self.min = 1000000
self.max = 0
def addtag(self, dur):
self.total += dur
self.count += 1
if dur < self.min:
self.min = dur
if dur > self.max:
self.max = dur
def getstats(self):
return self.total, self.count, self.min, self.max
def getcount(self):
return self.count
class ScaleoutBridgeBase(object):
FWD_TIME = 'fwd_time'
BWD_TIME = 'bwd_time'
FBWD_TIME = 'fbwd_time'
OPT_TIME = 'opt_time'
LOAD_TIME = 'load_time'
EVAL_TIME = 'eval_time'
ITER_TIME = 'iter_time'
EPOCH_TIME = 'epoch_time'
def start_prof(self, tag):
pass
def stop_prof(self, tag):
return 1
def stop_start_prof(self, tag1, tag2):
pass
def start_epoch_prof(self):
pass
def stop_epoch_prof(self):
pass
def start_eval_prof(self):
pass
def stop_eval_prof(self):
pass
class ScaleoutBridgeIterwise(ScaleoutBridgeBase):
def __init__(self, qmax, time_tags, nvtx_flag, deviceid,
prof_handler, comm_handler,
mllogger):
super().__init__()
print("Scaleout performance bridge is running ...")
self.qmax = qmax
self.time_tags = time_tags
self.nvtx_flag = nvtx_flag
self.deviceid = deviceid
self.bridgestats = defaultdict(Metricstats)
self.start_epoch = 0
self.start_eval = 0
self.start_time = 0
self.start_iter = 0
self.comm_handler = comm_handler
self.prof_handler = prof_handler
self.mllogger = mllogger
def print_tag(self, tag, dur):
self.mllogger.event(key=tag, value={'r':self.deviceid, 't':dur}, unique=False)
def add_tag(self, tag, dur):
self.bridgestats[tag].addtag(dur)
if tag == self.ITER_TIME:
if self.bridgestats[tag].getcount() > self.qmax:
self.printstats()
return 0
return 1
def start_prof(self, tag):
if self.time_tags:
self.comm_handler.device_sync()
if tag == self.ITER_TIME:
self.start_iter = time()
else:
self.start_time = time()
if self.nvtx_flag:
self.prof_handler.push_nvtx(tag)
def stop_prof(self, tag):
if self.time_tags:
self.comm_handler.device_sync()
if tag == self.ITER_TIME:
if not self.add_tag(tag, time()-self.start_iter):
self.printstats()
self.prof_handler.profiler_stop()
self.time_tags = 0
exit()
self.start_iter = 0
else:
self.add_tag(tag, time()-self.start_time)
self.start_time = 0
if self.nvtx_flag:
self.prof_handler.pop_nvtx()
return self.time_tags
def stop_start_prof(self, tag1, tag2):
if self.time_tags:
self.comm_handler.device_sync()
new_start_time = time()
if not self.add_tag(tag1, new_start_time-self.start_time):
self.printstats()
self.time_tags = 0
self.start_time = new_start_time
if self.nvtx_flag:
self.prof_handler.pop_nvtx()
self.prof_handler.push_nvtx(tag2)
def start_epoch_prof(self):
self.prof_handler.profiler_start()
self.comm_handler.device_sync()
self.start_epoch = time()
def stop_epoch_prof(self):
self.printstats()
self.comm_handler.device_sync()
self.print_tag(self.EPOCH_TIME, time()-self.start_epoch)
self.prof_handler.profiler_stop()
exit()
def start_eval_prof(self):
self.comm_handler.device_sync()
self.start_eval = time()
def stop_eval_prof(self):
self.printstats()
self.comm_handler.device_sync()
self.print_tag(self.EVAL_TIME, time()-self.start_eval)
def printstats(self):
if not self.time_tags:
return
for tag in self.bridgestats:
self.printstat(tag)
self.bridgestats.clear()
def printstat(self, tag):
total, count, minimum, maximum = self.bridgestats[tag].getstats()
self.mllogger.event(key=tag+'_total', value={'r':self.deviceid, 't':total}, unique=False)
self.mllogger.event(key=tag+'_count', value={'r':self.deviceid, 't':count}, unique=False)
self.mllogger.event(key=tag+'_min', value={'r':self.deviceid, 't':minimum}, unique=False)
self.mllogger.event(key=tag+'_max', value={'r':self.deviceid, 't':maximum}, unique=False)
class ScaleoutBridgeEpochwise(ScaleoutBridgeBase):
def __init__(self, deviceid, comm_handler, mllogger):
super().__init__()
print("Scaleout performance bridge-epoch only is running ...")
self.start_time = 0
self.deviceid = deviceid
self.comm_handler = comm_handler
self.mllogger = mllogger
def start_epoch_prof(self):
self.comm_handler.device_sync()
self.start_time = time()
def stop_epoch_prof(self):
self.comm_handler.device_sync()
self.mllogger.event(key='epoch_time', value={'r':self.deviceid, 't':time()-self.start_time}, unique=False)
def init_bridge(prof_handler, comm_handler, mllogger):
deviceid = comm_handler.global_rank()
time_tags = int(os.getenv('TIME_TAGS', 0))
nvtx_flag = int(os.getenv('NVTX_FLAG', 0))
epoch_only = int(os.getenv('EPOCH_PROF', 0))
sbridge = ScaleoutBridgeBase()
if time_tags or nvtx_flag:
sbridge = ScaleoutBridgeIterwise(1000, time_tags, nvtx_flag, deviceid,
prof_handler, comm_handler, mllogger)
elif epoch_only:
sbridge = ScaleoutBridgeEpochwise(deviceid, comm_handler, mllogger)
return sbridge
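# Illustrative sketch (not part of the library): a training loop instrumented with
# the bridge. Assumes the PyTorch handlers from mlperf_common.frameworks.pyt and an
# MLLoggerWrapper instance; profiling only activates when TIME_TAGS, NVTX_FLAG, or
# EPOCH_PROF is set in the environment.
def _example_instrumented_loop(mllogger, train_iter, forward, backward, optimizer_step):
    from mlperf_common.frameworks.pyt import PyTProfilerHandler, PyTCommunicationHandler
    sbridge = init_bridge(PyTProfilerHandler(), PyTCommunicationHandler(), mllogger)
    sbridge.start_epoch_prof()
    for batch in train_iter:
        sbridge.start_prof(sbridge.ITER_TIME)
        sbridge.start_prof(sbridge.FWD_TIME)
        loss = forward(batch)
        sbridge.stop_start_prof(sbridge.FWD_TIME, sbridge.BWD_TIME)
        backward(loss)
        sbridge.stop_start_prof(sbridge.BWD_TIME, sbridge.OPT_TIME)
        optimizer_step()
        sbridge.stop_prof(sbridge.OPT_TIME)
        sbridge.stop_prof(sbridge.ITER_TIME)
    sbridge.stop_epoch_prof()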
| mlperf-common-main | mlperf_common/scaleoutbridge.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mxnet as mx
from mxnet import cuda_utils as cu
from mlperf_common.frameworks.base import ProfilerHandler
from mlperf_common.frameworks.base_mpi import BaseMPICommunicationHandler
class MPICommunicationHandler(BaseMPICommunicationHandler):
def device_sync(self):
mx.nd.waitall()
class MXNetProfilerHandler(ProfilerHandler):
def profiler_start(self):
cu.cuda_profiler_start()
def profiler_stop(self):
cu.cuda_profiler_stop()
def push_nvtx(self, tag):
cu.nvtx_range_push(tag)
def pop_nvtx(self):
cu.nvtx_range_pop()
| mlperf-common-main | mlperf_common/frameworks/mxnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from mlperf_common.frameworks.base import CommunicationHandler
class BaseMPICommunicationHandler(CommunicationHandler):
def __init__(self, comm=None, **kwargs):
super().__init__(**kwargs)
self.comm = comm
def _get_comm(self):
if self.comm is None:
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
return self.comm
def barrier(self, sync_group=None):
c = self._get_comm() if sync_group is None else sync_group
# NOTE: MPI_Barrier is *not* working reliably at scale. Using MPI_Allreduce instead.
# c.Barrier()
val = np.ones(1, dtype=np.int32)
result = np.zeros(1, dtype=np.int32)
c.Allreduce(val, result)
def global_rank(self):
c = self._get_comm()
return c.Get_rank()
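# Illustrative sketch (hypothetical): the handler creates MPI.COMM_WORLD lazily, so
# mpi4py is only required once a barrier or rank query is actually made.
def _example_mpi_handler():
    handler = BaseMPICommunicationHandler()
    rank = handler.global_rank()
    handler.barrier()
    return rank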
| mlperf-common-main | mlperf_common/frameworks/base_mpi.py |
mlperf-common-main | mlperf_common/frameworks/__init__.py |
|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mlperf_common.frameworks.base_mpi import BaseMPICommunicationHandler
class HCTRCommunicationHandler(BaseMPICommunicationHandler):
pass
| mlperf-common-main | mlperf_common/frameworks/hugectr.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CommunicationHandler:
def global_rank(self):
raise NotImplementedError
def barrier(self, sync_group=None):
raise NotImplementedError
def device_sync(self):
raise NotImplementedError
class ProfilerHandler:
def profiler_start(self):
raise NotImplementedError
def profiler_stop(self):
raise NotImplementedError
def push_nvtx(self, tag):
raise NotImplementedError
def pop_nvtx(self):
raise NotImplementedError
| mlperf-common-main | mlperf_common/frameworks/base.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from mlperf_common.frameworks.base import CommunicationHandler, ProfilerHandler
class PyTCommunicationHandler(CommunicationHandler):
def global_rank(self):
if self.is_distributed():
return torch.distributed.get_rank()
return 0
def barrier(self, sync_group=None):
if self.is_distributed():
torch.distributed.barrier(sync_group)
def is_distributed(self):
return torch.distributed.is_available() and torch.distributed.is_initialized()
def device_sync(self):
torch.cuda.synchronize()
class PyTProfilerHandler(ProfilerHandler):
def profiler_start(self):
torch.cuda.cudart().cudaProfilerStart()
def profiler_stop(self):
torch.cuda.cudart().cudaProfilerStop()
def push_nvtx(self, tag):
torch.cuda.nvtx.range_push(tag)
def pop_nvtx(self):
torch.cuda.nvtx.range_pop()
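# Illustrative sketch (hypothetical): wrapping a region in an NVTX range so it shows
# up in Nsight Systems captures.
def _example_nvtx_region(fn, *args):
    prof = PyTProfilerHandler()
    prof.push_nvtx("example_region")
    try:
        return fn(*args)
    finally:
        prof.pop_nvtx()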
| mlperf-common-main | mlperf_common/frameworks/pyt.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
import functools
import operator
import collections
from library import *
###################################################################################################
#
# Data structure modeling a GEMM operation
#
###################################################################################################
#
class GemmOperation:
#
def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, D = None,
kernel_schedule = KernelScheduleType.ScheduleAuto, epilogue_schedule = EpilogueScheduleType.ScheduleAuto,
tile_scheduler = TileSchedulerType.Default):
self.prefix = "3x" if gemm_kind == GemmKind.Universal3x else ""
self.operation_kind = OperationKind.Gemm
self.arch = arch
self.tile_description = tile_description
self.gemm_kind = gemm_kind
self.A = A
self.B = B
self.C = C
self.D = D
    if self.D is None:
self.D = self.C
if gemm_kind != GemmKind.Universal3x:
assert(kernel_schedule == KernelScheduleType.ScheduleAuto)
assert(epilogue_schedule == EpilogueScheduleType.ScheduleAuto)
self.kernel_schedule = kernel_schedule
self.epilogue_schedule = epilogue_schedule
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
self.tile_scheduler = tile_scheduler
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return self.gemm_kind in (GemmKind.PlanarComplex, GemmKind.PlanarComplexArray)
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
if self.gemm_kind == GemmKind.Universal3x:
inst_shape = "{0}x{1}x{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape))
else:
inst_shape = "{0}{1}{2}".format(*tuple(self.tile_description.math_instruction.instruction_shape))
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, GemmKindNames[self.gemm_kind])
# Generates a string representing the MMA instruction.
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
def extended_name_3x(self):
'''Generates a string representing the MMA atom. Assumes accumulator type is C type.'''
extended_name = "{core_name}_{element_a}_{element_b}_{element_acc}_{element_c}_{element_d}".format(
element_a = DataTypeNames[self.A.element],
element_b = DataTypeNames[self.B.element],
element_acc = DataTypeNames[self.tile_description.math_instruction.element_accumulator],
element_c = DataTypeNames[self.C.element],
element_d = DataTypeNames[self.D.element],
core_name = self.core_name())
return extended_name
# Generates a short string representing the AB layout tags (e.g. nt or tn)
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
# Generates a short string representing the ABC layout tags (e.g. ntn or tnn)
def layout_name_3x(self):
if self.is_complex() or self.is_planar_complex():
return "{}{}{}".format(
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)],
ShortComplexLayoutNames[(self.C.layout, self.C.complex_transform)])
else:
return "{}{}{}".format(
ShortLayoutTypeNames[self.A.layout],
ShortLayoutTypeNames[self.B.layout],
ShortLayoutTypeNames[self.C.layout])
# Generates a short string representing underlying kernel schedule type
def kernel_schedule_name_3x(self):
return KernelScheduleSuffixes[self.kernel_schedule]
# Generates a short string representing underlying epilogue schedule type
def epilogue_schedule_name_3x(self):
return EpilogueScheduleSuffixes[self.epilogue_schedule]
# Generates the full kernel function name
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
if self.arch >= 90:
kernel_name_template = "cutlass{p}_sm{ar}_{op}_{ex}_{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{l}_{s}_align{al}{t}{k}{e}"
return kernel_name_template.format(
p = self.prefix,
ar = self.arch,
op = opcode_class_name,
ex = self.extended_name_3x(),
tbm = self.tile_description.tile_shape[0],
tbn = self.tile_description.tile_shape[1],
tbk = self.tile_description.tile_shape[2],
cm = self.tile_description.cluster_shape[0],
cn = self.tile_description.cluster_shape[1],
ck = self.tile_description.cluster_shape[2],
l = self.tile_description.stages,
s = self.layout_name_3x(),
al = str(max(self.A.alignment, self.B.alignment)),
t = TileSchedulerSuffixes[self.tile_scheduler],
k = self.kernel_schedule_name_3x(),
e = self.epilogue_schedule_name_3x())
else:
threadblock = self.tile_description.procedural_name()
return "cutlass{p}_{op}_{ex}_{tb}_{l}_align{a}".format(
p = self.prefix,
op = opcode_class_name,
ex = self.extended_name(),
tb = threadblock,
l = self.layout_name(),
a = str(self.A.alignment))
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
def __hash__(self):
return hash(self.configuration_name())
def __eq__(self, other):
return self.configuration_name() == other.configuration_name()
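# Illustrative sketch (not emitted by the generator): constructing a single SM80
# f16 tensor-op GEMM description and inspecting its kernel name. MathInstruction,
# TileDescription, and TensorDescription are assumed to come from library.py with
# their usual signatures; exact arguments may differ between CUTLASS versions.
def _example_gemm_operation():
  math_inst = MathInstruction(
    [16, 8, 16], DataType.f16, DataType.f16, DataType.f32,
    OpcodeClass.TensorOp, MathOperation.multiply_add)
  tile = TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, 80, 1024)
  A = TensorDescription(DataType.f16, LayoutType.ColumnMajor, 8)
  B = TensorDescription(DataType.f16, LayoutType.ColumnMajor, 8)
  C = TensorDescription(DataType.f32, LayoutType.ColumnMajor, 4)
  op = GemmOperation(GemmKind.Universal, 80, tile, A, B, C, DataType.f32)
  return op.procedural_name()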
###################################################################################################
#
# Data structure modeling a grouped GEMM operation
#
###################################################################################################
#
class GroupedGemmOperation(GemmOperation):
#
def __init__(self, gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
scheduler_mode = GroupScheduleMode.Device):
super().__init__(gemm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor, swizzling_functor)
self.scheduler_mode = scheduler_mode
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
base = super().procedural_name()
return SubstituteTemplate(
base + "_schedule${schedule}",
{
'schedule': ShortGroupScheduleModeNames[self.scheduler_mode]
})
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitGemmInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::Gemm<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
false,
${math_operation}
${residual}
>;
"""
self.gemm_complex_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::GemmComplex<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${transform_a},
${transform_b},
${math_operation}
${residual}
>;
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
residual = ''
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'residual': residual
}
template = self.gemm_complex_template if operation.is_complex() else self.gemm_template
return SubstituteTemplate(template, values)
###################################################################################################
class EmitSparseGemmInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.gemm_template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = cutlass::gemm::device::SparseGemm<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
false,
${math_operation}
${residual}
>;
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<Operation_${operation_name}>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
residual = ''
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'residual': residual
}
template = self.gemm_template
return SubstituteTemplate(template, values)
###################################################################################################
#
class EmitGemmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/device/gemm_universal_adapter.h",
"cutlass/gemm/kernel/default_gemm_universal.h",
]
self.builtin_epilogue_functor_template = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>
"""
self.gemm_template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_b}, ${layout_b}, ${transform_b}, ${align_b}, // transposed B operand
${element_a}, ${layout_a}, ${transform_a}, ${align_a}, // transposed A operand
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
self.gemm_template_interleaved = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
transpose_layouts = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor
}
if operation.A.layout in transpose_layouts.keys() and \
operation.B.layout in transpose_layouts.keys() and \
operation.C.layout in transpose_layouts.keys():
instance_layout_A = transpose_layouts[operation.A.layout]
instance_layout_B = transpose_layouts[operation.B.layout]
instance_layout_C = transpose_layouts[operation.C.layout]
gemm_template = self.gemm_template
else:
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
gemm_template = self.gemm_template_interleaved
#
# Support built-in epilogue functors or user-defined functions
if isinstance(operation.epilogue_functor, enum.Enum):
epilogue_vector_length = \
min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
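# Illustrative note (hypothetical values): the expression caps the epilogue vector at
# 128 bits, so an f16 C tensor (16-bit elements) with alignment 8 gives
# min(8 * 16, 128) // 16 = 8 elements per vectorized access.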
values = {
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
}
epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values)
else:
epilogue_functor = operation.epilogue_functor.emit_declaration()
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_functor': epilogue_functor,
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
return SubstituteTemplate(gemm_template, values)
###################################################################################################
#
class EmitGemmUniversal3xInstance:
''' Responsible for emitting a CUTLASS 3.x template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/gemm/gemm.h",
"cutlass/numeric_types.h",
"cutlass/gemm/kernel/gemm_universal.hpp",
"cutlass/gemm/collective/collective_builder.hpp",
"cutlass/epilogue/collective/collective_builder.hpp",
]
self.builtin_epilogue_functor_template = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>
"""
self.gemm_template = """
using ${operation_name}_epilogue =
typename cutlass::epilogue::collective::CollectiveBuilder<
${arch}, ${opcode_class},
cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
cutlass::epilogue::collective::EpilogueTileAuto,
${element_accumulator}, ${element_epilogue},
${element_c}, ${layout_c}, ${align_c},
${element_d}, ${layout_d}, ${align_d},
${epilogue_schedule}
>::CollectiveOp;
using ${operation_name}_mainloop =
typename cutlass::gemm::collective::CollectiveBuilder<
${arch}, ${opcode_class},
${element_a}, ${layout_a}, ${align_a},
${element_b}, ${layout_b}, ${align_b},
${element_accumulator},
cute::Shape<cute::_${tile_shape_m}, cute::_${tile_shape_n}, cute::_${tile_shape_k}>,
cute::Shape<cute::_${cluster_m},cute::_${cluster_n},cute::_${cluster_k}>,
${stages},
${kernel_schedule}
>::CollectiveOp;
// Gemm operator ${operation_name}
using ${operation_name}_base = cutlass::gemm::kernel::GemmUniversal<
cute::Shape<int,int,int,int>,
${operation_name}_mainloop,
${operation_name}_epilogue,
${tile_scheduler}>;
// Define named type
struct ${operation_name} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
using GemmKernel = cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>;
manifest.append(
new ${gemm_kind}<GemmKernel>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
tile_shape = operation.tile_description.tile_shape
warp_count = operation.tile_description.warp_count
# stage count set to zero indicates builder automatic stage selection
if operation.tile_description.stages > 0:
stage_count_string = f"cutlass::gemm::collective::StageCount<{str(operation.tile_description.stages)}>"
else:
stage_count_string = f"cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename {str(operation.procedural_name())}_epilogue::SharedStorage)>"
warp_shape = [tile_shape[idx] // warp_count[idx] for idx in range(3)]
instance_layout_A, instance_layout_B, instance_layout_C , instance_layout_D = \
(operation.A.layout, operation.B.layout, operation.C.layout, operation.D.layout)
# 3.0 profiler integration only supports trivial epilogues for now
epilogue_vector_length = 1
# Support built-in epilogue functors or user-defined functions
if isinstance(operation.epilogue_functor, enum.Enum):
values = {
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
}
epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values)
else:
epilogue_functor = operation.epilogue_functor.emit_declaration()
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_d': DataTypeTag[operation.D.element],
'layout_d': LayoutTag[instance_layout_D],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'tile_shape_m': str(operation.tile_description.tile_shape[0]),
'tile_shape_n': str(operation.tile_description.tile_shape[1]),
'tile_shape_k': str(operation.tile_description.tile_shape[2]),
'cluster_m': str(operation.tile_description.cluster_shape[0]),
'cluster_n': str(operation.tile_description.cluster_shape[1]),
'cluster_k': str(operation.tile_description.cluster_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'kernel_schedule' : str(KernelScheduleTag[operation.kernel_schedule]),
'epilogue_schedule' : str(EpilogueScheduleTag[operation.epilogue_schedule]),
'epilogue_functor': epilogue_functor,
'stages': stage_count_string,
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'align_c': str(operation.C.alignment),
'align_d': str(operation.C.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'tile_scheduler': str(TileSchedulerTag[operation.tile_scheduler])
}
return SubstituteTemplate(self.gemm_template, values)
###################################################################################################
#
class EmitGemmPlanarComplexInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${alignment_a},
${element_b}, ${layout_b}, ${transform_b}, ${alignment_b},
${element_c}, cutlass::layout::RowMajor,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
cutlass::epilogue::thread::LinearCombinationPlanarComplex<
${element_c},
${alignment_c},
${element_accumulator},
${element_epilogue}
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
${stages},
${math_operator}
>::GemmKernel;
struct ${operation_name} :
public Operation_${operation_name} { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
# exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major
transposed_layout_A = TransposedLayout[operation.A.layout]
transposed_layout_B = TransposedLayout[operation.B.layout]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.B.element],
'layout_a': LayoutTag[transposed_layout_B],
'transform_a': ComplexTransformTag[operation.B.complex_transform],
'alignment_a': str(operation.B.alignment),
'element_b': DataTypeTag[operation.A.element],
'layout_b': LayoutTag[transposed_layout_A],
'transform_b': ComplexTransformTag[operation.A.complex_transform],
'alignment_b': str(operation.A.alignment),
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'alignment_c': str(operation.C.alignment),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'stages': str(operation.tile_description.stages),
'math_operator': 'cutlass::arch::OpMultiplyAdd'
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
class EmitGemmPlanarComplexArrayInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = []
self.template = """
// Gemm operator ${operation_name}
using Operation_${operation_name} = typename cutlass::gemm::kernel::DefaultGemmPlanarComplexUniversal<
${element_a}, ${layout_a}, ${transform_a}, ${alignment_a},
${element_b}, ${layout_b}, ${transform_b}, ${alignment_b},
${element_c}, cutlass::layout::RowMajor,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
cutlass::epilogue::thread::LinearCombinationPlanarComplex<
${element_c},
${alignment_c},
${element_accumulator},
${element_epilogue}
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
${stages},
${math_operator}
>::GemmArrayKernel;
struct ${operation_name} : public Operation_${operation_name} { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmUniversalAdapter<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
warp_shape = [operation.tile_description.threadblock_shape[idx] // operation.tile_description.warp_count[idx] for idx in range(3)]
# exchange and transpose A and B types, layouts, and complex transforms since the C layout is row-major
transposed_layout_A = TransposedLayout[operation.A.layout]
transposed_layout_B = TransposedLayout[operation.B.layout]
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.B.element],
'layout_a': LayoutTag[transposed_layout_B],
'transform_a': ComplexTransformTag[operation.B.complex_transform],
'alignment_a': str(operation.B.alignment),
'element_b': DataTypeTag[operation.A.element],
'layout_b': LayoutTag[transposed_layout_A],
'transform_b': ComplexTransformTag[operation.A.complex_transform],
'alignment_b': str(operation.A.alignment),
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'alignment_c': str(operation.C.alignment),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'stages': str(operation.tile_description.stages),
'math_operator': 'cutlass::arch::OpMultiplyAdd'
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
class EmitGemmGroupedInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self, operation_suffix = ''):
self.operation_suffix = operation_suffix
self.includes = [
"cutlass/cutlass.h",
"cutlass/numeric_types.h",
"cutlass/arch/arch.h",
"cutlass/arch/mma.h",
"cutlass/layout/matrix.h",
"cutlass/gemm/device/gemm.h",
"cutlass/gemm/kernel/gemm_grouped.h",
"cutlass/gemm/kernel/default_gemm_grouped.h",
"cutlass/gemm/device/gemm_grouped.h"
]
self.builtin_epilogue_functor_template = """
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>
"""
self.gemm_template = """
// Gemm operator ${operation_name}
using ${operation_name}_base =
typename cutlass::gemm::kernel::DefaultGemmGrouped<
${element_a}, ${layout_a}, ${transform_a}, ${align_a},
${element_b}, ${layout_b}, ${transform_b}, ${align_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor},
${swizzling_functor},
${stages},
${scheduler_mode},
${math_operation}
>::GemmKernel;
// Define named type
struct ${operation_name}${operation_suffix} :
public ${operation_name}_base { };
"""
#
def instance_template(self):
return """
${compile_guard_start}
manifest.append(new ${gemm_kind}<
cutlass::gemm::device::GemmGrouped<${operation_name}>
>("${operation_name}"));
${compile_guard_end}
"""
#
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
transpose_layouts = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor
}
instance_layout_A, instance_layout_B, instance_layout_C = \
(operation.A.layout, operation.B.layout, operation.C.layout)
#
# Support built-in epilogue functors or user-defined functions
if isinstance(operation.epilogue_functor, enum.Enum):
epilogue_vector_length = \
min(operation.C.alignment * DataTypeSize[operation.C.element], 128) // DataTypeSize[operation.C.element]
values = {
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
}
epilogue_functor = SubstituteTemplate(self.builtin_epilogue_functor_template, values)
else:
epilogue_functor = operation.epilogue_functor.emit_declaration()
#
values = {
'operation_name': operation.procedural_name(),
'operation_suffix': self.operation_suffix,
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[instance_layout_A],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[instance_layout_B],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[instance_layout_C],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_functor': epilogue_functor,
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'scheduler_mode': GroupScheduleModeTag[operation.scheduler_mode],
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation]
}
return SubstituteTemplate(self.gemm_template, values)
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitGemmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
GemmKind.Gemm: EmitGemmInstance,
GemmKind.Sparse: EmitSparseGemmInstance,
GemmKind.Universal: EmitGemmUniversalInstance,
GemmKind.Universal3x: EmitGemmUniversal3xInstance,
GemmKind.PlanarComplex: EmitGemmPlanarComplexInstance,
GemmKind.PlanarComplexArray: EmitGemmPlanarComplexArrayInstance,
GemmKind.Grouped: EmitGemmGroupedInstance
}
self.gemm_kind_wrappers = {
GemmKind.Gemm: 'GemmOperation',
GemmKind.Sparse: 'GemmSparseOperation',
GemmKind.Universal: 'GemmUniversalOperation',
GemmKind.Universal3x: 'GemmUniversal3xOperation',
GemmKind.PlanarComplex: 'GemmPlanarComplexOperation',
GemmKind.PlanarComplexArray: 'GemmPlanarComplexArrayOperation',
GemmKind.Grouped: 'GemmGroupedOperation'
}
self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.separator = """
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by gemm_operation.py - Do not edit.
*/
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.configuration_file.write(self.separator)
self.includes = collections.OrderedDict([
("cutlass/cutlass.h", None),
("cutlass/library/library.h", None),
("cutlass/library/manifest.h", None),
("library_internal.h", None),
("gemm_operation.h", None),
("gemm_operation_3x.hpp", None),
("cutlass/arch/wmma.h", None),
("cutlass/numeric_types.h", None)
])
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.gemm_kind]()
for incl in emitter.includes:
self.includes[incl] = None
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(emitter.instance_template(), {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'gemm_kind': self.gemm_kind_wrappers[operation.gemm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write includes
for incl, _ in self.includes.items():
include_statement = "#include \"%s\"\n" % incl
self.configuration_file.write(include_statement)
self.configuration_file.write(self.separator)
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
###################################################################################################
| cutlass-main | tools/library/scripts/gemm_operation.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import re
###################################################################################################
import enum
# The following block implements enum.auto() for Python 3.5 variants that don't include it such
# as the default 3.5.2 on Ubuntu 16.04.
#
# https://codereview.stackexchange.com/questions/177309/reimplementing-pythons-enum-auto-for-compatibility
try:
from enum import auto as enum_auto
except ImportError:
__cutlass_library_auto_enum = 0
def enum_auto() -> int:
global __cutlass_library_auto_enum
i = __cutlass_library_auto_enum
__cutlass_library_auto_enum += 1
return i
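# With this fallback, successive enum_auto() calls return 0, 1, 2, ... from a
# module-level counter, which is enough to give each enum member a distinct value.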
###################################################################################################
#
class GeneratorTarget(enum.Enum):
Library = enum_auto()
#
GeneratorTargetNames = {
GeneratorTarget.Library: 'library'
}
#
###################################################################################################
#
class DataType(enum.Enum):
void = enum_auto() # primarily used to disable C tensor for epilogues
b1 = enum_auto()
u4 = enum_auto()
u8 = enum_auto()
u16 = enum_auto()
u32 = enum_auto()
u64 = enum_auto()
s4 = enum_auto()
s8 = enum_auto()
s16 = enum_auto()
s32 = enum_auto()
s64 = enum_auto()
e4m3 = enum_auto()
e5m2 = enum_auto()
f16 = enum_auto()
bf16 = enum_auto()
f32 = enum_auto()
tf32 = enum_auto()
f64 = enum_auto()
cf16 = enum_auto()
cbf16 = enum_auto()
cf32 = enum_auto()
ctf32 = enum_auto()
cf64 = enum_auto()
cs4 = enum_auto()
cs8 = enum_auto()
cs16 = enum_auto()
cs32 = enum_auto()
cs64 = enum_auto()
cu4 = enum_auto()
cu8 = enum_auto()
cu16 = enum_auto()
cu32 = enum_auto()
cu64 = enum_auto()
invalid = enum_auto()
#
ShortDataTypeNames = {
DataType.s32: 'i',
DataType.e4m3: 'e4m3',
DataType.e5m2: 'e5m2',
DataType.f16: 'h',
DataType.f32: 's',
DataType.f64: 'd',
DataType.cf32: 'c',
DataType.cf64: 'z',
}
#
DataTypeNames = {
DataType.void: "void",
DataType.b1: "b1",
DataType.u4: "u4",
DataType.u8: "u8",
DataType.u16: "u16",
DataType.u32: "u32",
DataType.u64: "u64",
DataType.s4: "s4",
DataType.s8: "s8",
DataType.s16: "s16",
DataType.s32: "s32",
DataType.s64: "s64",
DataType.e4m3: 'e4m3',
DataType.e5m2: 'e5m2',
DataType.f16: "f16",
DataType.bf16: "bf16",
DataType.f32: "f32",
DataType.tf32: "tf32",
DataType.f64: "f64",
DataType.cf16: "cf16",
DataType.cbf16: "cbf16",
DataType.cf32: "cf32",
DataType.ctf32: "ctf32",
DataType.cf64: "cf64",
DataType.cu4: "cu4",
DataType.cu8: "cu8",
DataType.cu16: "cu16",
DataType.cu32: "cu32",
DataType.cu64: "cu64",
DataType.cs4: "cs4",
DataType.cs8: "cs8",
DataType.cs16: "cs16",
DataType.cs32: "cs32",
DataType.cs64: "cs64",
}
DataTypeTag = {
DataType.void: "void",
DataType.b1: "cutlass::uint1b_t",
DataType.u4: "cutlass::uint4b_t",
DataType.u8: "uint8_t",
DataType.u16: "uint16_t",
DataType.u32: "uint32_t",
DataType.u64: "uint64_t",
DataType.s4: "cutlass::int4b_t",
DataType.s8: "int8_t",
DataType.s16: "int16_t",
DataType.s32: "int32_t",
DataType.s64: "int64_t",
DataType.e4m3: 'cutlass::float_e4m3_t',
DataType.e5m2: 'cutlass::float_e5m2_t',
DataType.f16: "cutlass::half_t",
DataType.bf16: "cutlass::bfloat16_t",
DataType.f32: "float",
DataType.tf32: "cutlass::tfloat32_t",
DataType.f64: "double",
DataType.cf16: "cutlass::complex<cutlass::half_t>",
DataType.cbf16: "cutlass::complex<cutlass::bfloat16_t>",
DataType.cf32: "cutlass::complex<float>",
DataType.ctf32: "cutlass::complex<cutlass::tfloat32_t>",
DataType.cf64: "cutlass::complex<double>",
DataType.cu4: "cutlass::complex<cutlass::uint4b_t>",
DataType.cu8: "cutlass::complex<cutlass::uint8_t>",
DataType.cu16: "cutlass::complex<cutlass::uint16_t>",
DataType.cu32: "cutlass::complex<cutlass::uint32_t>",
DataType.cu64: "cutlass::complex<cutlass::uint64_t>",
DataType.cs4: "cutlass::complex<cutlass::int4b_t>",
DataType.cs8: "cutlass::complex<cutlass::int8_t>",
DataType.cs16: "cutlass::complex<cutlass::int16_t>",
DataType.cs32: "cutlass::complex<cutlass::int32_t>",
DataType.cs64: "cutlass::complex<cutlass::int64_t>",
}
DataTypeSize = {
DataType.void: 0,
DataType.b1: 1,
DataType.u4: 4,
DataType.u8: 8,
DataType.u16: 16,
DataType.u32: 32,
DataType.u64: 64,
DataType.s4: 4,
DataType.s8: 8,
DataType.s16: 16,
DataType.s32: 32,
DataType.s64: 64,
DataType.e4m3: 8,
DataType.e5m2: 8,
DataType.f16: 16,
DataType.bf16: 16,
DataType.f32: 32,
DataType.tf32: 32,
DataType.f64: 64,
DataType.cf16: 32,
DataType.cbf16: 32,
DataType.cf32: 64,
DataType.ctf32: 32,
DataType.cf64: 128,
DataType.cu4: 8,
DataType.cu8: 16,
DataType.cu16: 32,
DataType.cu32: 64,
DataType.cu64: 128,
DataType.cs4: 8,
DataType.cs8: 16,
DataType.cs16: 32,
DataType.cs32: 64,
DataType.cs64: 128,
}
###################################################################################################
#
class BlasMode(enum.Enum):
symmetric = enum_auto()
hermitian = enum_auto()
#
BlasModeTag = {
BlasMode.symmetric: 'cutlass::BlasMode::kSymmetric',
BlasMode.hermitian: 'cutlass::BlasMode::kHermitian',
}
#
class ComplexTransform(enum.Enum):
none = enum_auto()
conj = enum_auto()
#
ComplexTransformTag = {
ComplexTransform.none: 'cutlass::ComplexTransform::kNone',
ComplexTransform.conj: 'cutlass::ComplexTransform::kConjugate',
}
#
RealComplexBijection = [
(DataType.f16, DataType.cf16),
(DataType.f32, DataType.cf32),
(DataType.f64, DataType.cf64),
]
#
def is_complex(data_type):
for r, c in RealComplexBijection:
if data_type == c:
return True
return False
#
def get_complex_from_real(real_type):
for r, c in RealComplexBijection:
if real_type == r:
return c
return DataType.invalid
#
def get_real_from_complex(complex_type):
for r, c in RealComplexBijection:
if complex_type == c:
return r
return DataType.invalid
#
class ComplexMultiplyOp(enum.Enum):
multiply_add = enum_auto()
gaussian = enum_auto()
###################################################################################################
#
class MathOperation(enum.Enum):
multiply_add = enum_auto()
multiply_add_saturate = enum_auto()
xor_popc = enum_auto()
and_popc = enum_auto()
multiply_add_fast_bf16 = enum_auto()
multiply_add_fast_f16 = enum_auto()
multiply_add_fast_f32 = enum_auto()
multiply_add_complex_fast_f32 = enum_auto()
multiply_add_complex = enum_auto()
multiply_add_complex_gaussian = enum_auto()
#
MathOperationTag = {
MathOperation.multiply_add: 'cutlass::arch::OpMultiplyAdd',
MathOperation.multiply_add_saturate: 'cutlass::arch::OpMultiplyAddSaturate',
MathOperation.xor_popc: 'cutlass::arch::OpXorPopc',
MathOperation.and_popc: 'cutlass::arch::OpAndPopc',
MathOperation.multiply_add_fast_bf16: 'cutlass::arch::OpMultiplyAddFastBF16',
MathOperation.multiply_add_fast_f16: 'cutlass::arch::OpMultiplyAddFastF16',
MathOperation.multiply_add_fast_f32: 'cutlass::arch::OpMultiplyAddFastF32',
MathOperation.multiply_add_complex_fast_f32: 'cutlass::arch::OpMultiplyAddComplexFastF32',
MathOperation.multiply_add_complex: 'cutlass::arch::OpMultiplyAddComplex',
MathOperation.multiply_add_complex_gaussian: 'cutlass::arch::OpMultiplyAddGaussianComplex',
}
###################################################################################################
#
class LayoutType(enum.Enum):
ColumnMajor = enum_auto()
RowMajor = enum_auto()
ColumnMajorInterleaved2 = enum_auto()
RowMajorInterleaved2 = enum_auto()
ColumnMajorInterleaved32 = enum_auto()
RowMajorInterleaved32 = enum_auto()
ColumnMajorInterleaved64 = enum_auto()
RowMajorInterleaved64 = enum_auto()
TensorNHWC = enum_auto()
TensorNDHWC = enum_auto()
TensorNCHW = enum_auto()
TensorNGHWC = enum_auto()
TensorNC32HW32 = enum_auto()
TensorNC64HW64 = enum_auto()
TensorC32RSK32 = enum_auto()
TensorC64RSK64 = enum_auto()
#
LayoutTag = {
LayoutType.ColumnMajor: 'cutlass::layout::ColumnMajor',
LayoutType.RowMajor: 'cutlass::layout::RowMajor',
LayoutType.ColumnMajorInterleaved2: 'cutlass::layout::ColumnMajorInterleaved<2>',
LayoutType.RowMajorInterleaved2: 'cutlass::layout::RowMajorInterleaved<2>',
LayoutType.ColumnMajorInterleaved32: 'cutlass::layout::ColumnMajorInterleaved<32>',
LayoutType.RowMajorInterleaved32: 'cutlass::layout::RowMajorInterleaved<32>',
LayoutType.ColumnMajorInterleaved64: 'cutlass::layout::ColumnMajorInterleaved<64>',
LayoutType.RowMajorInterleaved64: 'cutlass::layout::RowMajorInterleaved<64>',
LayoutType.TensorNHWC: 'cutlass::layout::TensorNHWC',
LayoutType.TensorNDHWC: 'cutlass::layout::TensorNDHWC',
LayoutType.TensorNCHW: 'cutlass::layout::TensorNCHW',
LayoutType.TensorNGHWC: 'cutlass::layout::TensorNGHWC',
LayoutType.TensorNC32HW32: 'cutlass::layout::TensorNCxHWx<32>',
LayoutType.TensorC32RSK32: 'cutlass::layout::TensorCxRSKx<32>',
LayoutType.TensorNC64HW64: 'cutlass::layout::TensorNCxHWx<64>',
LayoutType.TensorC64RSK64: 'cutlass::layout::TensorCxRSKx<64>',
}
#
TransposedLayout = {
LayoutType.ColumnMajor: LayoutType.RowMajor,
LayoutType.RowMajor: LayoutType.ColumnMajor,
LayoutType.ColumnMajorInterleaved2: LayoutType.RowMajorInterleaved2,
LayoutType.RowMajorInterleaved2: LayoutType.ColumnMajorInterleaved2,
LayoutType.ColumnMajorInterleaved32: LayoutType.RowMajorInterleaved32,
LayoutType.RowMajorInterleaved32: LayoutType.ColumnMajorInterleaved32,
LayoutType.ColumnMajorInterleaved64: LayoutType.RowMajorInterleaved64,
LayoutType.RowMajorInterleaved64: LayoutType.ColumnMajorInterleaved64,
LayoutType.TensorNHWC: LayoutType.TensorNHWC
}
#
ShortLayoutTypeNames = {
LayoutType.ColumnMajor: 'n',
LayoutType.ColumnMajorInterleaved2: 'n2',
LayoutType.ColumnMajorInterleaved32: 'n32',
LayoutType.ColumnMajorInterleaved64: 'n64',
LayoutType.RowMajor: 't',
LayoutType.RowMajorInterleaved2: 't2',
LayoutType.RowMajorInterleaved32: 't32',
LayoutType.RowMajorInterleaved64: 't64',
LayoutType.TensorNHWC: 'nhwc',
LayoutType.TensorNDHWC: 'ndhwc',
LayoutType.TensorNCHW: 'nchw',
LayoutType.TensorNGHWC: 'nghwc',
LayoutType.TensorNC32HW32: 'nc32hw32',
LayoutType.TensorNC64HW64: 'nc64hw64',
LayoutType.TensorC32RSK32: 'c32rsk32',
LayoutType.TensorC64RSK64: 'c64rsk64'
}
#
ShortComplexLayoutNames = {
(LayoutType.ColumnMajor, ComplexTransform.none): 'n',
(LayoutType.ColumnMajor, ComplexTransform.conj): 'c',
(LayoutType.RowMajor, ComplexTransform.none): 't',
(LayoutType.RowMajor, ComplexTransform.conj): 'h'
}
###################################################################################################
class KernelScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
Multistage = enum_auto()
Tma = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedPingpong = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
TmaWarpSpecializedFP8FastAccum = enum_auto()
TmaWarpSpecializedCooperativeFP8FastAccum = enum_auto()
TmaWarpSpecializedPingpongFP8FastAccum = enum_auto()
#
KernelScheduleTag = {
KernelScheduleType.ScheduleAuto: 'cutlass::gemm::collective::KernelScheduleAuto',
KernelScheduleType.Multistage: 'cutlass::gemm::KernelMultistage',
KernelScheduleType.Tma: 'cutlass::gemm::KernelTma',
KernelScheduleType.TmaWarpSpecialized: 'cutlass::gemm::KernelTmaWarpSpecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: 'cutlass::gemm::KernelTmaWarpSpecializedPingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: 'cutlass::gemm::KernelTmaWarpSpecializedCooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedCooperativeFP8FastAccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: 'cutlass::gemm::KernelTmaWarpSpecializedPingpongFP8FastAccum',
}
#
KernelScheduleSuffixes = {
KernelScheduleType.ScheduleAuto: '',
KernelScheduleType.Multistage: '_cpasync',
KernelScheduleType.Tma: '_unspecialized',
KernelScheduleType.TmaWarpSpecialized: '_warpspecialized',
KernelScheduleType.TmaWarpSpecializedPingpong: '_warpspecialized_pingpong',
KernelScheduleType.TmaWarpSpecializedCooperative: '_warpspecialized_cooperative',
KernelScheduleType.TmaWarpSpecializedFP8FastAccum: '_warpspecialized_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum: '_warpspecialized_cooperative_fp8_fastaccum',
KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum: '_warpspecialized_pingpong_fp8_fastaccum',
}
class EpilogueScheduleType(enum.Enum):
ScheduleAuto = enum_auto()
EpilogueTransposed = enum_auto()
NoSmemWarpSpecialized = enum_auto()
TmaWarpSpecialized = enum_auto()
TmaWarpSpecializedCooperative = enum_auto()
#
EpilogueScheduleTag = {
EpilogueScheduleType.ScheduleAuto: 'cutlass::epilogue::collective::EpilogueScheduleAuto',
EpilogueScheduleType.EpilogueTransposed: 'cutlass::gemm::EpilogueTransposed',
EpilogueScheduleType.NoSmemWarpSpecialized: 'cutlass::epilogue::NoSmemWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecialized: 'cutlass::epilogue::TmaWarpSpecialized',
EpilogueScheduleType.TmaWarpSpecializedCooperative: 'cutlass::epilogue::TmaWarpSpecializedCooperative',
}
#
EpilogueScheduleSuffixes = {
EpilogueScheduleType.ScheduleAuto: '',
EpilogueScheduleType.EpilogueTransposed: '',
EpilogueScheduleType.NoSmemWarpSpecialized: '_epi_nosmem',
EpilogueScheduleType.TmaWarpSpecialized: '_epi_tma',
EpilogueScheduleType.TmaWarpSpecializedCooperative: '_epi_tma',
}
class TileSchedulerType(enum.Enum):
Default = enum_auto()
Persistent = enum_auto()
StreamK = enum_auto()
#
TileSchedulerTag = {
TileSchedulerType.Default: 'void',
TileSchedulerType.Persistent: 'cutlass::gemm::PersistentScheduler',
TileSchedulerType.StreamK: 'cutlass::gemm::StreamKScheduler',
}
#
TileSchedulerSuffixes = {
TileSchedulerType.Default: '',
TileSchedulerType.Persistent: '',
TileSchedulerType.StreamK: '_stream_k',
}
###################################################################################################
#
class SideMode(enum.Enum):
Left = enum_auto()
Right = enum_auto()
#
SideModeTag = {
SideMode.Left: 'cutlass::SideMode::kLeft',
SideMode.Right: 'cutlass::SideMode::kRight'
}
#
ShortSideModeNames = {
SideMode.Left: 'ls',
SideMode.Right: 'rs'
}
###################################################################################################
#
class FillMode(enum.Enum):
Lower = enum_auto()
Upper = enum_auto()
#
FillModeTag = {
FillMode.Lower: 'cutlass::FillMode::kLower',
FillMode.Upper: 'cutlass::FillMode::kUpper'
}
#
ShortFillModeNames = {
FillMode.Lower: 'l',
FillMode.Upper: 'u'
}
###################################################################################################
#
class DiagType(enum.Enum):
NonUnit = enum_auto()
Unit = enum_auto()
#
DiagTypeTag = {
DiagType.NonUnit: 'cutlass::DiagType::kNonUnit',
DiagType.Unit: 'cutlass::DiagType::kUnit'
}
#
ShortDiagTypeNames = {
DiagType.NonUnit: 'nu',
DiagType.Unit: 'un'
}
###################################################################################################
#
class OpcodeClass(enum.Enum):
Simt = enum_auto()
TensorOp = enum_auto()
WmmaTensorOp = enum_auto()
SparseTensorOp = enum_auto()
OpcodeClassNames = {
OpcodeClass.Simt: 'simt',
OpcodeClass.TensorOp: 'tensorop',
OpcodeClass.WmmaTensorOp: 'wmma_tensorop',
}
OpcodeClassTag = {
OpcodeClass.Simt: 'cutlass::arch::OpClassSimt',
OpcodeClass.TensorOp: 'cutlass::arch::OpClassTensorOp',
OpcodeClass.WmmaTensorOp: 'cutlass::arch::OpClassWmmaTensorOp',
}
###################################################################################################
#
class OperationKind(enum.Enum):
Gemm = enum_auto()
RankK = enum_auto()
Rank2K = enum_auto()
Trmm = enum_auto()
Symm = enum_auto()
Conv2d = enum_auto()
Conv3d = enum_auto()
#
OperationKindNames = {
OperationKind.Gemm: 'gemm'
, OperationKind.RankK: 'rank_k'
, OperationKind.Rank2K: 'rank_2k'
, OperationKind.Trmm: 'trmm'
, OperationKind.Symm: 'symm'
, OperationKind.Conv2d: 'conv2d'
, OperationKind.Conv3d: 'conv3d'
}
#
class Target(enum.Enum):
library = enum_auto()
#
ArchitectureNames = {
50: 'maxwell',
60: 'pascal',
61: 'pascal',
70: 'volta',
75: 'turing',
80: 'ampere',
89: 'ada',
90: 'hopper'
}
#
SharedMemPerCC = {
70: 96, # 96KB of SMEM
72: 96, # 96KB of SMEM
75: 64, # 64KB of SMEM
80: 163, # 163KB of SMEM - 1KB reserved for the driver
86: 99, # 99KB of SMEM - 1KB reserved for the driver
87: 163, # 163KB of SMEM - 1KB reserved for the driver
89: 99, # 99KB of SMEM - 1KB reserved for the driver
90: 227, # 227KB of SMEM - 1KB reserved for the driver
}
###################################################################################################
#
def SubstituteTemplate(template, values):
text = template
changed = True
while changed:
changed = False
for key, value in values.items():
regex = "\\$\\{%s\\}" % key
newtext = re.sub(regex, value, text)
if newtext != text:
changed = True
text = newtext
return text
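# Illustrative usage (hypothetical template and values):
#   SubstituteTemplate("GemmShape<${m}, ${n}, ${k}>", {'m': '128', 'n': '128', 'k': '32'})
#   -> "GemmShape<128, 128, 32>"
# Because substitution repeats until the text stops changing, a value may itself
# contain ${...} placeholders that are resolved on a later pass.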
###################################################################################################
#
class GemmKind(enum.Enum):
Gemm = enum_auto()
Sparse = enum_auto()
Universal = enum_auto()
Universal3x = enum_auto()
PlanarComplex = enum_auto()
PlanarComplexArray = enum_auto()
Grouped = enum_auto()
#
GemmKindNames = {
GemmKind.Gemm: "gemm",
GemmKind.Sparse: "spgemm",
GemmKind.Universal: "gemm",
GemmKind.Universal3x: "gemm",
GemmKind.PlanarComplex: "gemm_planar_complex",
GemmKind.PlanarComplexArray: "gemm_planar_complex_array",
GemmKind.Grouped: "gemm_grouped"
}
#
class RankKKind(enum.Enum):
Universal = enum_auto()
#
RankKKindNames = {
RankKKind.Universal: "rank_k"
}
#
class TrmmKind(enum.Enum):
Universal = enum_auto()
#
TrmmKindNames = {
TrmmKind.Universal: "trmm"
}
#
class SymmKind(enum.Enum):
Universal = enum_auto()
#
SymmKindNames = {
SymmKind.Universal: "symm"
}
#
class EpilogueFunctor(enum.Enum):
LinearCombination = enum_auto()
LinearCombinationClamp = enum_auto()
#
EpilogueFunctorTag = {
EpilogueFunctor.LinearCombination: 'cutlass::epilogue::thread::LinearCombination',
EpilogueFunctor.LinearCombinationClamp: 'cutlass::epilogue::thread::LinearCombinationClamp',
}
#
class SwizzlingFunctor(enum.Enum):
Identity1 = enum_auto()
Identity2 = enum_auto()
Identity4 = enum_auto()
Identity8 = enum_auto()
Horizontal = enum_auto()
StridedDgradIdentity1 = enum_auto()
StridedDgradIdentity4 = enum_auto()
StridedDgradHorizontal = enum_auto()
StreamK = enum_auto()
#
SwizzlingFunctorTag = {
SwizzlingFunctor.Identity1: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.Identity2: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<2>',
SwizzlingFunctor.Identity4: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.Identity8: 'cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>',
SwizzlingFunctor.Horizontal: 'cutlass::gemm::threadblock::GemmHorizontalThreadblockSwizzle',
SwizzlingFunctor.StridedDgradIdentity1: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<1>',
SwizzlingFunctor.StridedDgradIdentity4: 'cutlass::conv::threadblock::StridedDgradIdentityThreadblockSwizzle<4>',
SwizzlingFunctor.StridedDgradHorizontal: 'cutlass::conv::threadblock::StridedDgradHorizontalThreadblockSwizzle',
SwizzlingFunctor.StreamK: 'cutlass::gemm::threadblock::ThreadblockSwizzleStreamK',
}
#
class GroupScheduleMode(enum.Enum):
Device = enum_auto()
Host = enum_auto()
#
GroupScheduleModeTag = {
GroupScheduleMode.Device: 'cutlass::gemm::kernel::GroupScheduleMode::kDeviceOnly',
GroupScheduleMode.Host: 'cutlass::gemm::kernel::GroupScheduleMode::kHostPrecompute'
}
#
ShortGroupScheduleModeNames = {
GroupScheduleMode.Device: 'Device',
GroupScheduleMode.Host: 'Host'
}
###################################################################################################
#
class ConvKind(enum.Enum):
Fprop = enum_auto()
Dgrad = enum_auto()
Wgrad = enum_auto()
#
ConvKindTag = {
ConvKind.Fprop: 'cutlass::conv::Operator::kFprop',
ConvKind.Dgrad: 'cutlass::conv::Operator::kDgrad',
ConvKind.Wgrad: 'cutlass::conv::Operator::kWgrad'
}
ConvKindNames = {
ConvKind.Fprop: 'fprop',
ConvKind.Dgrad: 'dgrad',
ConvKind.Wgrad: 'wgrad',
}
#
class IteratorAlgorithm(enum.Enum):
Analytic = enum_auto()
Optimized = enum_auto()
FixedChannels = enum_auto()
FewChannels = enum_auto()
FixedStrideDilation = enum_auto()
#
IteratorAlgorithmTag = {
IteratorAlgorithm.Analytic: 'cutlass::conv::IteratorAlgorithm::kAnalytic',
IteratorAlgorithm.Optimized: 'cutlass::conv::IteratorAlgorithm::kOptimized',
IteratorAlgorithm.FixedChannels: 'cutlass::conv::IteratorAlgorithm::kFixedChannels',
IteratorAlgorithm.FewChannels: 'cutlass::conv::IteratorAlgorithm::kFewChannels',
IteratorAlgorithm.FixedStrideDilation: 'cutlass::conv::IteratorAlgorithm::kFixedStrideDilation'
}
IteratorAlgorithmNames = {
IteratorAlgorithm.Analytic: 'analytic',
IteratorAlgorithm.Optimized: 'optimized',
IteratorAlgorithm.FixedChannels: 'fixed_channels',
IteratorAlgorithm.FewChannels: 'few_channels',
IteratorAlgorithm.FixedStrideDilation: 'fixed_stride_dilation'
}
#
class StrideSupport(enum.Enum):
Strided = enum_auto()
Unity = enum_auto()
Fixed = enum_auto()
#
StrideSupportTag = {
StrideSupport.Strided: 'cutlass::conv::StrideSupport::kStrided',
StrideSupport.Unity: 'cutlass::conv::StrideSupport::kUnity',
StrideSupport.Fixed: 'cutlass::conv::StrideSupport::kFixed'
}
StrideSupportNames = {
StrideSupport.Strided: '',
StrideSupport.Unity: 'unity_stride',
StrideSupport.Fixed: 'fixed_stride'
}
#
class GroupMode(enum.Enum):
NoneGroup = enum_auto() # dense conv (G=1)
SingleGroup = enum_auto() # grouped convolution (single group per CTA)
MultipleGroup = enum_auto() # grouped convolution ( multiple groups per CTA)
Depthwise = enum_auto() # Depthwise convolution ( C=K=G )
#
GroupModeTag = {
GroupMode.NoneGroup: 'cutlass::conv::GroupMode::kNone',
GroupMode.SingleGroup: 'cutlass::conv::GroupMode::kSingleGroup',
GroupMode.MultipleGroup: 'cutlass::conv::GroupMode::kMultipleGroup',
GroupMode.Depthwise: 'cutlass::conv::GroupMode::kDepthwise',
}
GroupModeNames = {
GroupMode.NoneGroup: '',
GroupMode.SingleGroup: 'single_group',
GroupMode.MultipleGroup: 'multiple_group',
GroupMode.Depthwise: 'depthwise',
}
###################################################################################################
#
class MathInstruction:
def __init__(self, instruction_shape, element_a, element_b, element_accumulator, opcode_class, math_operation = MathOperation.multiply_add):
self.instruction_shape = instruction_shape
self.element_a = element_a
self.element_b = element_b
self.element_accumulator = element_accumulator
self.opcode_class = opcode_class
self.math_operation = math_operation
#
class TileDescription:
def __init__(self, threadblock_shape, stages, warp_count, math_instruction, min_compute, max_compute, cluster_shape = [1,1,1]):
self.threadblock_shape = threadblock_shape
self.tile_shape = threadblock_shape
self.stages = stages
self.warp_count = warp_count
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
self.cluster_shape = cluster_shape
def procedural_name(self):
if self.minimum_compute_capability >= 90:
return "{tbm}x{tbn}x{tbk}_{cm}x{cn}x{ck}_{s}".format(
tbm = self.threadblock_shape[0],
tbn = self.threadblock_shape[1],
tbk = self.threadblock_shape[2],
cm = self.cluster_shape[0],
cn = self.cluster_shape[1],
ck = self.cluster_shape[2],
s = self.stages)
else:
return "%dx%d_%dx%d" % (self.threadblock_shape[0], self.threadblock_shape[1], self.threadblock_shape[2], self.stages)
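# Illustrative names (hypothetical shapes): an SM90 tile [128, 128, 64] with cluster
# [2, 1, 1] and 0 stages emits "128x128x64_2x1x1_0"; a pre-SM90 tile [128, 128, 32]
# with 3 stages emits "128x128_32x3".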
#
class Direct2dConvFixedStrideDilationTileDescription:
def __init__(self, threadblock_output_shape, filter_shape, stages, stride, dilation, warp_count, math_instruction, min_compute, max_compute):
self.threadblock_shape = [threadblock_output_shape[0]*threadblock_output_shape[1]*threadblock_output_shape[2], threadblock_output_shape[3], filter_shape[0]*filter_shape[1]]
self.threadblock_output_shape = threadblock_output_shape
self.filter_shape = filter_shape
self.stages = stages
self.warp_count = warp_count
self.stride = stride
self.dilation = dilation
self.math_instruction = math_instruction
self.minimum_compute_capability = min_compute
self.maximum_compute_capability = max_compute
def procedural_name(self):
str_name = "%dx%dx%d_%dx%dx%dx%d_%d_filter%dx%d" % (self.threadblock_shape[0],
self.threadblock_shape[1],
self.threadblock_shape[2],
self.threadblock_output_shape[0],
self.threadblock_output_shape[1],
self.threadblock_output_shape[2],
self.threadblock_output_shape[3],
self.stages,
self.filter_shape[0],
self.filter_shape[1])
# Fixed stride and dilation
if self.stride != [-1, -1] and self.dilation != [-1, -1]:
str_name += "_stride%dx%d_dilation%dx%d" % (self.stride[0],
self.stride[1],
self.dilation[0],
self.dilation[1])
return str_name
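# Illustrative name (hypothetical shapes): threadblock_output_shape [1, 8, 8, 64] with a
# 3x3 filter and 3 stages gives threadblock_shape [64, 64, 9] and the name
# "64x64x9_1x8x8x64_3_filter3x3"; with stride [1, 1] and dilation [1, 1] the suffix
# "_stride1x1_dilation1x1" is appended.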
#
class TensorDescription:
def __init__(self, element, layout, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.alignment = alignment
self.complex_transform = complex_transform
#
class SymmetricTensorDescription:
def __init__(self, element, layout, fill_mode, alignment = 1, complex_transform = ComplexTransform.none, side_mode = SideMode.Left):
self.element = element
self.layout = layout
self.fill_mode = fill_mode
self.alignment = alignment
self.complex_transform = complex_transform
self.side_mode = side_mode
#
class TriangularTensorDescription:
def __init__(self, element, layout, side_mode, fill_mode, diag_type, alignment = 1, complex_transform = ComplexTransform.none):
self.element = element
self.layout = layout
self.side_mode = side_mode
self.fill_mode = fill_mode
self.diag_type = diag_type
self.alignment = alignment
self.complex_transform = complex_transform
###################################################################################################
#
def CalculateSmemUsage(operation):
cta_shape = operation.tile_description.threadblock_shape
stages = operation.tile_description.stages
if operation.operation_kind == OperationKind.Gemm and operation.gemm_kind == GemmKind.Sparse:
# Elements represented by 8 bits of metadata (based on 4:8, 2:4 or 1:2 sparsity)
if DataTypeSize[operation.A.element] == 32:
elements_per_8b_md = 2
elif DataTypeSize[operation.A.element] == 4:
elements_per_8b_md = 8
else:
elements_per_8b_md = 4
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * (cta_shape[2] // 2) // 8 + \
DataTypeSize[operation.B.element] * cta_shape[1] * cta_shape[2] // 8 + \
cta_shape[0] * (cta_shape[2] // 2) // elements_per_8b_md
else:
# A few BLAS3 operations only have an A tensor, so A's element size is used for both terms
smem_per_stage = DataTypeSize[operation.A.element] * cta_shape[0] * cta_shape[2] // 8 + \
DataTypeSize[operation.A.element] * cta_shape[1] * cta_shape[2] // 8
smem_usage = smem_per_stage * stages
return (smem_usage >> 10)
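# Illustrative estimate (hypothetical operation): a non-sparse f16 GEMM with a
# 128x128x32 threadblock and 3 stages uses (128*32 + 128*32) * 16 / 8 = 16384 bytes
# per stage, so the function returns 48 (KiB) after the stage multiply and the >> 10.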
###################################################################################################
| cutlass-main | tools/library/scripts/library.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
from library import *
from gemm_operation import *
from rank_k_operation import *
from rank_2k_operation import *
from trmm_operation import *
from symm_operation import *
from conv2d_operation import *
from conv3d_operation import *
import logging
###################################################################################################
_LOGGER = logging.getLogger(__name__)
class EmitOperationKindLibrary:
def __init__(self, generated_path, kind, args):
self.generated_path = generated_path
self.kind = kind
self.args = args
self.emitters = {
OperationKind.Gemm: EmitGemmConfigurationLibrary
, OperationKind.Conv2d: EmitConv2dConfigurationLibrary
, OperationKind.Conv3d: EmitConv3dConfigurationLibrary
, OperationKind.RankK: EmitRankKConfigurationLibrary
, OperationKind.Rank2K: EmitRank2KConfigurationLibrary
, OperationKind.Trmm: EmitTrmmConfigurationLibrary
, OperationKind.Symm: EmitSymmConfigurationLibrary
}
self.configurations = []
self.header_template ="""
/*
Generated by manifest.py - Do not edit.
*/
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.entry_template = """
//
// Entry point to construct operations
//
void initialize_all_${operation_name}_operations(Manifest &manifest) {
"""
self.configuration_prototype_template = "void initialize_${configuration_name}(Manifest &manifest);\n"
self.configuration_template =" initialize_${configuration_name}(manifest);\n"
self.epilogue_template ="""
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
"""
#
def __enter__(self):
self.operation_path = os.path.join(self.generated_path, OperationKindNames[self.kind])
os.mkdir(self.operation_path)
self.top_level_path = os.path.join(self.operation_path, "all_%s_operations.cu" % OperationKindNames[self.kind])
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.header_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, configuration_name, operations):
with self.emitters[self.kind](self.operation_path, configuration_name) as configuration_emitter:
for operation in operations:
configuration_emitter.emit(operation)
self.source_files.append(configuration_emitter.configuration_path)
self.configurations.append(configuration_name)
self.top_level_file.write(SubstituteTemplate(self.configuration_prototype_template, {'configuration_name': configuration_name} ))
#
def __exit__(self, exception_type, exception_value, traceback):
self.top_level_file.write(SubstituteTemplate(self.entry_template, {'operation_name': OperationKindNames[self.kind]}))
for configuration_name in self.configurations:
self.top_level_file.write(SubstituteTemplate(self.configuration_template, {'configuration_name': configuration_name}))
self.top_level_file.write(self.epilogue_template)
self.top_level_file.close()
class EmitInterfaceLibrary:
def __init__(self, generated_path, operation_count, args):
self.generated_path = generated_path
self.args = args
self.prototypes = []
self.fn_calls = []
self.operation_count = str(operation_count)
self.top_level_hdr_template = '''
/*
Generated by manifest.py - Do not edit.
*/
'''
self.top_level_prologue = '''
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
namespace cutlass {
\tnamespace library {
${prototypes}
\t\tvoid initialize_all(Manifest &manifest) {
\t\t\tmanifest.reserve(${operation_count});\n\n
${fn_calls}
\t\t\t}
\t} // namespace library
} // namespace cutlass
'''
#
def __enter__(self):
self.top_level_path = os.path.join(self.generated_path, 'initialize_all.cpp')
self.top_level_file = open(self.top_level_path, "w")
self.top_level_file.write(self.top_level_hdr_template)
self.source_files = [self.top_level_path,]
return self
#
def emit(self, operation_name):
self.prototypes.append(SubstituteTemplate(
"\t\tvoid initialize_all_${operation_kind}_operations(Manifest &manifest);",
{'operation_kind': operation_name}))
self.fn_calls.append(SubstituteTemplate(
"\t\t\tinitialize_all_${operation_kind}_operations(manifest);",
{'operation_kind': operation_name}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.top_level_file.write(SubstituteTemplate(self.top_level_prologue, {'prototypes':"\n".join(self.prototypes),
'fn_calls':"\n".join(self.fn_calls),
'operation_count': self.operation_count}))
self.top_level_file.close()
###################################################################################################
###################################################################################################
class Options:
def __init__(self):
pass
###################################################################################################
#
class Manifest:
#
def __init__(self, args = None):
self.operations = {}
self.args = args
self.operation_count = 0
self.operations_by_name = {}
self.kernel_filter = ''
self.kernel_filter_list = []
self.kernel_names = []
self.operations_enabled = []
self.selected_kernels = []
self.ignore_kernel_names = []
self.compute_capabilities = [50,]
self.curr_build_dir = '.'
self.filter_by_cc = True
if self.args:
self.kernel_filter = self.args.kernels
self.curr_build_dir = args.curr_build_dir
architectures = args.architectures.split(';') if len(args.architectures) else ['50',]
architectures = [x if x != '90a' else '90' for x in architectures]
self.compute_capabilities = [int(x) for x in architectures]
if args.filter_by_cc in ['false', 'False', '0']:
self.filter_by_cc = False
if args.operations == 'all':
self.operations_enabled = []
else:
operations_list = [
OperationKind.Gemm
, OperationKind.Conv2d
, OperationKind.Conv3d
, OperationKind.RankK
, OperationKind.Trmm
, OperationKind.Symm
]
self.operations_enabled = [x for x in operations_list if OperationKindNames[x] in args.operations.split(',')]
if args.kernels == 'all':
self.kernel_names = []
else:
self.kernel_names = [x for x in args.kernels.split(',') if x != '']
self.ignore_kernel_names = [x for x in args.ignore_kernels.split(',') if x != '']
if args.kernel_filter_file is None:
self.kernel_filter_list = []
else:
self.kernel_filter_list = self.get_kernel_filters(args.kernel_filter_file)
_LOGGER.info("Using {filter_count} kernel filters from {filter_file}".format(
filter_count = len(self.kernel_filter_list),
filter_file = args.kernel_filter_file))
self.operation_count = 0
self.operations_by_name = {}
self.disable_full_archs_compilation = args.disable_full_archs_compilation
def get_kernel_filters (self, kernelListFile):
if os.path.isfile(kernelListFile):
with open(kernelListFile, 'r') as fileReader:
lines = [line.rstrip() for line in fileReader if not line.startswith("#")]
lines = [re.compile(line) for line in lines if line]
return lines
else:
return []
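  # A minimal sketch of the expected filter-file format (hypothetical contents):
  # one regular expression per line, lines starting with '#' are skipped, e.g.
  #
  #   # only pick tensor-op GEMM kernels
  #   ^cutlass_tensorop_.*gemm.*$
  #
  # Each surviving line is compiled with re.compile and later applied with
  # re.search in filter_out_kernels below.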
#
def filter_out_kernels(self, kernel_name, kernel_filter_list):
for kernel_filter_re in kernel_filter_list:
if kernel_filter_re.search(kernel_name) is not None:
return True
return False
#
def _filter_string_matches(self, filter_string, haystack):
''' Returns true if all substrings appear in the haystack in order'''
substrings = filter_string.split('*')
for sub in substrings:
idx = haystack.find(sub)
if idx < 0:
return False
haystack = haystack[idx + len(sub):]
return True
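  # Illustration (hypothetical kernel name): the filter string 'tensorop*nn*align4'
  # splits into ['tensorop', 'nn', 'align4'], which appear left-to-right in
  # 'cutlass_tensorop_s1688gemm_128x128_nn_align4', so the filter matches; the
  # reversed filter 'align4*tensorop' would not match, because 'tensorop' does not
  # occur after 'align4'.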
#
def filter(self, operation):
''' Filtering operations based on various criteria'''
# filter based on compute capability
enabled = not (self.filter_by_cc)
for cc in self.compute_capabilities:
if cc >= operation.tile_description.minimum_compute_capability and \
cc <= operation.tile_description.maximum_compute_capability and \
(cc not in SharedMemPerCC or SharedMemPerCC[cc] >= CalculateSmemUsage(operation)):
enabled = True
break
if not enabled:
return False
if len(self.operations_enabled) and not operation.operation_kind in self.operations_enabled:
return False
# eliminate duplicates
if operation.procedural_name() in self.operations_by_name.keys():
return False
# Filter based on list of valid substrings
if len(self.kernel_names):
name = operation.procedural_name()
enabled = False
# compare against the include list
for name_substr in self.kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug("Kernel {kernel} included due to filter string '{filt}'.".format(
kernel = operation.procedural_name(),
filt = name_substr))
enabled = True
break
# compare against the exclude list
for name_substr in self.ignore_kernel_names:
if self._filter_string_matches(name_substr, name):
_LOGGER.debug("Kernel {kernel} ignored due to filter string '{filt}'.".format(
kernel = operation.procedural_name(),
filt = name_substr))
enabled = False
break
if len(self.kernel_filter_list) > 0:
if self.filter_out_kernels(operation.procedural_name(), self.kernel_filter_list):
_LOGGER.debug("Kernel {kernel} matched via kernel filter file.".format(kernel = operation.procedural_name()))
enabled = True
else:
_LOGGER.debug("Kernel {kernel} culled due to no match in kernel filter file.".format(kernel = operation.procedural_name()))
enabled = False
# TODO: filter based on compute data type
return enabled
#
#
def append(self, operation):
'''
Inserts the operation.
operation_kind -> configuration_name -> []
'''
if self.filter(operation):
self.selected_kernels.append(operation.procedural_name())
self.operations_by_name[operation.procedural_name()] = operation
# add the configuration
configuration_name = operation.configuration_name()
if operation.operation_kind not in self.operations.keys():
self.operations[operation.operation_kind] = {}
if configuration_name not in self.operations[operation.operation_kind].keys():
self.operations[operation.operation_kind][configuration_name] = []
self.operations[operation.operation_kind][configuration_name].append(operation)
self.operation_count += 1
else:
_LOGGER.debug("Culled {} from manifest".format(operation.procedural_name()))
#
#
def emit(self, target = GeneratorTarget.Library):
operation_emitters = {
GeneratorTarget.Library: EmitOperationKindLibrary
}
interface_emitters = {
GeneratorTarget.Library: EmitInterfaceLibrary
}
generated_path = os.path.join(self.curr_build_dir, 'generated')
# create generated/
if os.path.exists(generated_path):
shutil.rmtree(generated_path)
os.mkdir(generated_path)
source_files = []
with interface_emitters[target](generated_path, self.operation_count, self.args) as iface_emitter:
for operation_kind, configurations in self.operations.items():
iface_emitter.emit(OperationKindNames[operation_kind])
source_files += iface_emitter.source_files
# for each operation kind, emit initializer for all configurations
for operation_kind, configurations in self.operations.items():
with operation_emitters[target](generated_path, operation_kind, self.args) as operation_kind_emitter:
for configuration_name, operations in configurations.items():
_LOGGER.info("Emitting {config} with {num_ops} operations.".format(
config = configuration_name, num_ops = len(operations)))
operation_kind_emitter.emit(configuration_name, operations)
source_files += operation_kind_emitter.source_files
# write the manifest.cmake file containing paths from all targets
manifest_path = os.path.join(generated_path, "manifest.cmake")
with open(manifest_path, "w") as manifest_file:
target_name = 'cutlass_library_objs'
target_text = SubstituteTemplate("""cutlass_target_sources(
${target_name}
BATCH_SOURCES ON
PRIVATE
""", { 'target_name': target_name})
manifest_file.write(target_text + '\n\n')
for source_file in source_files:
manifest_file.write(" %s\n" % str(source_file.replace('\\', '/')))
manifest_file.write(")\n")
if self.disable_full_archs_compilation:
def for_hopper(name):
pass
def for_ampere(name):
return "16816" in name or \
"16832" in name or \
"16864" in name or \
("1688" in name and "tf32" in name)
def for_turing(name):
return ("1688" in name and "tf32" not in name) or \
"8816" in name
def for_volta(name):
return "884" in name
def is_cpp(name):
return name.endswith(".cpp")
def get_src_archs_str_given_requested_cuda_archs(archs, source_file):
intersected_archs = archs & set(self.compute_capabilities)
if intersected_archs == set():
raise RuntimeError(
"""
Empty archs set for file {} after taking
the intersection of {} (global requested archs) and
{} (per file requested archs)
""".format(source_file, set(self.compute_capabilities), archs))
else:
return " ".join(map(str, intersected_archs))
for source_file in source_files:
if is_cpp(source_file):
continue # skip because source is cpp
elif for_ampere(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({80, 87, 90}, source_file)
elif for_turing(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({75}, source_file)
elif for_volta(source_file):
archs_str = get_src_archs_str_given_requested_cuda_archs({70, 72}, source_file)
else:
raise RuntimeError("Per file archs are not set {}, as there is no rule specified for this file pattern".format(source_file))
manifest_file.write("cutlass_apply_cuda_gencode_flags({} SM_ARCHS {})\n".format(str(source_file.replace('\\', '/')), archs_str))
#
###################################################################################################
| cutlass-main | tools/library/scripts/manifest.py |
#
# \file rank_k_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class RankKOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.RankK
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
self.A = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syrk' if self.blas_mode == BlasMode.symmetric else 'herk'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRankKUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::RankK<
${element_a}, ${layout_a},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${split_k_serial},
${math_operation},
${transform_a},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitRankKConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRankKUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'RankKOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by rank_k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| cutlass-main | tools/library/scripts/rank_k_operation.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# System imports
import struct
import io
import ctypes
# CUDA Python import
from cuda import cuda
from cuda import nvrtc
# CUTLASS imports
from library import *
from gemm_operation import EmitGemmUniversalInstance
#################################################################################################
#
# CUTLASS Py Runtime Components
#
#################################################################################################
#
def MaxAlignment(fmt):
align = 1
for x in fmt:
align = max(align, struct.calcsize(x))
return align
#
def AlignedOffset(offset, align):
remainder = (offset % align)
if remainder:
offset += (align - remainder)
return offset
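# Worked example: AlignedOffset(10, 8) rounds 10 up to the next multiple of 8 and
# returns 16, while AlignedOffset(16, 8) returns 16 unchanged.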
#
def PackInteger(host_workspace, offset, value):
  fmt = "i"
  # Align to the integer's natural boundary before packing so the write location
  # and the returned offset stay consistent.
  offset = AlignedOffset(offset, 4)
  struct.pack_into(fmt, host_workspace, offset, value)
  return offset + struct.calcsize(fmt)
#
def PackDevicePointer(host_workspace, offset, value):
fmt = "P"
offset = AlignedOffset(offset, 8)
struct.pack_into(fmt, host_workspace, offset, value)
return offset + struct.calcsize(fmt)
#
def ceil_div(a, b):
return -(a // -b)
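# Worked example: ceil_div(7, 3) == 3, because -(7 // -3) == -(-3); the negated
# floor division gives ceiling division without floating point.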
#################################################################################################
#
class PitchLinearCoord:
def __init__(self, contiguous, strided):
self.contiguous = contiguous
self.strided = strided
#
class GemmCoord:
def __init__(self, m = 1, n = 1, k = 1):
self.m = m
self.n = n
self.k = k
self.fmt = "iii"
#
def ceil_div(self, rhs):
return GemmCoord(ceil_div(self.m, rhs.m), ceil_div(self.n, rhs.n), ceil_div(self.k, rhs.k))
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def pack_into(self, host_workspace, offset):
offset = AlignedOffset(offset, 4)
struct.pack_into(
self.fmt,
host_workspace,
offset,
self.m, self.n, self.k)
return offset + self.size()
#
class TensorRef:
def __init__(self, pointer = None, layout = 0):
self.pointer = pointer
self.layout = layout
def __str__(self):
return "(%x, %d)" % (self.pointer._ptr, self.layout)
#################################################################################################
#
class PredicatedTileAccessIteratorDesc:
'''
'''
def __init__(
self,
element_size_bits,
advance_rank,
threadblock_shape,
threadmap_iterations,
threadmap_delta):
self.element_size_bits = element_size_bits
self.advance_rank = advance_rank
self.threadblock_shape = threadblock_shape
self.threadmap_iterations = threadmap_iterations
self.threadmap_delta = threadmap_delta
#
class PredicatedTileAccessIteratorParams:
'''
'''
#
def __init__(self, desc, label):
self.desc = desc
self.label = label
self.fmt = "qqqq"
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, stride):
offset = AlignedOffset(offset, self.alignment())
inc_strided = stride * \
self.desc.threadmap_delta.strided * \
self.desc.element_size_bits // 8
if self.desc.advance_rank:
inc_advance = self.desc.threadblock_shape.strided * \
stride * \
self.desc.element_size_bits // 8
else:
inc_advance = self.desc.threadblock_shape.contiguous * \
self.desc.element_size_bits // 8
inc_next = inc_advance - (self.desc.threadmap_iterations.strided - 1) * \
self.desc.threadmap_delta.strided * \
stride * \
self.desc.element_size_bits // 8
struct.pack_into(
self.fmt,
host_workspace,
offset,
stride, inc_strided, inc_next, inc_advance)
return offset + self.size()
#
#################################################################################################
#
class EpilogueTileDesc:
'''
'''
def __init__(self, column, row, group, cluster, tile):
self.column = column
self.row = row
self.group = group
self.cluster = cluster
self.tile = tile
#
class EpilogueThreadMap:
'''
'''
def __init__(self, threads, elements_per_access, element_size_bits, shape, iterations, delta, count):
self.threads = threads
self.elements_per_access = elements_per_access
self.element_size_bits = element_size_bits
self.shape = shape
self.iterations = iterations
self.delta = delta
self.count = count
pass
#
class EpilogueTileIteratorParams:
'''
'''
#
def __init__(self, desc, label):
self.desc = desc
self.label = label
self.fmt = "qqqqqqqq"
#
def size(self):
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, stride):
stride = stride * self.desc.element_size_bits // 8
offset = AlignedOffset(offset, self.alignment())
increment_row = stride * self.desc.delta.row
increment_group = stride * self.desc.delta.group \
- stride * self.desc.delta.row * (self.desc.iterations.row - 1)
increment_cluster = stride * self.desc.delta.cluster \
- stride * self.desc.delta.group * (self.desc.iterations.group - 1) \
- stride * self.desc.delta.row * (self.desc.iterations.row - 1)
advance_row = stride * self.desc.shape.row
advance_group = stride * \
(self.desc.shape.group - 1) * \
self.desc.shape.row * \
self.desc.count.row
advance_cluster = stride * \
self.desc.count.group * \
self.desc.shape.group * \
self.desc.count.row * \
self.desc.shape.row
advance_tile = stride * \
self.desc.shape.group * \
self.desc.shape.row * \
self.desc.shape.cluster * \
self.desc.shape.tile
struct.pack_into(
self.fmt, \
host_workspace, \
offset, \
stride, \
increment_row, increment_group, increment_cluster, \
advance_row, advance_group, advance_cluster, advance_tile)
return offset + self.size()
#
#################################################################################################
#
# Launch configuration
#
#################################################################################################
class LaunchConfiguration:
def __init__(self, grid = [1,1,1], block = [1,1,1], smem = 0):
self.grid = grid
self.block = block
self.shared_memory_capacity = smem
#################################################################################################
#
# Functors
#
#################################################################################################
#
class Functor:
def __init__(self):
self.decl = ''
self.definition = ''
self.fmt = ''
self.identifier = ''
#
def emit_declaration(self):
return self.decl
#
def emit_definition(self):
return self.definition
#
def size(self):
'''
Size of the packed Params structure
'''
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, arguments):
return offset + self.size()
#################################################################################################
#
class LinearCombinationFunctorArguments:
def __init__(self, alpha = 1.0, beta = 0.0):
self.alpha = alpha
self.beta = beta
self.alpha_ptr = 0
self.beta_ptr = 0
#
class LinearCombinationFunctor(Functor):
def __init__(self):
super().__init__()
self.decl = """
cutlass::epilogue::thread::LinearCombination<
float,
1,
float,
float
>"""
self.identifier = 'linear_combination'
self.fmt = "ffPP"
#
def size(self):
'''
Size of the packed Params structure
'''
return struct.calcsize(self.fmt)
#
def alignment(self):
return MaxAlignment(self.fmt)
#
def initialize(self, host_workspace, offset, arguments):
offset = AlignedOffset(offset, self.alignment())
struct.pack_into(
self.fmt,
host_workspace, offset,
arguments.alpha, arguments.beta, arguments.alpha_ptr, arguments.beta_ptr)
return offset + self.size()
#################################################################################################
#
# Base class for an executable operation
#
#################################################################################################
#
class ExecutableOperation:
'''
'''
def __init__(self, operation):
self.operation = operation
self.module = None
self.kernel = None
#
def name(self):
return self.operation.procedural_name()
#
def emit(self):
return ''
#
def can_implement(self, configuration, arguments):
return False
#
def get_host_workspace_size(self, arguments):
return 0
#
def get_device_workspace_size(self, arguments):
return 0
#
def plan(self, arguments):
return LaunchConfiguration()
#
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream = cuda.CUstream(0)):
raise NotImplementedError()
#
def run(self, host_workspace, device_workspace, launch_config, stream = cuda.CUstream(0)):
cArg = (ctypes.c_char * len(host_workspace)).from_buffer(host_workspace)
packed = (ctypes.c_void_p * 1)()
packed[0] = ctypes.addressof(cArg)
err, = cuda.cuLaunchKernel(
self.kernel,
launch_config.grid[0], launch_config.grid[1], launch_config.grid[2],
launch_config.block[0], launch_config.block[1], launch_config.block[2],
launch_config.shared_memory_capacity,
stream,
packed,
0)
return err
#################################################################################################
#
class GemmArguments:
'''
'''
def __init__(self):
self.problem_size = GemmCoord(0, 0, 0)
self.A = TensorRef()
self.B = TensorRef()
self.C = TensorRef()
self.D = TensorRef()
self.output_op = LinearCombinationFunctorArguments()
#
class ThreadblockSwizzle:
def __init__(self, threadblock_shape, log_threadblock_cohort = 0):
self.threadblock_shape = threadblock_shape
self.log_threadblock_cohort = log_threadblock_cohort
def grid_tiled_shape(self, problem_size):
return GemmCoord(
ceil_div(problem_size.m, self.threadblock_shape.m),
ceil_div(problem_size.n, self.threadblock_shape.n),
1)
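  # Illustration (hypothetical problem size): with the GemmCoord(128, 128, 8)
  # threadblock shape used by Gemm below, a 1024x512x4096 problem maps to a
  # grid_tiled_shape of (8, 4, 1), i.e. ceil_div(1024, 128) by ceil_div(512, 128).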
#
class Gemm(ExecutableOperation):
'''
GEMM manages the CUTLASS runtime components
'''
#
def __init__(self, operation):
super().__init__(operation)
self.emitter = EmitGemmUniversalInstance('_type')
self.threadblock_swizzle = ThreadblockSwizzle(GemmCoord(128, 128, 8))
self.threads = 256
self.shared_memory_capacity = (32 << 10)
self.params_A = PredicatedTileAccessIteratorParams(
PredicatedTileAccessIteratorDesc(
32,
1,
PitchLinearCoord(128, 8),
PitchLinearCoord(1, 4),
PitchLinearCoord(1, 2)), 'A')
self.params_B = PredicatedTileAccessIteratorParams(
PredicatedTileAccessIteratorDesc(
32,
1,
PitchLinearCoord(128, 8),
PitchLinearCoord(1, 4),
PitchLinearCoord(1, 2)), 'B')
self.params_C = EpilogueTileIteratorParams(
EpilogueThreadMap(
256,
1,
32,
EpilogueTileDesc(128, 1, 4, 4, 1),
EpilogueTileDesc(4, 1, 2, 1, 1),
EpilogueTileDesc(32, 1, 8, 1, 1),
EpilogueTileDesc(1, 4, 2, 1, 8)), 'C')
self.params_D = EpilogueTileIteratorParams(
EpilogueThreadMap(
256,
1,
32,
EpilogueTileDesc(128, 1, 4, 4, 1),
EpilogueTileDesc(4, 1, 2, 1, 1),
EpilogueTileDesc(32, 1, 8, 1, 1),
EpilogueTileDesc(1, 4, 2, 1, 8)), 'D')
self.output_op = LinearCombinationFunctor()
#
def emit(self):
return self.emitter.emit(self.operation)
#
def can_implement(self, configuration, arguments):
pass
#
def get_host_workspace_size(self, arguments):
return 336
#
def get_device_workspace_size(self, arguments):
return 0
#
def plan(self, arguments):
grid = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
return LaunchConfiguration([grid.m, grid.n, grid.k], [self.threads, 1, 1], self.shared_memory_capacity)
#
def initialize(self, host_workspace, device_workspace, launch_config, arguments, stream = cuda.CUstream(0)):
offset = 0
# Compute intermediate results
swizzle_log_tile = 0
gemm_mode = 0
batch_count = 1
gemm_k_size = arguments.problem_size.k
# Pack into the host workspace buffer
offset = arguments.problem_size.pack_into(host_workspace, offset)
grid_tiled_shape = self.threadblock_swizzle.grid_tiled_shape(arguments.problem_size)
offset = grid_tiled_shape.pack_into(host_workspace, offset)
offset = PackInteger(host_workspace, offset, swizzle_log_tile)
offset = self.params_A.initialize(host_workspace, offset, arguments.A.layout)
offset = self.params_B.initialize(host_workspace, offset, arguments.B.layout)
offset = self.params_C.initialize(host_workspace, offset, arguments.C.layout)
offset = self.params_D.initialize(host_workspace, offset, arguments.D.layout)
offset = self.output_op.initialize(host_workspace, offset, arguments.output_op)
offset = PackInteger(host_workspace, offset, gemm_mode)
offset = PackInteger(host_workspace, offset, batch_count)
offset = PackInteger(host_workspace, offset, gemm_k_size)
offset = PackDevicePointer(host_workspace, offset, int(arguments.A.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.B.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.C.pointer))
offset = PackDevicePointer(host_workspace, offset, int(arguments.D.pointer))
return offset
#################################################################################################
#
# Module represents a compilation unit
#
#################################################################################################
#
class CompilationOptions:
'''
Compilation options.
'''
#
def __init__(self, architectures = [80], include_paths = []):
self.includes = []
self.include_paths = include_paths
self.flags = ['-std=c++11', '-default-device']
self.architectures = architectures
#
def get(self):
options = []
for flag in self.flags:
options.append(bytes(str.encode(flag)))
for incl in self.include_paths:
options.append(bytes(str.encode('--include-path=%s' % incl)))
arch_list = "-arch="
for idx, arch in enumerate(self.architectures):
if idx:
arch_list += ","
arch_list += "sm_%d" % arch
options.append(bytes(str.encode(arch_list)))
return options
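  # Usage sketch (hypothetical paths): CompilationOptions(architectures=[80, 90],
  # include_paths=['/usr/local/cuda/include']).get() yields
  #   [b'-std=c++11', b'-default-device',
  #    b'--include-path=/usr/local/cuda/include', b'-arch=sm_80,sm_90']
  # which is the option list handed to nvrtcCompileProgram below.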
IncludeTemplate = r'''#include "${include}"
'''
KernelTemplate = r'''
extern "C"
__global__ void
${operation_name}(${operation_name}${operation_suffix}::Params params) {
// Dynamic shared memory base pointer
extern __shared__ int SharedStorageBase[];
// Declare pointer to dynamic shared memory.
${operation_name}${operation_suffix}::SharedStorage *shared_storage =
reinterpret_cast<${operation_name}${operation_suffix}::SharedStorage *>(SharedStorageBase);
${operation_name}${operation_suffix} op;
op(params, *shared_storage);
}
'''
#
class Module:
def __init__(self, name, operations, compilation_options):
self.name = name
self.operations = operations
self.module = None
self.log = None
self.cubin_image = None
self.source_buffer = ''
#
# Emit source
#
self.emit_()
#
# Compile
#
self.compile_(compilation_options)
#
# Load module
#
self.load_()
# Done
return
# Emit a source buffer
def emit_(self):
# 1. Includes
includes = []
for operation in self.operations:
for incl in operation.emitter.includes:
if incl not in includes:
includes.append(incl)
for incl in includes:
self.source_buffer += SubstituteTemplate(IncludeTemplate, { 'include': incl} )
# 2. Operations
for operation in self.operations:
self.source_buffer += operation.emit()
values = {
'operation_name': operation.name(),
'operation_suffix': operation.emitter.operation_suffix
}
self.source_buffer += SubstituteTemplate(KernelTemplate, values)
# Done
return
# Compile with NVRTC
def compile_(self, compilation_options):
err, program = nvrtc.nvrtcCreateProgram(
str.encode(self.source_buffer),
bytes(str.encode(self.name)),
0, [], [])
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
# Compile program
options = compilation_options.get()
err, = nvrtc.nvrtcCompileProgram(program, len(options), options)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
error_string = 'NVRTC Error: {}\n'.format(err)
# Get log from compilation
err, logSize = nvrtc.nvrtcGetProgramLogSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
self.log = b' ' * logSize
err, = nvrtc.nvrtcGetProgramLog(program, self.log)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
raise RuntimeError(error_string + self.log.decode() + self.source_buffer)
# Get data from compilation
err, dataSize = nvrtc.nvrtcGetCUBINSize(program)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
self.cubin_image = b' ' * dataSize
err, = nvrtc.nvrtcGetCUBIN(program, self.cubin_image)
if err != nvrtc.nvrtcResult.NVRTC_SUCCESS:
raise RuntimeError('NVRTC Error: {}'.format(err))
return
#
def load_(self):
# Load data as module data
err, self.module = cuda.cuModuleLoadData(self.cubin_image)
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
# Get functions
for operation in self.operations:
err, operation.kernel = cuda.cuModuleGetFunction(
self.module,
bytes(str.encode(operation.name())))
if err != cuda.CUresult.CUDA_SUCCESS:
raise RuntimeError('Cuda Error: {}'.format(err))
operation.module = self
return
#################################################################################################
#
# Manifest represents an 'owner' for modules and operations
#
#################################################################################################
#
class Manifest:
#
def __init__(self):
self.operations = {}
self.modules = []
pass
#
def append_module(self, module):
'''
Appends a module and takes ownership of operations used to construct it.
'''
self.modules.append(module)
for operation in module.operations:
self.operations[operation.name()] = operation
#################################################################################################
| cutlass-main | tools/library/scripts/rt.py |
| cutlass-main | tools/library/scripts/__init__.py |
#
# \file conv2d_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
from library import *
###################################################################################################
#
class Conv2dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity1, \
group_mode = GroupMode.NoneGroup):
self.operation_kind = OperationKind.Conv2d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
self.group_mode = group_mode
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian
]
return self.tile_description.math_instruction.math_operation in complex_operators
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.accumulator_type():
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s_%s" % (ShortDataTypeNames[self.accumulator_type()], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = self.tile_description.procedural_name()
# grouped conv
if self.group_mode != GroupMode.NoneGroup:
group_conv_name = f"{GroupModeNames[self.group_mode]}_"
else:
group_conv_name = ""
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_unity_stride_${group_conv_name}align${alignment}"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${group_conv_name}align${alignment}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'alignment': "%d" % self.A.alignment,
'group_conv_name': group_conv_name
}
)
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
return self.configuration_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv2dInstance:
def __init__(self):
self.template = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2d${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_group_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv2dGroup${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
${math_operator},
${group_mode},
${iterator_algorithm},
${stride_support},
${align_a},
${align_b}
>::Kernel;
"""
self.template_depthwise_direct_conv = """
// Conv2d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultDepthwiseDirect2dConv${conv_kind_name}<
${element_a},
${layout_a},
${element_b},
${layout_b},
${element_c},
${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::conv::TensorNHWCShape<${threadblock_output_shape_n}, ${threadblock_output_shape_p}, ${threadblock_output_shape_q}, ${groups_per_cta}>,
cutlass::MatrixShape<${filter_shape_r}, ${filter_shape_s}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::conv::threadblock::DepthwiseDirect2dConvIdentityThreadblockSwizzle<
1,
${threadblock_output_shape_n},
${threadblock_output_shape_p},
${threadblock_output_shape_q}>,
${stages},
${math_operator},
${iterator_algorithm},
${stride_support},
cutlass::MatrixShape<${stride_r}, ${stride_s}>,
cutlass::MatrixShape<${dilation_r}, ${dilation_s}>
>::Kernel;
"""
def emit(self, operation):
warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support],
'math_operator': 'cutlass::arch::OpMultiplyAddComplex' if operation.is_complex() else \
MathOperationTag[operation.tile_description.math_instruction.math_operation],
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
}
if operation.group_mode == GroupMode.NoneGroup:
return SubstituteTemplate(self.template, values)
elif operation.group_mode == GroupMode.Depthwise:
values['group_mode'] = GroupModeTag[operation.group_mode]
# Setup other template params
values['threadblock_output_shape_n'] = str(operation.tile_description.threadblock_output_shape[0])
values['threadblock_output_shape_p'] = str(operation.tile_description.threadblock_output_shape[1])
values['threadblock_output_shape_q'] = str(operation.tile_description.threadblock_output_shape[2])
values['groups_per_cta'] = str(operation.tile_description.threadblock_output_shape[3])
values['filter_shape_r'] = str(operation.tile_description.filter_shape[0])
values['filter_shape_s'] = str(operation.tile_description.filter_shape[1])
values['stride_r'] = str(operation.tile_description.stride[0])
values['stride_s'] = str(operation.tile_description.stride[1])
values['dilation_r'] = str(operation.tile_description.dilation[0])
values['dilation_s'] = str(operation.tile_description.dilation[1])
return SubstituteTemplate(self.template_depthwise_direct_conv, values)
else:
values['group_mode'] = GroupModeTag[operation.group_mode]
return SubstituteTemplate(self.template_group_conv, values)
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv2dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNHWC, max(1, int(align / DataTypeSize[output_type])))
manifest.append(Conv2dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator))
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitConv2dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv2dInstance()
self.instance_template = """
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by conv2d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "conv2d_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """
using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution<
${operation_name}>;
manifest.append(new cutlass::library::Conv2dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_direct_conv_instance = """
using Operation_${operation_name} = cutlass::conv::device::DirectConvolution<
${operation_name}>;
manifest.append(new cutlass::library::DirectConv2dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_epilogue = """
}
"""
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
#
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
#
def emit(self, operation):
self.operations.append(operation)
self.configuration_file.write(SubstituteTemplate(self.instance_template, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation)
}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
if operation.group_mode == GroupMode.Depthwise:
self.configuration_file.write(SubstituteTemplate(self.configuration_direct_conv_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
else:
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
###################################################################################################
| cutlass-main | tools/library/scripts/conv2d_operation.py |
#
# \file generator.py
#
# \brief Generates the CUTLASS Library's instances
#
import enum
import os.path
import shutil
import argparse
import logging
from library import *
from manifest import *
from itertools import product
###################################################################################################
#
def CudaToolkitVersionSatisfies(semantic_ver_string, major, minor, patch = 0):
# by default, use the latest CUDA Toolkit version
cuda_version = [11, 0, 132]
# Update cuda_version based on parsed string
if semantic_ver_string != '':
for i, x in enumerate([int(x) for x in semantic_ver_string.split('.')]):
if i < len(cuda_version):
cuda_version[i] = x
else:
cuda_version.append(x)
return cuda_version >= [major, minor, patch]
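# Worked example (added for clarity; the calls below are illustrative only):
# the version lists are compared lexicographically, so with the default toolkit [11, 0, 132]
#   CudaToolkitVersionSatisfies('', 11, 0)       -> [11, 0, 132] >= [11, 0, 0] -> True
#   CudaToolkitVersionSatisfies('10.2', 11, 0)   -> [10, 2, 132] >= [11, 0, 0] -> False
#   CudaToolkitVersionSatisfies('11.4.1', 11, 4) -> [11, 4, 1]   >= [11, 4, 0] -> True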
###################################################################################################
###################################################################################################
#
def EpilogueAlignment(max_alignment, tile, epilogue_steps = 8):
''' Helper to compute the maximum alignment of the epilogue '''
def product(X, identity = 1):
result = identity
for item in X:
result *= item
return result
elements_per_thread = product(tile.threadblock_shape[:-1]) // product(tile.warp_count) // 32 // epilogue_steps
return min(max_alignment, elements_per_thread)
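# Worked example (added for clarity, not part of the original script):
# for a tile with threadblock_shape = [128, 128, 32] and warp_count = [2, 2, 1],
#   elements_per_thread = (128 * 128) // (2 * 2) // 32 // 8 = 16
# so EpilogueAlignment(8, tile) returns min(8, 16) = 8, while EpilogueAlignment(32, tile)
# would be capped at 16.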
def DefaultSwizzlingFunctor():
return SwizzlingFunctor.Identity8
# To use StreamK decomposition for basic GEMMs, set `swizzling_functor = SwizzlingFunctor.StreamK`
#
def CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = DefaultSwizzlingFunctor()):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
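# Hypothetical usage sketch (commented out; `layouts`, `tile_descriptions`, `data_type` and
# `alignment_constraints` are placeholders, not module-level definitions in this file).
# As noted above, passing SwizzlingFunctor.StreamK selects the Stream-K decomposition
# instead of the default Identity8 swizzle for basic GEMMs:
#
#   CreateGemmOperator(manifest, layouts, tile_descriptions, data_type,
#                      alignment_constraints,
#                      swizzling_functor = SwizzlingFunctor.StreamK)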
# Generates CUTLASS 3.x API based GemmUniversal kernels. Alignment constraints are folded in with the layouts
def CreateGemmUniversal3xOperator(
manifest, layouts, tile_descriptions, data_types,
schedules = [[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto]],
complex_transforms=None,
epilogue_functor=EpilogueFunctor.LinearCombination,
swizzling_functor=SwizzlingFunctor.Identity1,
tile_schedulers=[TileSchedulerType.Persistent]):
if type(data_types) is dict:
data_types = [data_types]
for s in schedules:
assert(len(s) == 2)
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none), ]
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0]]
combinations = product(layouts, tile_descriptions, data_types, complex_transforms, schedules, tile_schedulers)
for layout, tile_description, data_type, complex_transform, schedules, tile_scheduler in combinations:
kernel_schedule, epilogue_schedule = schedules
A = TensorDescription(
data_type["a_type"], layout[0][0], layout[0][1], complex_transform[0])
B = TensorDescription(
data_type["b_type"], layout[1][0], layout[1][1], complex_transform[1])
C = TensorDescription(data_type["c_type"], layout[2][0], layout[2][1])
D = TensorDescription(data_type["d_type"], layout[2][0], layout[2][1])
element_compute = data_type.get("epi_type", data_type["acc_type"])
operation = GemmOperation(
GemmKind.Universal3x, tile_description.minimum_compute_capability,
tile_description, A, B, C, element_compute, epilogue_functor, swizzling_functor, D,
kernel_schedule, epilogue_schedule, tile_scheduler)
manifest.append(operation)
operations.append(operation)
return operations
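# Note on the 3.x layouts format (added for clarity): unlike CreateGemmOperator above,
# alignments are folded in with the layouts, so each entry is a triple of
# (LayoutType, alignment) pairs for A, B, and C/D respectively, e.g. a hypothetical
#   layouts_3x = [
#     ((LayoutType.RowMajor, 8), (LayoutType.ColumnMajor, 8), (LayoutType.ColumnMajor, 8)),
#   ]
# where layout[i][0] is consumed as the layout and layout[i][1] as the alignment above.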
#
def CreateSparseGemmOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.Sparse]
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GemmOperation(GemmKind.Sparse, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
gemm_kinds = [GemmKind.PlanarComplex, GemmKind.PlanarComplexArray]
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for gemm_kind in gemm_kinds:
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
manifest.append(GemmOperation(gemm_kind, \
tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue))
return
#
def CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none, ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment, complex_transform[0])
B = TensorDescription(element_b, layout[1], alignment, complex_transform[1])
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = GroupedGemmOperation(GemmKind.Grouped, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
# SYRK supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
# HERK supported layouts (RowMajor + conj, ColumnMajor)
if blas_mode == BlasMode.hermitian and layout[0] == LayoutType.RowMajor:
complex_transform = ComplexTransform.conj
alignment_c = 1 # Alignment only applies to A in SYRK
A = TensorDescription(element_a, layout[0], alignment, complex_transform)
C = SymmetricTensorDescription(element_c, layout[1], fill_mode, alignment_c)
# Rank-K update
new_operation = RankKOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
# Rank-2K update
new_operation = Rank2KOperation(RankKKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, data_type, \
alignment_constraints, complex_transforms = None, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
if complex_transforms is None:
complex_transforms = [(ComplexTransform.none),]
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for diag_type in diag_types:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
for complex_transform in complex_transforms:
alignment_c = min(8, alignment)
A = TriangularTensorDescription(element_a, layout[0], side_mode, fill_mode, diag_type,
alignment, complex_transform)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
new_operation = TrmmOperation(TrmmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
#
def CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, data_type, \
alignment_constraints, blas_mode, epilogue_functor = EpilogueFunctor.LinearCombination, \
swizzling_functor = SwizzlingFunctor.Identity8):
element_a, element_b, element_c, element_epilogue = data_type
operations = []
# by default, only generate the largest tile and largest alignment
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
for layout in layouts:
for side_mode in side_modes:
for fill_mode in fill_modes:
for tile_description in tile_descriptions:
for alignment in alignment_constraints:
# SYMM supported layouts (RowMajor, ColumnMajor) with no conjugation
complex_transform = ComplexTransform.none
alignment_a = 1 # No vectorized access for the triangular matrix
alignment_c = min(8, alignment)
A = SymmetricTensorDescription(element_a, layout[0], fill_mode, alignment_a, complex_transform, side_mode)
# tensor A and B have same data type and layout
B = TensorDescription(element_b, layout[0], alignment)
C = TensorDescription(element_c, layout[1], alignment_c)
# SYMM/HEMM update
new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
# SYMM/HEMM update
new_operation = SymmOperation(SymmKind.Universal, tile_description.minimum_compute_capability, \
tile_description, A, B, C, element_epilogue, epilogue_functor, swizzling_functor, blas_mode)
manifest.append(new_operation)
operations.append(new_operation)
return operations
###########################################################################################################
# ConvolutionOperator support variations
# ____________________________________________________________________
# ConvolutionOperator | Analytic | Optimized
# ____________________________________________________________________
# | Fprop | (strided) | (strided)
# | Dgrad | (strided, unity*) | (strided, unity)
# | Wgrad | (strided) | (strided)
# ____________________________________________________________________
#
# Note: Operators marked (*) are supported but not generated to keep the instantiated kernel count low
###########################################################################################################
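# Illustrative call (an assumption added for clarity, not taken from the original script):
# using CreateConv2dOperator defined below to generate only the Fprop rows of the table above,
# with an (activation, filter, output) layout triple:
#
#   conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
#   CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type,
#                        alignment_constraints, conv_kinds = [ConvKind.Fprop])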
# Convolution for 2D operations
def CreateConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operations = [
# Non-grouped kernel
Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_),
]
# Instantiate grouped conv kernels
if tile.math_instruction.opcode_class == OpcodeClass.TensorOp and A.layout == LayoutType.TensorNHWC and \
tile.minimum_compute_capability >= 80:
# SingleGroup kernel
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.SingleGroup))
# Analytic iterator supports MultipleGroup mode
if iterator_algorithm == IteratorAlgorithm.Analytic:
new_operations.append(Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_, group_mode=GroupMode.MultipleGroup))
for new_operation in new_operations:
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Analytic and Optimized Dgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# strided dgrad uses a special threadblock swizzle
# note that SwizzlingFunctor.StridedDgradHorizontal might be
# better for problem sizes with large activation channel count
swizzling_functor_strided_dgrad_ = SwizzlingFunctor.StridedDgradIdentity1
if IteratorAlgorithm.Analytic in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Optimized Dgrad
if IteratorAlgorithm.Optimized in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_strided_dgrad_)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv2d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 2D operations specialized for fixed channel counts
def CreateConv2dFixedChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (fixed channels)
iterator_algorithms = [IteratorAlgorithm.FixedChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 2D operations specialized for few channels
def CreateConv2dFewChannelsOperator(manifest, layout, tile_descriptions, data_type, channel_counts, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
# iterator algorithm (few channels)
iterator_algorithms = [IteratorAlgorithm.FewChannels,]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
channel_counts = [channel_counts[0],]
operations = []
for tile in tile_descriptions:
for channel_count in channel_counts:
alignment_c = EpilogueAlignment(channel_count, tile)
A = TensorDescription(element_a, layout[0], channel_count)
B = TensorDescription(element_b, layout[1], channel_count)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
#
# Conv2d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv2dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor, swizzling_functor_)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for 3D operations
def CreateConv3dOperator(manifest, layout, tile_descriptions, data_type, alignment, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], epilogue_functor = EpilogueFunctor.LinearCombination):
element_a, element_b, element_c, element_epilogue = data_type
# one exceptional case
alignment_c = min(8, alignment)
# iterator algorithm (analytic and optimized)
iterator_algorithms = [IteratorAlgorithm.Analytic, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size and optimized iterators
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
iterator_algorithms = [IteratorAlgorithm.Optimized]
operations = []
# All tile sizes for Conv3dFprop and Conv3dWgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Fprop
#
if ConvKind.Fprop in conv_kinds:
# Strided support for Analytic and Optimized Fprop
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Fprop, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided)
manifest.append(new_operation)
operations.append(new_operation)
#
# Conv3d Wgrad
#
if ConvKind.Wgrad in conv_kinds:
# Strided support for Analytic and Optimized Wgrad
for iterator_algorithm in iterator_algorithms:
new_operation = Conv3dOperation(ConvKind.Wgrad, iterator_algorithm, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# All tile sizes for Conv3dDgrad
for tile in tile_descriptions:
A = TensorDescription(element_a, layout, alignment)
B = TensorDescription(element_b, layout, alignment)
C = TensorDescription(element_c, layout, alignment_c)
#
# Conv3d Dgrad
#
if ConvKind.Dgrad in conv_kinds:
# Unity stride for Optimized Dgrad
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Optimized, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Unity, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
# Strided support for Analytic Dgrad
# Conv3dDgrad has a naive strided support which does not cut down redundant MMAs
new_operation = Conv3dOperation(ConvKind.Dgrad, IteratorAlgorithm.Analytic, tile.minimum_compute_capability, tile,\
A, B, C, element_epilogue, StrideSupport.Strided, epilogue_functor)
manifest.append(new_operation)
operations.append(new_operation)
return operations
# Convolution for Depthwise 2d conv
def CreateDepthwiseConv2dOperator(manifest, layout, tile_descriptions, data_type, alignment_constraints, \
conv_kinds = [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad], \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
element_a, element_b, element_c, element_epilogue = data_type
# iterator algorithm (FixedStrideDilation, Optimized)
iterator_algorithms = [IteratorAlgorithm.FixedStrideDilation, IteratorAlgorithm.Optimized]
# by default, only generate the largest tile size, largest alignment, and optimized iterator
if manifest.kernel_filter == '':
tile_descriptions = [tile_descriptions[0],]
alignment_constraints = [alignment_constraints[0],]
operations = []
for tile in tile_descriptions:
for alignment in alignment_constraints:
alignment_c = min(8, alignment)
A = TensorDescription(element_a, layout[0], alignment)
B = TensorDescription(element_b, layout[1], alignment)
C = TensorDescription(element_c, layout[2], alignment_c)
swizzling_functor_ = swizzling_functor
if ConvKind.Fprop in conv_kinds:
# Strided support for Optimized and FixedStrideDilation depthwise conv
for iterator_algorithm in iterator_algorithms:
stride_support = StrideSupport.Strided
if iterator_algorithm == IteratorAlgorithm.FixedStrideDilation:
if tile.stride == [-1, -1] or tile.dilation == [-1,-1]:
continue
stride_support = StrideSupport.Fixed
if iterator_algorithm == IteratorAlgorithm.Optimized:
if tile.stride != [-1, -1] or tile.dilation != [-1,-1]:
continue
new_operation = Conv2dOperation(ConvKind.Fprop,
iterator_algorithm,
tile.minimum_compute_capability,
tile,
A, B, C,
element_epilogue,
stride_support,
epilogue_functor,
swizzling_functor_,
group_mode=GroupMode.Depthwise)
manifest.append(new_operation)
operations.append(new_operation)
return operations
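# Clarifying note (added): in the loop above, a tile whose stride or dilation is [-1, -1] is
# treated as unspecialized and is only emitted with IteratorAlgorithm.Optimized, while tiles
# with concrete stride/dilation pairs (e.g. [1, 1] or [2, 2]) are emitted with
# IteratorAlgorithm.FixedStrideDilation and StrideSupport.Fixed.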
###################################################################################################
###################################################################################################
#
def GenerateSM50_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
if math_inst.element_a == DataType.f32:
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50_Simt_complex(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 50
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM50(manifest, cuda_version):
GenerateSM50_Simt(manifest, cuda_version)
GenerateSM50_Simt_complex(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM60_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
def GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 60
max_cc = 1024
alignment_constraints = [8,]
filter_3x3 = [3, 3]
filter_5x5 = [5, 5]
# [stride_h, stride_w]
# [-1, -1] means any stride size.
strides = [[-1,-1], [1, 1], [2, 2]]
# [dilation_h, dilation_w]
# [-1, -1] means any dilation size.
dilations = [[-1,-1], [1, 1], [2, 2]]
# groups per thread block
g16 = 16
g32 = 32
g64 = 64
# output shape per thread block
npq_1x4x4 = [1, 4, 4]
npq_1x8x8 = [1, 8, 8]
npq_1x10x10 = [1, 10, 10]
tile_descriptions = []
for math_inst in math_instructions:
for stride, dilation in product(strides, dilations):
tile_descriptions.extend([
# filter3x3 ThreadBlock_output, filter, stage, warp
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_3x3, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_3x3, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_3x3, 4, stride, dilation,[4, 1, 1], math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_3x3, 4, stride, dilation, [4, 1, 1], math_inst, min_cc, max_cc),
# filter5x5 ThreadBlock_output, filter, stage, warp
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g32], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g64], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x8x8+[g16], filter_5x5, 3, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x10x10+[g64], filter_5x5, 2, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g32], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g64], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc),
Direct2dConvFixedStrideDilationTileDescription(npq_1x4x4+[g16], filter_5x5, 4, stride, dilation,[4, 1, 1],math_inst, min_cc, max_cc)
])
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateDepthwiseConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
def GenerateSM60(manifest, cuda_version):
GenerateSM60_Simt(manifest, cuda_version)
GenerateSM60_Simt_DepthwiseConv2d(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM61_Simt(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 4], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 61
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 32], 2, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
#
#
def GenerateSM61(manifest, cuda_version):
GenerateSM61_Simt(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM70_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
def GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[8, 8, 4], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 75
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 16, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 70
max_cc = 1024
alignment_constraints = [8,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
##################################################################################################
#
def GenerateSM70(manifest, cuda_version):
GenerateSM70_TensorOp_884(manifest, cuda_version)
GenerateSM70_PlanarComplexTensorOp_884(manifest, cuda_version)
# To limit build size, WMMA GEMMs are disabled for now.
#
#GenerateSM70_WmmaTensorOp_161616(manifest, cuda_version)
###################################################################################################
###################################################################################################
#
def GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst):
min_cc = 75
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [1, 2, 4])
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [1, 2, 4])
#
def GenerateSM75_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [1, 2, 2], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
# Separate generator for 'few channels' specializations
GenerateSM75_TensorOp_1688_FewChannels(manifest, cuda_version, math_inst)
#
#
def GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [8, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
def GenerateSM75_TensorOp_8816_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
alignment_constraints_small_channels = [16, 8, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 64], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 32, 32], 2, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[8, 8, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 16], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.s32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
elif op.tile_description.threadblock_shape[1] == 64:
op.C.alignment = 8
else:
op.C.alignment = 8
#
#
def GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 2):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[8, 8, 32], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[8, 8, 32], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 2, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 2, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 2, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
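# SM75 binary (1-bit) Tensor Core GEMMs on the 8x8x128 XOR-popcount MMA
# (CUDA 11.0+). The per-math-operation max_cc dictionary caps xor_popc
# kernels at SM89 and and_popc kernels at SM90.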
def GenerateSM75_TensorOp_88128(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[8, 8, 128], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
]
min_cc = 75
max_cc = {
MathOperation.xor_popc: 89,
MathOperation.and_popc: 90
}
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 2, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 512], 2, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 512], 2, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 512], 2, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 512], 2, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
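# SM75 WMMA-based int8 16x16x16 kernels. Note that the GenerateSM75 dispatcher
# below leaves this emitter commented out, so these kernels are not produced
# by default.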
def GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 10, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 16, 16], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.WmmaTensorOp, \
MathOperation.multiply_add),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 32], 2, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 2, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
DataType.f32,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
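# SM75 SIMT complex-float kernels (cf32 in/out) built on the scalar 1x1x1
# multiply_add_complex instruction. Only a Conv2d operator is instantiated
# here; the complex_transforms list is defined but not passed to any emitter.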
def GenerateSM75_Simt_complex(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 75
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc)
]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
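# Top-level SM75 (Turing) dispatcher: invokes each of the emitters above for
# this architecture, each gated internally by its CUDA toolkit version check.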
def GenerateSM75(manifest, cuda_version):
GenerateSM75_TensorOp_1688(manifest, cuda_version)
GenerateSM75_PlanarComplexTensorOp_1688(manifest, cuda_version)
GenerateSM75_TensorOp_8816_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8816_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_8832_TN(manifest, cuda_version)
GenerateSM75_TensorOp_8832_Interleaved(manifest, cuda_version)
GenerateSM75_TensorOp_88128(manifest, cuda_version)
#GenerateSM75_WmmaTensorOp_161616(manifest, cuda_version)
GenerateSM75_Simt_complex(manifest, cuda_version)
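# Usage sketch (illustrative, not emitted): the Generate* entry points take the
# manifest object built elsewhere in this script and a CUDA toolkit version
# string, e.g.
#
#   GenerateSM75(manifest, "11.4")  # hypothetical call; each emitter no-ops
#                                   # when its toolkit version check fails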
###################################################################################################
###################################################################################################
#
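# SM80 (Ampere) Tensor Core kernels on the 16x8x16 MMA for f16/f16/f32,
# f16/f16/f16 and bf16/bf16/f32. Emits plain and grouped GEMMs plus NHWC
# Conv2d (including the fixed-channel variant) and NDHWC Conv3d, with
# mixed-output copies when the accumulator type differs from the inputs.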
def GenerateSM80_TensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, 4, 2]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmGroupedOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type, 8)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, [4, 8])
CreateConv3dOperator(manifest, LayoutType.TensorNDHWC, tile_descriptions, data_type_mixed, 8)
#
#
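# Structured-sparse SM80 kernels on the 16x8x32 sparse MMA for f16/bf16
# inputs (CUDA 11.1+). Only sparse GEMMs are emitted; alignment is fixed at 8.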
def GenerateSM80_SparseTensorOp_16832(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 32], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
#
#
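# SM80 planar-complex GEMMs on the 16x8x16 MMA: real and imaginary parts live
# in separate planes, and all four conjugation combinations of A and B are
# enumerated via complex_transforms.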
def GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 16], \
DataType.f16, DataType.f16, DataType.f16, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [8, ]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([ 64, 128, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmPlanarComplexOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, complex_transforms)
#
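# SM80 integer Tensor Core kernels (s8/u8, 16x8x32 MMA) restricted to the
# TN layout. Emits s32-accumulating GEMM/Conv2d plus clamped mixed-output
# variants, including the fixed- and few-channel Conv2d specializations for
# small input-channel counts.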
def GenerateSM80_TensorOp_16832_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
smem_usage = 164
alignment_constraints = [16,]
alignment_constraints_small_channels = [16, 8, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 64], 6, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 64], 6, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFixedChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
operations += CreateConv2dFewChannelsOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints_small_channels, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
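# Output (C) alignment heuristic: wide threadblock-N tiles can use 16-element
# stores unless the threadblock-M extent is only 32; everything else falls
# back to 8-element alignment.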
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
if op.tile_description.threadblock_shape[0] == 32:
op.C.alignment = 8
else:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
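# Structured-sparse SM80 int8 kernels (16x8x64 sparse MMA, TN layout only,
# CUDA 11.1+), with both s32-output and clamped s8-output GEMM variants.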
def GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 64], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
tile_descriptions = [
TileDescription([128, 64, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s8, DataType.s8, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s8, DataType.s8, DataType.s8, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved32, LayoutType.RowMajorInterleaved32, LayoutType.ColumnMajorInterleaved32),
]
math_instructions = [
MathInstruction( \
[16, 8, 32], \
DataType.s8, DataType.s8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 32], \
DataType.u8, DataType.u8, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [16,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 64], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 64], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 10, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC32HW32, LayoutType.TensorC32RSK32, LayoutType.TensorNC32HW32)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 8
#
#
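# SM80 int4 Tensor Core kernels (s4/u4, 16x8x64 MMA) in the TN layout:
# s32-output GEMM/Conv2d plus clamped same-type-output variants, with C
# alignment widened to 16 for large threadblock-N tiles.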
def GenerateSM80_TensorOp_16864_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 128], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 256], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [math_inst.element_a, math_inst.element_b, math_inst.element_accumulator, DataType.s32]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombination)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] >= 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 128], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate)
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
tile_descriptions = [
TileDescription([ 64, 64, 256], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 256], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 256], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 256], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 256], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 256], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 256], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 512], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 512], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.s4, DataType.s4, DataType.s32, DataType.s32]
data_type_mixed = [DataType.s4, DataType.s4, DataType.s4, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, None, EpilogueFunctor.LinearCombination)
operations = []
operations += CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
for op in operations:
if op.tile_description.threadblock_shape[1] > 128:
op.C.alignment = 16
else:
op.C.alignment = 8
#
#
def GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajorInterleaved64, LayoutType.RowMajorInterleaved64, LayoutType.ColumnMajorInterleaved64),
]
math_instructions = [
MathInstruction( \
[16, 8, 64], \
DataType.s4, DataType.s4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
MathInstruction( \
[16, 8, 64], \
DataType.u4, DataType.u4, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_saturate),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [32,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 128], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 128], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 128], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 128], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 128], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 128], 6, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type_mixed = [math_inst.element_a, math_inst.element_b, math_inst.element_a, DataType.f32]
operations = []
operations += CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints, None, EpilogueFunctor.LinearCombinationClamp)
conv_layout = (LayoutType.TensorNC64HW64, LayoutType.TensorC64RSK64, LayoutType.TensorNC64HW64)
operations += CreateConv2dOperator(manifest, conv_layout, tile_descriptions,
data_type_mixed, alignment_constraints, [ConvKind.Fprop], EpilogueFunctor.LinearCombinationClamp)
for op in operations:
op.C.alignment = 16
#
#
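# SM80 binary (1-bit) GEMMs on the 16x8x256 MMA, covering both XOR-popcount
# and AND-popcount math; the max_cc dictionary limits xor_popc kernels to
# SM89 and and_popc kernels to SM90.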
def GenerateSM80_TensorOp_168256(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 256], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.xor_popc),
MathInstruction( \
[16, 8, 256], \
DataType.b1, DataType.b1, DataType.s32, \
OpcodeClass.TensorOp, \
MathOperation.and_popc),
]
min_cc = 80
max_cc = {
MathOperation.xor_popc: 89,
MathOperation.and_popc: 90
}
alignment_constraints = [128,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 512], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 512], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 512], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 512], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 512], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 512], 6, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 512], 10, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 128, 1024], 3, [4, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 256, 1024], 3, [2, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([256, 64, 1024], 4, [4, 1, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 256, 1024], 4, [1, 4, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 128, 1024], 4, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([128, 64, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 128, 1024], 3, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
TileDescription([ 64, 64, 1024], 5, [2, 2, 1], math_inst, min_cc, max_cc[math_inst.math_operation]),
]
data_type = [DataType.b1, DataType.b1, DataType.s32, DataType.s32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
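# SM80 TF32 Tensor Core kernels (16x8x8 MMA) over all row/column-major layout
# combinations, emitting both f32-epilogue and tf32-output GEMMs along with
# NHWC Conv2d operators.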
def GenerateSM80_TensorOp_1688(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
data_type_mixed = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_a,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type_mixed, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type_mixed, alignment_constraints)
#
#
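# SM80 "fast math" f32 kernels: inputs and outputs are f32, but the inner MMA
# converts operands to tf32, f16 or bf16 while accumulating in f32, trading a
# small amount of accuracy for Tensor Core throughput.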
def GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f16, DataType.f16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f16),
MathInstruction( \
[16, 8, 8], \
DataType.bf16, DataType.bf16, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_bf16),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
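# SM80 f32 kernels using multiply_add_fast_f32, the emulated-FP32 Tensor Core
# path (commonly referred to as 3xTF32) that recovers near-FP32 accuracy from
# TF32 instructions.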
def GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4, 2, 1]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
def GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
def GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 1):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.RowMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.RowMajor),
]
math_instructions = [
MathInstruction( \
[16, 8, 16], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 32], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 32], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 64], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 64], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 64], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 64], 3, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSparseGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
tile_descriptions = [
TileDescription([128, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
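# SM80 rank-k update (SYRK) kernels on the 16x8x8 MMA for f32 data, covering
# both the TF32 and fast-f32 math instructions; the alignment constraint
# applies only to the A operand.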
def GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4] # Alignment only applies to A in SYRK
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYRK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
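# SM80 triangular matrix-matrix multiply (TRMM) kernels: left/right side,
# lower/upper fill and unit/non-unit diagonal variants over the 16x8x8 MMA,
# for TF32 and fast-f32 math.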
def GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1, 2, 4]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
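# SM80 symmetric matrix-matrix multiply (SYMM) kernels on the 16x8x8 MMA for
# f32 data, enumerating side and fill modes; the complex counterpart below
# also emits the Hermitian (HEMM) BLAS mode.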
def GenerateSM80_TensorOp_1688_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
# A and B have same layouts
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_fast_f32),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [
1, 2, 4
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 16], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 16], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 16], 10, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 32], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 32], 3, [2, 4, 1], math_inst, min_cc, max_cc),
#TileDescription([256, 64, 32], 4, [4, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 256, 32], 4, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 32], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([128, 64, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 128, 32], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([ 64, 64, 32], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f32, DataType.f32, DataType.f32, DataType.f32]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_instructions = [
MathInstruction( \
[16, 8, 8], \
DataType.tf32, DataType.tf32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex),
MathInstruction( \
[16, 8, 8], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_fast_f32),
]
min_cc = 80
max_cc = 1024
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 64, 16], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 16], 4, [2, 1, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
DataType.cf32, DataType.cf32, DataType.cf32, DataType.cf32
]
alignment_constraints = [1,]
# SYMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
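# SM80 double-precision Tensor Core GEMMs on the 8x8x4 f64 MMA, with a wide
# sweep of tile shapes down to 16x32 / 32x16 threadblocks.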
def GenerateSM80_TensorOp_884(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM80_TensorOp_884_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
def GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
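# The *_complex_gaussian generators select MathOperation.multiply_add_complex_gaussian,
# which (as the name suggests) appears to use the Gauss / 3M formulation of complex
# multiplication: three real multiplies plus extra additions instead of four multiplies.
# That is presumably why these variants enumerate fewer and smaller tile shapes than
# their plain complex counterparts.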
def GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
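# For the rank-k generators the layouts are (A layout, C layout) pairs and fill_modes
# selects which triangle of C (Lower or Upper) is referenced and updated.
# BlasMode.symmetric emits the SYRK-style kernels and, for complex types,
# BlasMode.hermitian additionally emits the HERK-style kernels, as the comments above
# indicate.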
def GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
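# The TRMM generators enumerate every combination of side (Left/Right), fill
# (Lower/Upper) and diagonal (NonUnit/Unit), so each (layout, tile) pair above
# presumably expands into 2 x 2 x 2 = 8 triangular-matrix-multiply variants in the
# manifest.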
def GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM80_TensorOp_884_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 0):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[8, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
#
def GenerateSM80_Simt_f32(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([256, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 5, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 256, 8], 4, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
#
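# In the SIMT generators the data_type list is assembled directly from the math
# instruction: the A and B operand types followed by the accumulator type, which serves
# as both the output and epilogue type here. The same tile list is then reused to emit
# NHWC Conv2d kernels via CreateConv2dOperator.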
def GenerateSM80_Simt_f64(manifest, cuda_version):
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.Simt, \
MathOperation.multiply_add),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 64, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 32, 8], 5, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([ 32, 128, 8], 5, [1, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [
math_inst.element_a,
math_inst.element_b,
math_inst.element_accumulator,
math_inst.element_accumulator,
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints)
#
##################################################################################################
#
def GenerateSM80_Simt_complex(manifest, cuda_version):
math_instructions = [
MathInstruction( \
[1, 1, 1], \
DataType.f32, DataType.f32, DataType.f32, \
OpcodeClass.Simt, \
MathOperation.multiply_add_complex),
]
min_cc = 80
max_cc = 1024
alignment_constraints = [1,]
data_type = [
DataType.cf32,
DataType.cf32,
DataType.cf32,
DataType.cf32
]
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
for math_inst in math_instructions:
tile_descriptions = [
TileDescription([128, 128, 8], 5, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 128, 8], 4, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([ 64, 128, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 6, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
]
CreateGemmOperator(manifest, layouts, tile_descriptions, data_type, alignment_constraints, complex_transforms)
conv_layout = (LayoutType.TensorNHWC, LayoutType.TensorNHWC, LayoutType.TensorNHWC)
CreateConv2dOperator(manifest, conv_layout, tile_descriptions, data_type, alignment_constraints)
#
###################################################################################################
#
def GenerateSM80(manifest, cuda_version):
GenerateSM80_TensorOp_16816(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16832(manifest, cuda_version)
GenerateSM80_PlanarComplexTensorOp_16816(manifest, cuda_version)
GenerateSM80_TensorOp_1688(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_math(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16816_fast_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_complex(manifest, cuda_version)
# 3xTF32
GenerateSM80_TensorOp_1688_fast_fp32_math(manifest, cuda_version)
GenerateSM80_TensorOp_1688_fast_fp32_math_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_1688_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm(manifest, cuda_version)
GenerateSM80_TensorOp_1688_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex(manifest, cuda_version)
GenerateSM80_TensorOp_884_symm_complex_gaussian(manifest, cuda_version)
GenerateSM80_TensorOp_16832_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16832_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_16864_TN(manifest, cuda_version)
GenerateSM80_SparseTensorOp_168128_TN(manifest, cuda_version)
GenerateSM80_TensorOp_16864_Interleaved(manifest, cuda_version)
GenerateSM80_TensorOp_168256(manifest, cuda_version)
GenerateSM80_Simt_f32(manifest, cuda_version)
GenerateSM80_Simt_f64(manifest, cuda_version)
GenerateSM80_Simt_complex(manifest, cuda_version)
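#
# Illustrative call pattern for the dispatcher above (a sketch only; the Manifest and
# argument names are assumptions -- the real plumbing lives elsewhere in this file):
#
#   manifest = Manifest(args)
#   GenerateSM80(manifest, args.cuda_version)
#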
###################################################################################################
#
def GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments.
layouts = [
[[LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.ColumnMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 8], [LayoutType.ColumnMajor, 1]],
[[LayoutType.RowMajor, 8], [LayoutType.RowMajor, 8], [LayoutType.ColumnMajor, 1]],
]
math_instructions = [
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f16,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.f16, DataType.f16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 16],
DataType.bf16, DataType.bf16, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
tile_descriptions_small = [
# Not compatible with TmaWarpSpecializedCooperative
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_large = [
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1]*2, math_inst.instruction_shape[2]*4],
0, [4, 2, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1]*2, math_inst.instruction_shape[2]*4],
0, [4, 2, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_large
data_type = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set the alignment of C based on the destination format.
for layout in layouts:
if data_type["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]]
else:
schedules = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
# TmaWarpSpecializedCooperative and TmaWarpSpecializedPingpong require CUDA version >= 12.1 for optimal performance.
]
stream_k_schedules = []
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Add stream-K variants
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# not enough smem for 256x128 f32 out with C allocation
if data_type["d_type"] == DataType.f32:
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
else:
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# Emit instance without C allocation + load
data_type["c_type"] = DataType.void
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# For mixed-precision kernels, also generate kernels that write the output matrix in the A/B format.
# Avoid emitting two kernels if the accumulator type does not differ from the input type (e.g. F16 accumulation)
if math_inst.element_a != math_inst.element_accumulator:
data_type_mixed = {
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_a,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
# Set the alignment of C based on the destination format.
for layout in layouts:
if data_type_mixed["c_type"] in [DataType.s32, DataType.f32]:
layout[2][1] = 4
elif data_type_mixed["c_type"] in [DataType.f16, DataType.bf16]:
layout[2][1] = 8
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, schedules)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
# Emit instance without C allocation+load
data_type_mixed["c_type"] = DataType.void
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type_mixed,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
#
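# SM90 note: in the WGMMA generators, the tile M/N/K is derived by scaling the
# instruction shape, the stage count is passed as 0 (which appears to let the collective
# builder pick the stage count automatically from shared-memory capacity), and the
# trailing [x, y, z] argument is the threadblock cluster shape. min_cc = max_cc = 90
# restricts these kernels to Hopper.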
def GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts_tf32 = [
[[LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.RowMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4], [LayoutType.ColumnMajor, 4]],
[[LayoutType.ColumnMajor, 4], [LayoutType.RowMajor, 4], [LayoutType.ColumnMajor, 4]],
]
math_inst = MathInstruction(
[64, 128, 8],
DataType.tf32, DataType.tf32, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
math_inst_largeN = MathInstruction(
[64, 256, 8],
DataType.tf32, DataType.tf32, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
tile_descriptions_large = [
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4],
0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst_largeN.instruction_shape[0]*2, math_inst_largeN.instruction_shape[1], math_inst_largeN.instruction_shape[2]*4],
0, [4, 1, 1], math_inst_largeN, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_small = [
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : DataType.f32,
"b_type" : DataType.f32,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
schedules_default = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
]
# TMA kernels with TT layout use EpilogueTransposed (NoSmemWarpSpecialized with swapped strides),
# because they are implemented as NN kernels underneath, and transposing the epilogue produces the correct output.
schedules_transposed_epilogue = [
[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.EpilogueTransposed],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.EpilogueTransposed]
]
# TMA kernels with TN, NN, or NT layout
layouts_tf32_tn_nn_nt = [layouts_tf32[0], layouts_tf32[2], layouts_tf32[3]]
# TMA kernels with TT layout
layouts_tf32_tt = [layouts_tf32[1]]
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_small, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_medium, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_large, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions_medium, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_small, data_types, [
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.EpilogueTransposed]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_medium, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed]
])
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions_large, data_types, [
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.EpilogueTransposed]
])
else:
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tn_nn_nt, tile_descriptions, data_types, schedules_default)
CreateGemmUniversal3xOperator(manifest, layouts_tf32_tt, tile_descriptions, data_types, schedules_transposed_epilogue)
#
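# For the tf32 layouts above the A/B/C alignment is 4 elements, i.e. 128-bit accesses
# for 32-bit data, and -- as the comment inside the function notes -- TT layouts are
# serviced by NN kernels with a transposed epilogue rather than by dedicated TT kernels.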
def GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 16]],
]
math_instructions = [
MathInstruction(
[64, 128, 32],
DataType.s8, DataType.s8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.u8, DataType.u8, DataType.s32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
tile_descriptions_small = [
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions_medium = [
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
]
tile_descriptions = tile_descriptions_medium + tile_descriptions_small
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : math_inst.element_accumulator,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.s8,
"d_type" : math_inst.element_a,
"acc_type" : math_inst.element_accumulator,
"epi_type" : DataType.f32
}
]
for data_type in data_types:
for layout in layouts:
layout[2][1] = 128 // DataTypeSize[data_type["d_type"]]
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type)
# persistent kernels with TMA epilogues
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Emit instance without C allocation+load
data_types += [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.void,
"d_type" : math_inst.element_accumulator,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
}
]
for data_type in data_types:
# Set the output alignment based on the destination format first.
for layout in layouts:
layout[2][1] = 128 // DataTypeSize[data_type["d_type"]]
# Pingpong persistent
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpong, EpilogueScheduleType.NoSmemWarpSpecialized]])
# Cooperative persistent
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_medium, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized]],
tile_schedulers=[TileSchedulerType.Persistent, TileSchedulerType.StreamK]
)
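# The int8 generator above derives the C/D alignment from the destination element width:
# layout[2][1] = 128 // DataTypeSize[d_type], i.e. 128-bit stores. For example, an s32 or
# f32 destination (32 bits) gives an alignment of 4 elements, while an s8 destination
# (8 bits) gives 16.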
def GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 12, 0):
return
# layouts for ABC and their alignments
layouts = [
[[LayoutType.RowMajor, 16], [LayoutType.ColumnMajor, 16], [LayoutType.ColumnMajor, 1]], # TN Layout
]
math_instructions = [
# inst 64x128x32
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 128, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
# inst 64x64x32
MathInstruction(
[64, 64, 32],
DataType.e4m3, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 64, 32],
DataType.e4m3, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 64, 32],
DataType.e5m2, DataType.e4m3, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
MathInstruction(
[64, 64, 32],
DataType.e5m2, DataType.e5m2, DataType.f32,
OpcodeClass.TensorOp,
MathOperation.multiply_add),
]
min_cc = 90
max_cc = 90
for math_inst in math_instructions:
data_types = [
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.f32,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f32,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.bf16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.bf16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.f16,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e4m3,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
{
"a_type" : math_inst.element_a,
"b_type" : math_inst.element_b,
"c_type" : DataType.f16,
"d_type" : DataType.e5m2,
"acc_type" : math_inst.element_accumulator,
"epi_type" : math_inst.element_accumulator
},
]
if math_inst.instruction_shape[1] == 128:
tile_descriptions_small = [
# 64x128x128
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0], math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
]
tile_descriptions = [
# 128x128x128
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*2, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
]
elif math_inst.instruction_shape[1] == 64:
tile_descriptions = [
# 256x64x128
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [1,2,1]),
TileDescription([math_inst.instruction_shape[0]*4, math_inst.instruction_shape[1], math_inst.instruction_shape[2]*4],
0, [4, 1, 1], math_inst, min_cc, max_cc, [2,1,1]),
]
else:
assert False, "math inst is not supported"
# Some schedules are disabled to save on library size.
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
schedules = [
#[KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]
]
stream_k_schedules = [[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.NoSmemWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]]
else:
schedules = [
# [KernelScheduleType.ScheduleAuto, EpilogueScheduleType.ScheduleAuto],
[KernelScheduleType.TmaWarpSpecialized, EpilogueScheduleType.NoSmemWarpSpecialized]
# TmaWarpSpecializedCooperative requires CUDA version >= 12.1 for optimal performance.
]
stream_k_schedules = []
for data_type in data_types:
# With No-SMEM epilogues
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, schedules)
if CudaToolkitVersionSatisfies(cuda_version, 12, 1):
# Persistent kernels with TMA epilogues
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]])
# Small tiles
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions_small, data_type,
[[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.TmaWarpSpecialized],
[KernelScheduleType.TmaWarpSpecializedPingpongFP8FastAccum, EpilogueScheduleType.NoSmemWarpSpecialized]])
# Add stream-K variants (with and without TMA epilogues)
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type, stream_k_schedules, tile_schedulers=[TileSchedulerType.StreamK])
CreateGemmUniversal3xOperator(manifest, layouts, tile_descriptions, data_type,
[[KernelScheduleType.TmaWarpSpecializedCooperative, EpilogueScheduleType.TmaWarpSpecializedCooperative],
[KernelScheduleType.TmaWarpSpecializedCooperativeFP8FastAccum, EpilogueScheduleType.TmaWarpSpecializedCooperative]],
tile_schedulers=[TileSchedulerType.StreamK])
#
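# The FP8 generators enumerate every ordered pair of e4m3/e5m2 operand types and, via the
# data_types list, output conversions to f32, f16, bf16, e4m3 and e5m2. The *FP8FastAccum
# kernel schedules presumably trade some accumulation accuracy for higher throughput
# (the exact semantics live in the CUTLASS kernel-schedule definitions, not here).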
def GenerateSM90_TensorOp_1684(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = MathInstruction(
[16, 8, 4],
DataType.f64, DataType.f64, DataType.f64,
OpcodeClass.TensorOp,
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([256, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 256, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([256, 32, 16], 3, [4, 1, 1], math_inst, min_cc, max_cc),
TileDescription([32, 256, 16], 3, [1, 4, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateGemmOperator(manifest, layouts, tile_descriptions,
data_type, alignment_constraints)
#
#
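# The GenerateSM90_TensorOp_1684* family mirrors the SM80 884 generators above, but with
# the [16, 8, 4] fp64 tensor-op instruction shape and min_cc = max_cc = 90, so these
# kernels are emitted for Hopper only.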
def GenerateSM90_TensorOp_1684_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8 ], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8 ], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8 ], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8 ], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8 ], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8 ], 4, [2, 1, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 3, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.ColumnMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.RowMajor, LayoutType.ColumnMajor),
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
(ComplexTransform.none, ComplexTransform.none),
(ComplexTransform.conj, ComplexTransform.none),
(ComplexTransform.none, ComplexTransform.conj),
(ComplexTransform.conj, ComplexTransform.conj)
]
CreateGemmOperator(manifest, layouts, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64]
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor),
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYRK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HERK computation
CreateRankKOperator(manifest, layouts, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
(LayoutType.RowMajor, LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
diag_types = [
DiagType.NonUnit, DiagType.Unit,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [
ComplexTransform.none, ComplexTransform.conj,
]
CreateTrmmOperator(manifest, layouts, side_modes, fill_modes, diag_types, tile_descriptions, \
data_type, alignment_constraints, complex_transforms)
#
#
def GenerateSM90_TensorOp_1684_symm(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 128, 16], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([128, 64, 16], 3, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 16], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 32, 16], 5, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([16, 32, 16], 5, [1, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 16, 16], 5, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.f64, DataType.f64, DataType.f64, DataType.f64]
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
#
#
def GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([128, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 128, 8], 3, [2, 4, 1], math_inst, min_cc, max_cc),
TileDescription([64, 64, 8], 3, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
#
def GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version):
if not CudaToolkitVersionSatisfies(cuda_version, 11, 8):
return
layouts = [
(LayoutType.ColumnMajor, LayoutType.ColumnMajor),
]
side_modes = [
SideMode.Left, SideMode.Right,
]
fill_modes = [
FillMode.Lower, FillMode.Upper,
]
math_inst = \
MathInstruction( \
[16, 8, 4], \
DataType.f64, DataType.f64, DataType.f64, \
OpcodeClass.TensorOp, \
MathOperation.multiply_add_complex_gaussian)
min_cc = 90
max_cc = 90
alignment_constraints = [1,]
tile_descriptions = [
TileDescription([64, 64, 8], 3, [4, 2, 1], math_inst, min_cc, max_cc),
TileDescription([64, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
TileDescription([32, 64, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 32, 8], 4, [2, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([16, 32, 8], 4, [1, 2, 1], math_inst, min_cc, max_cc),
#TileDescription([32, 16, 8], 4, [2, 1, 1], math_inst, min_cc, max_cc),
]
data_type = [DataType.cf64, DataType.cf64, DataType.cf64, DataType.cf64]
complex_transforms = [ComplexTransform.none,]
# SYMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.symmetric)
# HEMM computation
CreateSymmOperator(manifest, layouts, side_modes, fill_modes, tile_descriptions, \
data_type, alignment_constraints, BlasMode.hermitian)
#
###################################################################################################
#
def GenerateSM90(manifest, cuda_version):
GenerateSM90_TensorOp_16b_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_tf32_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_int8_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_fp8_WGMMA_gemm(manifest, cuda_version)
GenerateSM90_TensorOp_1684(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_rank_k_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_trmm_complex_gaussian(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex(manifest, cuda_version)
GenerateSM90_TensorOp_1684_symm_complex_gaussian(manifest, cuda_version)
###################################################################################################
def numeric_log_level(log_level: str) -> int:
"""
Converts the string identifier of the log level into the numeric identifier used
in setting the log level
  :param log_level: string representation of log level (e.g., 'INFO', 'DEBUG')
  :type log_level: str
:return: numeric representation of log level
:rtype: int
"""
numeric_level = getattr(logging, log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError(f'Invalid log level: {log_level}')
return numeric_level
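# For example, numeric_log_level('debug') returns logging.DEBUG (10), while an unrecognized
# name such as 'verbose' raises ValueError.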
# define_parser() is kept as a separate function so that the CUTLASS Python interface can
# leverage the functionality in this file without running this script via a shell prompt.
def define_parser():
parser = argparse.ArgumentParser(description="Generates device kernel registration code for CUTLASS Kernels")
parser.add_argument("--operations", default="all", help="Specifies the operation to generate (gemm, all)")
parser.add_argument("--build-dir", default=".", required=False, help="CUTLASS top-level build directory")
parser.add_argument("--curr-build-dir", default=".", help="CUTLASS current build directory. cmake files will be emitted in this directory")
parser.add_argument("--generator-target", default='library', help="Target of CUTLASS Library Generator.")
parser.add_argument("--architectures", default='53;60;61;70;75;80;90', help="Target compute architectures")
parser.add_argument("--kernels", default='', help='Comma delimited list to filter kernels by name.')
parser.add_argument("--ignore-kernels", default='', help='Comma delimited list of kernels to exclude from build.')
parser.add_argument("--filter-by-cc", default='True', type=str, help='If enabled, kernels whose compute capability range is not satisfied by the build target are excluded.')
parser.add_argument("--cuda-version", default="11.0.0", help="Semantic version string of CUDA Toolkit")
parser.add_argument('--kernel-filter-file', type=str, default=None, required=False, help='Full path of filter file')
parser.add_argument('--selected-kernel-list', type=str, default=None, required=False,
help='Specify the output log file containing all enabled kernels in this build')
parser.add_argument("--interface-dir", default=None, required=False, help="Interface header to kernels")
parser.add_argument("--disable-full-archs-compilation", action="store_true", required=False, help="Disable compilation for every archs in --architectures")
parser.add_argument("--log-level", default='info', type=numeric_log_level, required=False,
help='Logging level to be used by the generator script')
return parser
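# Example of reusing define_parser() programmatically (a sketch; the option values below are
# illustrative placeholders, not a recommended configuration):
#
#   parser = define_parser()
#   args = parser.parse_args(["--operations", "gemm", "--architectures", "80",
#                             "--cuda-version", "11.8.0"])
#   manifest = Manifest(args)
#   GenerateSM80(manifest, args.cuda_version)
#
# The equivalent shell invocation would be:
#   python generator.py --operations gemm --architectures 80 --cuda-version 11.8.0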
if __name__ == "__main__":
parser = define_parser()
args = parser.parse_args()
# Set the logging level based on the user-provided `--log-level` command-line option
logging.basicConfig(level=args.log_level)
manifest = Manifest(args)
GenerateSM50(manifest, args.cuda_version)
GenerateSM60(manifest, args.cuda_version)
GenerateSM61(manifest, args.cuda_version)
GenerateSM70(manifest, args.cuda_version)
GenerateSM75(manifest, args.cuda_version)
GenerateSM80(manifest, args.cuda_version)
GenerateSM90(manifest, args.cuda_version)
if 'library' in args.generator_target.split(','):
manifest.emit(GeneratorTarget.Library)
if args.selected_kernel_list is not None:
if len(manifest.selected_kernels) > 0:
with open(args.selected_kernel_list, 'w') as file_writer:
for line in manifest.selected_kernels:
file_writer.write("%s\n" % line)
#
###################################################################################################
| cutlass-main | tools/library/scripts/generator.py |
#
# \file conv3d_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
from library import *
###################################################################################################
#
class Conv3dOperation:
#
def __init__(self, conv_kind, iterator_algorithm, arch, tile_description, A, B, C, element_epilogue, \
stride_support, epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity4):
self.operation_kind = OperationKind.Conv3d
self.arch = arch
self.tile_description = tile_description
self.conv_kind = conv_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.iterator_algorithm = iterator_algorithm
self.stride_support = stride_support
self.swizzling_functor = swizzling_functor
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
intermediate_type = ''
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp:
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
else:
inst_shape = ''
return "%s%s%s%s3d_%s" % (ShortDataTypeNames[self.tile_description.math_instruction.element_accumulator], \
inst_shape, intermediate_type, ConvKindNames[self.conv_kind], IteratorAlgorithmNames[self.iterator_algorithm])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def configuration_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
threadblock = "%dx%d_%dx%d" % (
self.tile_description.threadblock_shape[0],
self.tile_description.threadblock_shape[1],
self.tile_description.threadblock_shape[2],
self.tile_description.stages
)
if self.stride_support == StrideSupport.Unity:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}_unity_stride"
else:
configuration_name = "cutlass_${opcode_class}_${extended_name}_${threadblock}"
return SubstituteTemplate(
configuration_name,
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
}
)
#
def procedural_name(self):
    ''' The procedural name is the same as the configuration name. '''
return self.configuration_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
class EmitConv3dInstance:
def __init__(self):
self.template = """
// Conv3d${conv_kind_name} ${iterator_algorithm_name} kernel instance "${operation_name}"
using ${operation_name}_base =
typename cutlass::conv::kernel::DefaultConv3d${conv_kind_name}<
${element_a},
cutlass::layout::TensorNDHWC,
${element_b},
cutlass::layout::TensorNDHWC,
${element_c},
cutlass::layout::TensorNDHWC,
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k} >,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor}, // cutlass::gemm::threadblock::GemmSplitKIdentityThreadblockSwizzle<>,
${stages},
cutlass::arch::OpMultiplyAdd,
${iterator_algorithm},
${stride_support}
>::Kernel;
"""
def emit(self, operation):
warp_shape = [int(operation.tile_description.threadblock_shape[idx] / operation.tile_description.warp_count[idx]) for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
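    # Worked example (illustrative values): a [128, 128, 32] threadblock with warp_count
    # [2, 2, 1] yields a 64x64x32 warp shape; an f32 output (32 bits) with alignment 4 gives
    # epilogue_vector_length = min(4 * 32, 128) / 32 = 4 elements per vectorized access.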
values = {
'operation_name': operation.procedural_name(),
'conv_kind': ConvKindTag[operation.conv_kind],
'conv_kind_name': ConvKindNames[operation.conv_kind].capitalize(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.tile_description.math_instruction.element_accumulator],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'iterator_algorithm': IteratorAlgorithmTag[operation.iterator_algorithm],
'iterator_algorithm_name': IteratorAlgorithmNames[operation.iterator_algorithm].capitalize(),
'stride_support': StrideSupportTag[operation.stride_support]
}
return SubstituteTemplate(self.template, values)
###################################################################################################
#
# Generator functions for all layouts
#
###################################################################################################
#
def GenerateConv3dTensorOp(manifest, tile_descriptions, min_cc, align = 128):
for tile in tile_descriptions:
for conv_kind in [ConvKind.Fprop, ConvKind.Dgrad, ConvKind.Wgrad]:
if conv_kind == ConvKind.Fprop or (tile.math_instruction.element_accumulator in [DataType.f16, DataType.f32]):
#
output_types = [tile.math_instruction.element_a, tile.math_instruction.element_accumulator] \
if DataTypeSize[tile.math_instruction.element_accumulator] == 32 \
else [tile.math_instruction.element_accumulator,]
for output_type in output_types:
A = TensorDescription(tile.math_instruction.element_a, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_a]))
B = TensorDescription(tile.math_instruction.element_b, LayoutType.TensorNDHWC, int(align / DataTypeSize[tile.math_instruction.element_b]))
C = TensorDescription(output_type, LayoutType.TensorNDHWC, max(1, int(align / DataTypeSize[output_type])))
manifest.append(Conv3dOperation(conv_kind, min_cc, tile, A, B, C, tile.math_instruction.element_accumulator))
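  # Note: the call above passes seven positional arguments, while the Conv3dOperation
  # constructor defined earlier in this file requires nine (it also expects
  # iterator_algorithm and stride_support), so this helper would raise a TypeError if
  # invoked as written.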
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitConv3dConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name)
self.instance_emitter = EmitConv3dInstance()
self.instance_template = """
${operation_instance}
// Derived class
struct ${operation_name} :
public ${operation_name}_base { };
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.header_template = """
/*
Generated by conv3d_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "conv3d_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.configuration_header = """
namespace cutlass {
namespace library {
// Initialize all instances
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.configuration_instance = """
using Operation_${operation_name} = cutlass::conv::device::ImplicitGemmConvolution<
${operation_name}>;
manifest.append(new cutlass::library::Conv3dOperation<
Operation_${operation_name}>(
"${operation_name}"));
"""
self.configuration_epilogue = """
}
"""
self.epilogue_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
#
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(SubstituteTemplate(self.header_template, {
'configuration_name': self.configuration_name
}))
self.operations = []
return self
#
def emit(self, operation):
self.operations.append(operation)
self.configuration_file.write(SubstituteTemplate(self.instance_template, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'operation_instance': self.instance_emitter.emit(operation)
}))
#
def __exit__(self, exception_type, exception_value, traceback):
self.configuration_file.write(SubstituteTemplate(self.configuration_header, {
'configuration_name': self.configuration_name
}))
for operation in self.operations:
self.configuration_file.write(SubstituteTemplate(self.configuration_instance, {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name()
}))
self.configuration_file.write(self.configuration_epilogue)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
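# Typical use of the configuration emitter (a sketch; in the full generator the manifest's
# emit machinery constructs and drives these emitters when writing the generated sources):
#
#   with EmitConv3dConfigurationLibrary(operation_path, configuration_name) as emitter:
#     for operation in operations:
#       emitter.emit(operation)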
###################################################################################################
###################################################################################################
| cutlass-main | tools/library/scripts/conv3d_operation.py |
#
# \file rank_2k_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Rank K update operation
#
###################################################################################################
#
class Rank2KOperation:
#
def __init__(self, rank_k_kind, arch, tile_description, A, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Rank2K
self.arch = arch
self.tile_description = tile_description
self.rank_k_kind = rank_k_kind
# tensor A and B have same data type and layout
self.A = A
self.B = A
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'syr2k' if self.blas_mode == BlasMode.symmetric else 'her2k'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.C.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.A.alignment, self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % self.A.alignment,
}
)
#
def configuration_name(self):
    ''' The configuration name is the same as the procedural name. '''
return self.procedural_name()
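# For illustration (values assumed, not taken from the generators above): an f64 tensor-op
# SYR2K with a [64, 64, 16] threadblock, 4 stages, column-major layout, lower fill mode and
# alignment 1 yields a procedural name roughly of the form
#   cutlass_tensorop_d884syr2k_64x64_16x4_n_l_align1
# where "884" is the flattened instruction shape (here [8, 8, 4]).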
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitRank2KUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.rank_k_template = """
// Rank 2K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.rank_k_complex_template = """
// Rank 2K operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Rank2K<
${element_a}, ${layout_a},
${element_b}, ${layout_b},
${element_c}, ${layout_c}, ${fill_mode},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a},
${transform_b},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'fill_mode': FillModeTag[operation.C.fill_mode],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform],
'transform_b': ComplexTransformTag[operation.B.complex_transform],
'blas_mode': BlasModeTag[operation.blas_mode]
}
rank_k_template = self.rank_k_complex_template if operation.is_complex() else self.rank_k_template
return SubstituteTemplate(rank_k_template, values)
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitRank2KConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
RankKKind.Universal: EmitRank2KUniversalInstance,
}
self.rank_k_kind_wrappers = {
RankKKind.Universal: 'Rank2KOperation',
}
self.instance_template = {
RankKKind.Universal: """
${compile_guard_start}
manifest.append(new ${rank_k_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
    }
    # wmma_guard_start is referenced in emit() below when wrapping WMMA-specialized
    # instances; the guard string here mirrors the one used by the GEMM configuration
    # emitter (an assumption -- no WMMA rank-2k kernels are generated at present).
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.header_template = """
/*
Generated by rank_2k_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "rank_2k_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.rank_k_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.rank_k_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'rank_k_kind': self.rank_k_kind_wrappers[operation.rank_k_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
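# Each generated configuration .cu file exposes an initialize_<configuration_name>(Manifest &)
# entry point; the library's generated top-level initialization code is expected to call these
# so that the emitted instances become visible to the manifest at runtime.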
###################################################################################################
| cutlass-main | tools/library/scripts/rank_2k_operation.py |
#
# \file symm_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a Symm update operation
#
###################################################################################################
#
class SymmOperation:
#
def __init__(self, symm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8, \
blas_mode = BlasMode.symmetric):
self.blas_mode = blas_mode
self.operation_kind = OperationKind.Symm
self.arch = arch
self.tile_description = tile_description
self.symm_kind = symm_kind
# tensor A and B have same data type and layout
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
operation_name = 'symm' if self.blas_mode == BlasMode.symmetric else 'hemm'
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, operation_name)
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)]
)
return "%s" % (ShortLayoutTypeNames[self.A.layout])
#
def side_mode_name(self):
return "%s" % (ShortSideModeNames[self.A.side_mode])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.A.fill_mode])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = self.C.alignment
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'side_mode': self.side_mode_name(),
'fill_mode': self.fill_mode_name(),
'alignment': "%d" % alignment,
}
)
#
def configuration_name(self):
    ''' The configuration name is the same as the procedural name. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitSymmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.symm_template = """
// Symm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Symm<
${element_a}, ${layout_a}, ${side_mode}, ${fill_mode},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.symm_complex_template = """
// Symm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Symm<
${element_a}, ${layout_a}, ${side_mode}, ${fill_mode},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue}
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${blas_mode}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'side_mode': SideModeTag[operation.A.side_mode],
'fill_mode': FillModeTag[operation.A.fill_mode],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(operation.A.alignment),
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'blas_mode': BlasModeTag[operation.blas_mode]
}
symm_template = self.symm_complex_template if operation.is_complex() else self.symm_template
return SubstituteTemplate(symm_template, values)
###################################################################################################
###################################################################################################
#
# Emitter functions for all targets
#
###################################################################################################
class EmitSymmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
SymmKind.Universal: EmitSymmUniversalInstance,
}
self.symm_kind_wrappers = {
SymmKind.Universal: 'SymmOperation',
}
self.instance_template = {
SymmKind.Universal: """
${compile_guard_start}
manifest.append(new ${symm_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
    }
    # wmma_guard_start is referenced in emit() below when wrapping WMMA-specialized
    # instances; the guard string here mirrors the one used by the GEMM configuration
    # emitter (an assumption -- no WMMA symm kernels are generated at present).
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"
self.header_template = """
/*
Generated by symm_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "symm_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.symm_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.symm_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'symm_kind': self.symm_kind_wrappers[operation.symm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| cutlass-main | tools/library/scripts/symm_operation.py |
#
# \file trmm_operation.py
#
# \brief Generates the CUTLASS Library's instances
#
#
import enum
import os.path
import shutil
import functools
import operator
from library import *
###################################################################################################
#
# Data structure modeling a TRMM operation
#
###################################################################################################
#
class TrmmOperation:
#
def __init__(self, trmm_kind, arch, tile_description, A, B, C, element_epilogue, \
epilogue_functor = EpilogueFunctor.LinearCombination, swizzling_functor = SwizzlingFunctor.Identity8):
self.operation_kind = OperationKind.Trmm
self.arch = arch
self.tile_description = tile_description
self.trmm_kind = trmm_kind
self.A = A
self.B = B
self.C = C
self.element_epilogue = element_epilogue
self.epilogue_functor = epilogue_functor
self.swizzling_functor = swizzling_functor
#
def is_complex(self):
complex_operators = [
MathOperation.multiply_add_complex,
MathOperation.multiply_add_complex_gaussian,
MathOperation.multiply_add_complex_fast_f32
]
    return self.tile_description.math_instruction.math_operation in complex_operators
#
def is_planar_complex(self):
# return self.trmm_kind in (TrmmKind.PlanarComplex, TrmmKind.PlanarComplexArray)
return False
#
def accumulator_type(self):
accum = self.tile_description.math_instruction.element_accumulator
if self.is_complex():
return get_complex_from_real(accum)
return accum
#
def short_math_name(self):
if self.tile_description.math_instruction.math_operation == MathOperation.multiply_add_complex_gaussian:
return "g%s" % ShortDataTypeNames[self.accumulator_type()]
return ShortDataTypeNames[self.accumulator_type()]
#
def core_name(self):
''' The basic operation kind is prefixed with a letter indicating the accumulation type. '''
inst_shape = ''
inst_operation = ''
intermediate_type = ''
math_operations_map = {
MathOperation.xor_popc: 'xor',
MathOperation.and_popc: 'and'
}
if self.tile_description.math_instruction.opcode_class == OpcodeClass.TensorOp or \
self.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp:
math_op = self.tile_description.math_instruction.math_operation
math_op_string = math_operations_map[math_op] if math_op in math_operations_map.keys() else ''
inst_shape = "%d%d%d" % tuple(self.tile_description.math_instruction.instruction_shape)
inst_shape += math_op_string
if self.tile_description.math_instruction.element_a != self.A.element and \
self.tile_description.math_instruction.element_a != self.tile_description.math_instruction.element_accumulator:
intermediate_type = DataTypeNames[self.tile_description.math_instruction.element_a]
return "%s%s%s%s" % (self.short_math_name(), inst_shape, intermediate_type, TrmmKindNames[self.trmm_kind])
#
def extended_name(self):
''' Append data types if they differ from compute type. '''
if self.is_complex():
extended_name = "${core_name}"
else:
if self.C.element != self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${element_c}_${core_name}_${element_a}"
elif self.C.element == self.tile_description.math_instruction.element_accumulator and \
self.A.element != self.tile_description.math_instruction.element_accumulator:
extended_name = "${core_name}_${element_a}"
else:
extended_name = "${core_name}"
extended_name = SubstituteTemplate(extended_name, {
'element_a': DataTypeNames[self.A.element],
'element_c': DataTypeNames[self.C.element],
'core_name': self.core_name()
})
return extended_name
#
def layout_name(self):
if self.is_complex() or self.is_planar_complex():
return "%s%s" % (
ShortComplexLayoutNames[(self.A.layout, self.A.complex_transform)],
ShortComplexLayoutNames[(self.B.layout, self.B.complex_transform)]
)
return "%s%s" % (ShortLayoutTypeNames[self.A.layout], ShortLayoutTypeNames[self.B.layout])
#
def side_mode_name(self):
return "%s" % (ShortSideModeNames[self.A.side_mode])
#
def fill_mode_name(self):
return "%s" % (ShortFillModeNames[self.A.fill_mode])
#
def diag_type_name(self):
return "%s" % (ShortDiagTypeNames[self.A.diag_type])
#
def procedural_name(self):
''' The full procedural name indicates architecture, extended name, tile size, and layout. '''
threadblock = self.tile_description.procedural_name()
opcode_class_name = OpcodeClassNames[self.tile_description.math_instruction.opcode_class]
alignment = max([self.C.alignment])
return SubstituteTemplate(
"cutlass_${opcode_class}_${extended_name}_${threadblock}_${layout}_${side_mode}_${fill_mode}_${diag_type}_align${alignment}",
{
'opcode_class': opcode_class_name,
'extended_name': self.extended_name(),
'threadblock': threadblock,
'layout': self.layout_name(),
'side_mode': self.side_mode_name(),
'fill_mode': self.fill_mode_name(),
'diag_type': self.diag_type_name(),
'alignment': "%d" % self.C.alignment,
}
)
#
def configuration_name(self):
    ''' The configuration name is the same as the procedural name. '''
return self.procedural_name()
###################################################################################################
#
# Emits single instances of a CUTLASS device-wide operator
#
###################################################################################################
#
class EmitTrmmUniversalInstance:
''' Responsible for emitting a CUTLASS template definition'''
def __init__(self):
self.trmm_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation}
>;
"""
self.trmm_complex_template = """
// Trmm operator ${operation_name}
using Operation_${operation_name} =
typename cutlass::gemm::device::Trmm<
${element_a}, ${layout_a},
${side_mode}, ${fill_mode}, ${diag_type},
${element_b}, ${layout_b},
${element_c}, ${layout_c},
${element_accumulator},
${opcode_class},
${arch},
cutlass::gemm::GemmShape<${threadblock_shape_m}, ${threadblock_shape_n}, ${threadblock_shape_k}>,
cutlass::gemm::GemmShape<${warp_shape_m}, ${warp_shape_n}, ${warp_shape_k}>,
cutlass::gemm::GemmShape<${instruction_shape_m}, ${instruction_shape_n}, ${instruction_shape_k}>,
${epilogue_functor}<
${element_c},
${epilogue_vector_length},
${element_accumulator},
${element_epilogue},
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
${swizzling_functor},
${stages},
${align_a},
${align_b},
${split_k_serial},
${math_operation},
${transform_a}
>;
"""
def emit(self, operation):
threadblock_shape = operation.tile_description.threadblock_shape
warp_count = operation.tile_description.warp_count
warp_shape = [threadblock_shape[idx] // warp_count[idx] for idx in range(3)]
epilogue_vector_length = int(min(operation.C.alignment * DataTypeSize[operation.C.element], 128) / DataTypeSize[operation.C.element])
values = {
'operation_name': operation.procedural_name(),
'element_a': DataTypeTag[operation.A.element],
'layout_a': LayoutTag[operation.A.layout],
'side_mode' : SideModeTag[operation.A.side_mode],
'fill_mode': FillModeTag[operation.A.fill_mode],
'diag_type' : DiagTypeTag[operation.A.diag_type],
'element_b': DataTypeTag[operation.B.element],
'layout_b': LayoutTag[operation.B.layout],
'element_c': DataTypeTag[operation.C.element],
'layout_c': LayoutTag[operation.C.layout],
'element_accumulator': DataTypeTag[operation.accumulator_type()],
'opcode_class': OpcodeClassTag[operation.tile_description.math_instruction.opcode_class],
'arch': "cutlass::arch::Sm%d" % operation.arch,
'threadblock_shape_m': str(operation.tile_description.threadblock_shape[0]),
'threadblock_shape_n': str(operation.tile_description.threadblock_shape[1]),
'threadblock_shape_k': str(operation.tile_description.threadblock_shape[2]),
'warp_shape_m': str(warp_shape[0]),
'warp_shape_n': str(warp_shape[1]),
'warp_shape_k': str(warp_shape[2]),
'instruction_shape_m': str(operation.tile_description.math_instruction.instruction_shape[0]),
'instruction_shape_n': str(operation.tile_description.math_instruction.instruction_shape[1]),
'instruction_shape_k': str(operation.tile_description.math_instruction.instruction_shape[2]),
'epilogue_vector_length': str(epilogue_vector_length),
'element_epilogue': str(DataTypeTag[operation.element_epilogue]),
'epilogue_functor': EpilogueFunctorTag[operation.epilogue_functor],
'swizzling_functor': SwizzlingFunctorTag[operation.swizzling_functor],
'stages': str(operation.tile_description.stages),
'align_a': str(1), # TRMM A's alignment is always 1 for no padding to work until we make zfill work with variable bytes
'align_b': str(operation.B.alignment),
'split_k_serial': 'false',
'math_operation': MathOperationTag[operation.tile_description.math_instruction.math_operation],
'transform_a': ComplexTransformTag[operation.A.complex_transform]
}
trmm_template = self.trmm_complex_template if operation.is_complex() else self.trmm_template
return SubstituteTemplate(trmm_template, values)
###################################################################################################
###################################################################################################
#
# Emitters functions for all targets
#
###################################################################################################
class EmitTrmmConfigurationLibrary:
def __init__(self, operation_path, configuration_name):
self.configuration_name = configuration_name
self.configuration_path = os.path.join(operation_path, "%s.cu" % configuration_name).replace('\\', '/')
self.instance_emitter = {
TrmmKind.Universal: EmitTrmmUniversalInstance,
}
self.trmm_kind_wrappers = {
TrmmKind.Universal: 'TrmmOperation',
}
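    # emit() below substitutes self.wmma_guard_start when the opcode class is WmmaTensorOp;
    # define it here (mirroring the other operation emitters) so that path cannot fail.
    # The guard string is assumed to match the one used by the GEMM emitter.
    self.wmma_guard_start = "#if defined(CUTLASS_ARCH_WMMA_SM${sm_number}_ENABLED)"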
self.instance_template = {
TrmmKind.Universal: """
${compile_guard_start}
manifest.append(new ${trmm_kind}<
Operation_${operation_name}
>("${operation_name}"));
${compile_guard_end}
"""
}
self.header_template = """
/*
Generated by trmm_operation.py - Do not edit.
*/
///////////////////////////////////////////////////////////////////////////////////////////////////
#include "cutlass/cutlass.h"
#include "cutlass/library/library.h"
#include "cutlass/library/manifest.h"
#include "library_internal.h"
#include "trmm_operation.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
self.initialize_function_template = """
///////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace library {
///////////////////////////////////////////////////////////////////////////////////////////////////
void initialize_${configuration_name}(Manifest &manifest) {
"""
self.epilogue_template = """
}
///////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace library
} // namespace cutlass
///////////////////////////////////////////////////////////////////////////////////////////////////
"""
def __enter__(self):
self.configuration_file = open(self.configuration_path, "w")
self.configuration_file.write(self.header_template)
self.instance_definitions = []
self.instance_wrappers = []
self.operations = []
return self
def emit(self, operation):
emitter = self.instance_emitter[operation.trmm_kind]()
self.operations.append(operation)
self.instance_definitions.append(emitter.emit(operation))
self.instance_wrappers.append(SubstituteTemplate(self.instance_template[operation.trmm_kind], {
'configuration_name': self.configuration_name,
'operation_name': operation.procedural_name(),
'trmm_kind': self.trmm_kind_wrappers[operation.trmm_kind],
'compile_guard_start': SubstituteTemplate(self.wmma_guard_start, {'sm_number': str(operation.arch)}) \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else "",
'compile_guard_end': "#endif" \
if operation.tile_description.math_instruction.opcode_class == OpcodeClass.WmmaTensorOp else ""
}))
def __exit__(self, exception_type, exception_value, traceback):
# Write instance definitions in top-level namespace
for instance_definition in self.instance_definitions:
self.configuration_file.write(instance_definition)
# Add wrapper objects within initialize() function
self.configuration_file.write(SubstituteTemplate(self.initialize_function_template, {
'configuration_name': self.configuration_name
}))
for instance_wrapper in self.instance_wrappers:
self.configuration_file.write(instance_wrapper)
self.configuration_file.write(self.epilogue_template)
self.configuration_file.close()
###################################################################################################
| cutlass-main | tools/library/scripts/trmm_operation.py |
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# this file creates the test/unit/gemm/device simt tests
outputDir = ""
################################################################################
# parameters
# Edge - for tiles, the edges represent the length of one side
# Ratio - the maximum ratio between 2 edges, limits the skinnyness of tiles
# MaxEdge - maximum length of each edge
# Min/Max - minimum/maximum of the product of edge lengths
################################################################################
warpsPerThreadblockEdge = [1, 2, 4, 8, 16]
warpsPerThreadblockRatio = 2
warpsPerThreadblockMax = 16
# NOTE 1x32 and 2x16 warp tile shapes fail validation for ~10% of cases
warpShapeEdges = [8, 16, 32, 64, 128, 256]
warpShapeRatio = 4
warpShapeMax = 64*64
warpShapeMin = 8*8
threadblockEdgeMax = 256
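# Worked example of the warp-shape screen below: with warpShapeRatio = 4, a 16x64 warp tile is
# kept (64 / 16 == 4) while an 8x64 tile is rejected (64 / 8 == 8); a similar ratio/max screen
# is applied to the warps-per-threadblock grid.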
# char, type bits/elem, max tile, L0 threadblock tiles
precisions = [
["c", "cutlass::complex<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["q", "cutlass::Quaternion<float>", 64, 64*128, [ [ 64, 128], [ 64, 32] ] ],
["d", "double", 64, 64*64, [ [ 64, 64], [ 32, 32] ] ],
["h", "cutlass::half_t", 16, 128*256, [ [256, 128], [ 64, 128], [ 64, 32] ] ],
["i", "int", 32, 128*128, [ [128, 64], [ 16, 32] ] ],
["s", "float", 32, 128*128, [ [128, 256], [128, 128], [ 64, 64] ] ],
["z", "cutlass::complex<double>", 128, 64*64, [ [ 32, 64], [ 16, 32] ] ],
]
# L1 will have a single kernel for every unique shape
# L2 will have everything else
transposes = [
[False, False],
[False, True],
[True, False],
[True, True]
]
################################################################################
# warps per threadblock
################################################################################
warpsPerThreadblocks = []
for warpsPerThreadblock0 in warpsPerThreadblockEdge:
for warpsPerThreadblock1 in warpsPerThreadblockEdge:
if warpsPerThreadblock0 / warpsPerThreadblock1 <= warpsPerThreadblockRatio and warpsPerThreadblock1 / warpsPerThreadblock0 <= warpsPerThreadblockRatio and warpsPerThreadblock0 * warpsPerThreadblock1 <= warpsPerThreadblockMax:
warpsPerThreadblocks.append([warpsPerThreadblock0,
warpsPerThreadblock1])
print("WarpsPerThreadblocks",warpsPerThreadblocks)
################################################################################
# warp shapes
################################################################################
warpNumThreads = 32
warpShapes = []
for warp0 in warpShapeEdges:
for warp1 in warpShapeEdges:
if warp0 / warp1 <= warpShapeRatio and warp1 / warp0 <= warpShapeRatio and warp0*warp1 <= warpShapeMax and warp0*warp1 > warpShapeMin:
warpShapes.append([warp0, warp1])
print("WarpShapes", warpShapes)
numL0 = 0
numL1 = 0
numL2 = 0
################################################################################
# create kernels
# create a file for each precision/transpose
# each file contains many tile sizes
################################################################################
# precisions
for precision in precisions:
# get precision char
precisionChar = precision[0]
precisionType = precision[1]
precisionBits = precision[2]
threadblockMaxElements = precision[3]
threadblockTilesL0 = precision[4]
# transposes
for transpose in transposes:
# get transpose char
columnMajorA = transpose[0]
columnMajorB = transpose[1]
transCharA = "n" if columnMajorA else "t"
transCharB = "n" if columnMajorB else "t"
# open file
fileName="simt_%sgemm_%s%s_sm50.cu" % (precisionChar, transCharA, transCharB)
print("\n", fileName)
filePath = "%s%s" % (outputDir, fileName)
out = open(filePath, "w+")
# write file header
out.write("/***************************************************************************************************\n"
" * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. \n"
" * SPDX-License-Identifier: BSD-3-Clause \n"
" * \n"
" * Redistribution and use in source and binary forms, with or without \n"
" * modification, are permitted provided that the following conditions are met: \n"
" * \n"
" * 1. Redistributions of source code must retain the above copyright notice, this \n"
" * list of conditions and the following disclaimer. \n"
" * \n"
" * 2. Redistributions in binary form must reproduce the above copyright notice, \n"
" * this list of conditions and the following disclaimer in the documentation \n"
" * and/or other materials provided with the distribution. \n"
" * \n"
" * 3. Neither the name of the copyright holder nor the names of its \n"
" * contributors may be used to endorse or promote products derived from \n"
" * this software without specific prior written permission. \n"
" * \n"
" * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n"
" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n"
" * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE \n"
" * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE \n"
" * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL \n"
" * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR \n"
" * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER \n"
" * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, \n"
" * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE \n"
" * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \n"
" *\n"
" **************************************************************************************************/\n"
"/*! \\file\n"
" \\brief Tests for device-wide GEMM interface\n"
"*/\n"
"\n"
"#include <iostream>\n"
"\n"
"#include \"cutlass/cutlass.h\"\n"
"#include \"cutlass/gemm/device/gemm.h\"\n"
"#include \"cutlass/numeric_types.h\"\n"
"\n"
"#include \"../../common/cutlass_unit_test.h\"\n"
"\n"
"#include \"cutlass/util/host_tensor.h\"\n"
"#include \"cutlass/util/tensor_view_io.h\"\n"
"#include \"cutlass/util/reference/host/tensor_fill.h\"\n"
"#include \"cutlass/util/reference/host/tensor_copy.h\"\n"
"#include \"cutlass/util/reference/host/tensor_compare.h\"\n"
"#include \"cutlass/util/reference/host/gemm.h\"\n"
"\n"
"#include \"testbed.h\"\n"
"\n")
foundThreadblockTilesL0 = {}
foundThreadblockTilesL1 = {}
########################################################################
# for each combination of tile sizes
########################################################################
for warpsPerThreadblock in warpsPerThreadblocks:
for warpShape in warpShapes:
warpThreadsM = 0
if warpShape[0] > warpShape[1]:
warpThreadsM = 8
else:
warpThreadsM = 4
warpThreadsN = warpNumThreads / warpThreadsM
# skip shapes with conflicting rectangularity
# they are unlikely to be fastest
blockG = warpsPerThreadblock[0] > warpsPerThreadblock[1]
blockL = warpsPerThreadblock[0] < warpsPerThreadblock[1]
warpG = warpShape[0] > warpShape[1]
warpL = warpShape[0] < warpShape[1]
blockG2 = warpsPerThreadblock[0] > warpsPerThreadblock[1]*2
blockL2 = warpsPerThreadblock[0]*2 < warpsPerThreadblock[1]
warpG2 = warpShape[0] > warpShape[1]*2
warpL2 = warpShape[0]*2 < warpShape[1]
if blockG2 and warpL: continue
if blockL2 and warpG: continue
if warpG2 and blockL: continue
if warpL2 and blockG: continue
# check threadblock ratios and max
threadblockTile = [warpShape[0]*warpsPerThreadblock[0],
warpShape[1]*warpsPerThreadblock[1]]
if threadblockTile[0] * threadblockTile[1] > threadblockMaxElements: continue
if threadblockTile[0] > threadblockEdgeMax: continue
if threadblockTile[1] > threadblockEdgeMax: continue
totalThreads = warpNumThreads*warpsPerThreadblock[0]*warpsPerThreadblock[1]
# calculate unroll
# ensure that every iteration at least a full load of A,B are done
unrollMin = 8
unrollMin0 = totalThreads / threadblockTile[0]
unrollMin1 = totalThreads / threadblockTile[1]
unroll = max(unrollMin, unrollMin0, unrollMin1)
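                # Illustrative: 256 threads with a 64-wide threadblock tile give unrollMin0 = 4
                # (the floor of 8 wins), while 512 threads with a 32-wide tile give
                # unrollMin0 = 16 and raise the unroll.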
threadTileM = warpShape[0] / warpThreadsM
threadTileN = warpShape[1] / warpThreadsN
if threadTileM < 2 or threadTileN < 2: continue
if threadTileM*threadTileN*precisionBits > 8*8*32: continue
# epilogue currently only supports N < WarpNumThreads
if threadblockTile[1] < warpNumThreads: continue
# limit smem
smemBitsA = threadblockTile[0]*unroll*2*precisionBits
smemBitsB = threadblockTile[1]*unroll*2*precisionBits
smemKBytes = (smemBitsA+smemBitsB)/8/1024
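                # The factor of 2 in smemBitsA/B presumably accounts for the two-stage double
                # buffer; 48 KB is the static shared-memory budget targeted for these kernels.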
if (smemKBytes > 48): continue
# test level 0
testLevel = -1
for tileId in range(0, len(threadblockTilesL0)):
tbTile = threadblockTilesL0[tileId]
if tbTile[0] == threadblockTile[0] and tbTile[1] == threadblockTile[1]:
if tuple(tbTile) not in foundThreadblockTilesL0:
testLevel = 0
numL0 += 1
foundThreadblockTilesL0[tuple(tbTile)] = True
# test level 1
if testLevel < 0:
threadblockTileAlreadyUsed = False
if tuple(threadblockTile) not in foundThreadblockTilesL1:
testLevel = 1
numL1 += 1
foundThreadblockTilesL1[tuple(threadblockTile)] = True
# test level 2
if testLevel < 0:
testLevel = 2
numL2 += 1
################################################################
# write this tile to file
################################################################
print("%ix%ix%i__%ix%i_%ix%i_%ix%i L%i" % (
threadblockTile[0], threadblockTile[1], unroll,
threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1], testLevel))
out.write("////////////////////////////////////////////////////////////////////////////////\n"
"// Elements / Thread: %3i x %3i\n"
"// Threads / Warp: %3i x %3i\n"
"// Warps / Block: %3i x %3i\n"
"// Threadblock: %3i x %3i x %2i\n"
% ( threadTileM, threadTileN,
warpThreadsM, warpThreadsN,
warpsPerThreadblock[0], warpsPerThreadblock[1],
threadblockTile[0], threadblockTile[1], unroll
)
)
out.write("CUTLASS_TEST_L%i(SM50_device_%sgemm_%s%s, %ix%ix%i_%ix%ix1_%ix%i_%ix%i_%ix%i, {\n" % (
testLevel,
precisionChar,
transCharA,
transCharB,
threadblockTile[0],
threadblockTile[1],
unroll,
warpShape[0],
warpShape[1],
threadTileM,
threadTileN,
warpThreadsM,
warpThreadsN,
warpsPerThreadblock[0],
warpsPerThreadblock[1]
))
out.write(" using precision = %s;\n" % precisionType)
out.write(" using ThreadblockShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n" % (
threadblockTile[0],
threadblockTile[1],
unroll))
out.write(" using WarpShape = cutlass::gemm::GemmShape<%i, %i, %i>;\n\n" % (
warpShape[0],
warpShape[1],
unroll))
out.write(" static int const kEpilogueElementsPerAccess = 1;\n"
" using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;\n"
" using EpilogueOutputOp = cutlass::epilogue::thread::LinearCombination<\n"
" precision, kEpilogueElementsPerAccess, precision, precision>;\n\n")
out.write(" using Gemm = cutlass::gemm::device::Gemm<\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::%sMajor,\n"
" precision, cutlass::layout::RowMajor,\n"
" precision,\n"
" cutlass::arch::OpClassSimt,\n"
" cutlass::arch::Sm50,\n"
" ThreadblockShape, WarpShape, InstructionShape,\n"
" EpilogueOutputOp,\n"
" cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,\n"
" 2 // Stages\n"
" >;\n" % (
"Column" if columnMajorA else "Row",
"Column" if columnMajorB else "Row",
))
out.write(" EXPECT_TRUE(test::gemm::device::TestAllGemm<Gemm>());\n"
"} )\n\n")
out.close()
print("NumKernels:", numL0, numL1, numL2)
| cutlass-main | test/unit/gemm/device/simt_sm50.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests the high-level GEMM interface
"""
from math import ceil
import unittest
import cutlass
import cutlass_bindings
import cutlass.utils.datatypes as datatypes
from cutlass.backend.utils.device import device_cc
from utils import ExpectException
class GemmEquivalence:
"""
Helper class for testing the equivalence of different constructions of the Gemm interface
"""
def __init__(self, element_A, element_B, element_C, element_D, element_accumulator,
layout_A, layout_B, layout_C, alignment_A, alignment_B, alignment_C):
self.element_A = element_A
self.element_B = element_B
self.element_C = element_C
self.element_D = element_D
self.element_accumulator = element_accumulator
self.layout_A = layout_A
self.layout_B = layout_B
self.layout_C = layout_C
self.alignment_A = alignment_A
self.alignment_B = alignment_B
self.alignment_C = alignment_C
self.plan = cutlass.op.Gemm(element_A=element_A, element_B=element_B, element_C=element_C,
element_D=element_D, element_accumulator=element_accumulator,
layout_A=layout_A, layout_B=layout_B, layout_C=layout_C)
self.op = self.plan.construct(alignment_A=alignment_A, alignment_B=alignment_B, alignment_C=alignment_C)
def _plans_equal(self, other_plan) -> bool:
"""
Compares whether two plans are equal
:param other_plan: plan to compare against the default GEMM
:type other_plan: cutlass.op.Gemm
:return: whether `other_plan` is equivalent to `self.plan`
:rtype: bool
"""
other_op = other_plan.construct(alignment_A=self.alignment_A, alignment_B=self.alignment_B, alignment_C=self.alignment_C)
# Compare whether the operations are equal by comparing the C++ code that would be emitted for them
return self.op.rt_module.emit() == other_op.rt_module.emit()
def generic_test(self):
"""
Tests the equivalence of various constructions of the Gemm interface when using CUTLASS data types
and layouts for constructing the Gemm interface
"""
if not datatypes.numpy_available:
return
# Test when specifying all parameters
plan_other = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_A=self.layout_A, layout_B=self.layout_B, layout_C=self.layout_C)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A
plan_other = cutlass.op.Gemm(element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_B=self.layout_B, layout_C=self.layout_C,
element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
        # Test when specifying all parameters but A and B, using the generic element and layout.
# Only run this test if the layouts and types for A and B are equal.
if self.element_A == self.element_B and self.layout_A == self.layout_B:
plan_other = cutlass.op.Gemm(element_C=self.element_C, element_D=self.element_D, element_accumulator=self.element_accumulator,
layout_C=self.layout_C, element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
        # Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if self.element_C == self.element_accumulator:
plan_other = cutlass.op.Gemm(element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, layout_A=self.layout_A, layout_B=self.layout_B,
layout_C=self.layout_C)
assert self._plans_equal(plan_other)
# Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same.
if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D
and self.element_A == self.element_accumulator and
self.layout_A == self.layout_B and self.layout_A == self.layout_C):
plan_other = cutlass.op.Gemm(element=self.element_A, layout=self.layout_A)
assert self._plans_equal(plan_other)
def numpy_test(self):
"""
Tests the equivalence of various constructions of the Gemm interface when using numpy as a frontend
"""
if not datatypes.numpy_available:
return
import numpy as np
type_A = datatypes.numpy_type(self.element_A)
type_B = datatypes.numpy_type(self.element_B)
type_C = datatypes.numpy_type(self.element_C)
type_D = datatypes.numpy_type(self.element_D)
type_accum = datatypes.numpy_type(self.element_accumulator)
layout_to_order = {
cutlass.LayoutType.RowMajor: 'C',
cutlass.LayoutType.ColumnMajor: 'F'
}
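        # numpy's 'C' (row-major) and 'F' (column-major) orders mirror the CUTLASS
        # RowMajor/ColumnMajor layouts, so the arrays below carry layout information implicitly.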
size = (2, 2)
A = np.zeros(size, order=layout_to_order[self.layout_A], dtype=type_A)
B = np.zeros(size, order=layout_to_order[self.layout_B], dtype=type_B)
C = np.zeros(size, order=layout_to_order[self.layout_C], dtype=type_C)
D = np.zeros(size, order=layout_to_order[self.layout_C], dtype=type_D)
# Test when specifying all parameters via tensors
plan_np = cutlass.op.Gemm(A=A, B=B, C=C, D=D, element_accumulator=type_accum)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A as tensors
plan_np = cutlass.op.Gemm(B=B, C=C, D=D, element_accumulator=type_accum, element_A=type_A, layout_A=self.layout_A)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A and B as tensors and using generic element and output
# Only run this test if the layouts and types for A and B are equal.
if type_A == type_B and self.layout_A == self.layout_B:
plan_np = cutlass.op.Gemm(C=C, D=D, element_accumulator=type_accum, element=type_A, layout=self.layout_A)
assert self._plans_equal(plan_np)
        # Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if type_C == type_accum:
plan_np = cutlass.op.Gemm(A=A, B=B, C=C, D=D)
assert self._plans_equal(plan_np)
# Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same.
if (type_A == type_B and type_A == type_C and type_A == type_D and type_A == type_accum and
self.layout_A == self.layout_B and self.layout_A == self.layout_C):
plan_np = cutlass.op.Gemm(element=type_A, layout=self.layout_A)
assert self._plans_equal(plan_np)
def test_all(self):
"""
Runs all tests on the Gemm interface
"""
self.generic_test()
self.numpy_test()
class GemmEquivalenceTest(unittest.TestCase):
"""
Tests the equivalence of different constructions of the Gemm interface
"""
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_8_8_8(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=8, alignment_B=8, alignment_C=8)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f32_ntn_8_8_8(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f32,
layout_A=cutlass.LayoutType.ColumnMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.ColumnMajor,
alignment_A=8, alignment_B=8, alignment_C=8)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for FP16 Tensor Core tests.")
def test_gemm_equivalence_f16_f16_f16_f16_f16_ttt_4_4_4(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f16, element_B=cutlass.DataType.f16, element_C=cutlass.DataType.f16,
element_D=cutlass.DataType.f16, element_accumulator=cutlass.DataType.f16,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.RowMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=8, alignment_B=8, alignment_C=8)
gemm_eq.test_all()
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for F64 Tensor Core tests.")
def test_gemm_equivalence_f64_f64_f64_f64_f64_tnt_1_1_1(self):
gemm_eq = GemmEquivalence(
element_A=cutlass.DataType.f64, element_B=cutlass.DataType.f64, element_C=cutlass.DataType.f64,
element_D=cutlass.DataType.f64, element_accumulator=cutlass.DataType.f64,
layout_A=cutlass.LayoutType.RowMajor, layout_B=cutlass.LayoutType.ColumnMajor, layout_C=cutlass.LayoutType.RowMajor,
alignment_A=1, alignment_B=1, alignment_C=1)
gemm_eq.test_all()
class GemmErrorTests(unittest.TestCase):
"""
Tests various error scenarios that arise with the high-level Gemm interface
"""
def test_alignment(self):
"""
Tests case in which the alignment specified is unsupported
"""
plan = cutlass.op.Gemm(element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
with ExpectException(True, 'Alignment 16 is not supported for F16. The construction should fail.'):
op = plan.construct(alignment_A=16, alignment_B=16, alignment_C=16)
def test_tensorop_availability(self):
"""
Tests case in which only SIMT operations are available but TensorOp is requested
"""
cc = device_cc()
        # F64 Tensor Core operations are only available on devices with CC >= 80
supports_tensorop_f64 = cc >= 80
plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f64, layout=cutlass.LayoutType.RowMajor)
error_msg = f'Incorrectly raised an exception for availability of TensorOp with F64 operands on SM{cc}'
with ExpectException(not supports_tensorop_f64, error_msg):
plan.opclass = cutlass.OpcodeClass.TensorOp
expected_opclass = cutlass.OpcodeClass.TensorOp if supports_tensorop_f64 else cutlass.OpcodeClass.Simt
assert plan.opclass == expected_opclass, f'Expected opclass to be {expected_opclass}, but received {plan.opclass} for SM{cc}'
@unittest.skipIf(device_cc() < 70, "Device compute capability is insufficient for F16 Tensor Core tests.")
def test_opclass_switch(self):
"""
Tests cases in which the opcode class in question is switched (e.g., from TensorOp to SIMT)
"""
plan = cutlass.op.Gemm( element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
assert plan.opclass == cutlass.OpcodeClass.TensorOp
# Ensure that all tile descriptions have opclass of TensorOp
for td in plan.tile_descriptions():
assert td.math_instruction.opcode_class == cutlass_bindings.OpClass.TensorOp
plan.opclass = cutlass.OpcodeClass.Simt
# Ensure that all tile descriptions have opclass of Simt
for td in plan.tile_descriptions():
assert td.math_instruction.opcode_class == cutlass_bindings.OpClass.Simt
def test_invalid_tile_description(self):
"""
Tests scenarios in which an invalid tile description is provided for a given CC
"""
cc = device_cc()
plan = cutlass.op.Gemm(cc=cc, element=cutlass.DataType.f16, layout=cutlass.LayoutType.RowMajor)
td = plan.tile_descriptions()[0]
stages = td.stages
# Zero stage count is valid for SM90+, as this is used to indicate that the builder's auto stage
# count should be used
with ExpectException(cc < 90, f'Requested zero stages'):
td.stages = 0
plan.construct(td)
if cc < 90:
with ExpectException(cc < 80, f'Requested more than 2 stages on SM{cc}'):
td.stages = 3
plan.construct(td)
else:
original_kschedule = td.kernel_schedule
original_eschedule = td.epilogue_schedule
with ExpectException(False, f'Incorrectly flagged an error for insufficient shared memory'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.NoSmemWarpSpecialized
td.stages = 3
plan.construct(td)
# Reset schedules
td.kernel_schedule = original_kschedule
td.epilogue_schedule = original_eschedule
with ExpectException(True, f'Requested too many stages'):
td.stages = 100
plan.construct(td)
# Reset stage count
td.stages = stages
cluster_shape = td.cluster_shape
with ExpectException(cc < 90, f'Requested non-unit cluster shape on SM{cc}'):
td.cluster_shape = [2, 1, 1]
plan.construct(td)
# Reset cluster shape
td.cluster_shape = cluster_shape
with ExpectException(cc < 90, f'Requested a non-auto schedule on SM{cc}'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecialized
plan.construct(td)
with ExpectException(True, f'Requested a non-auto kernel schedule with an auto epilogue schedule'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedPingpong
td.epilogue_schedule = cutlass.EpilogueScheduleType.ScheduleAuto
plan.construct(td)
with ExpectException(True, f'Requested an auto kernel schedule with a non-auto epilogue schedule'):
td.kernel_schedule = cutlass.KernelScheduleType.ScheduleAuto
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecialized
plan.construct(td)
with ExpectException(cc < 90, f'Requested a tile scheduler on SM{cc}'):
td.kernel_schedule = cutlass.KernelScheduleType.TmaWarpSpecializedCooperative
td.epilogue_schedule = cutlass.EpilogueScheduleType.TmaWarpSpecializedCooperative
td.tile_scheduler = cutlass.TileSchedulerType.StreamK
plan.construct(td)
# Ensure that all returned tile descriptions are unique
ops = {}
for i, td in enumerate(plan.tile_descriptions()):
op = plan.construct(td)
code_str = op.rt_module.emit()
if code_str in ops:
conflicting_td = ops[code_str]
assert False, f'Multiple tile descriptions emitted {code_str}\nTile descriptions are:\n{td}\n{conflicting_td}'
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/interface/gemm_interface.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Tests the high-level Conv2d interface
"""
from math import ceil
import unittest
import cutlass
import cutlass_bindings
import cutlass.utils.datatypes as datatypes
from cutlass.backend.utils.device import device_cc
from utils import ExpectException
import os
class Conv2dEquivalence:
"""
Helper class for testing the equivalence of different constructions of the Conv2d interface
"""
def __init__(self, conv_kind, element_A, element_B, element_C, element_D, element_accumulator,
alignment_A, alignment_B, alignment_C):
self.element_A = element_A
self.element_B = element_B
self.element_C = element_C
self.element_D = element_D
self.element_accumulator = element_accumulator
self.alignment_A = alignment_A
self.alignment_B = alignment_B
self.alignment_C = alignment_C
self.conv_kind = conv_kind
self.plan = cutlass.op.Conv2d(
kind=self.conv_kind, element_A=element_A, element_B=element_B, element_C=element_C,
element_D=element_D, element_accumulator=element_accumulator)
self.op = self.plan.construct(
alignment_A=self.alignment_A, alignment_B=self.alignment_B,
alignment_C=self.alignment_C)
def _plans_equal(self, other_plan) -> bool:
"""
Compares whether two plans are equal
:param other_plan: plan to compare against the default Conv2d
:type other_plan: cutlass.op.Conv2d
:return: whether `other_plan` is equivalent to `self.plan`
:rtype: bool
"""
other_op = other_plan.construct(
alignment_A=self.alignment_A, alignment_B=self.alignment_B,
alignment_C=self.alignment_C)
return self.op.rt_module.emit() == other_op.rt_module.emit()
def generic_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using CUTLASS data types
and layouts for constructing the Conv2d interface
"""
if not datatypes.numpy_available:
return
# Test when specifying all parameters
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_A=self.element_A, element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator)
assert self._plans_equal(plan_other)
# Test when specifying all parameters but A
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_B=self.element_B, element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
element=self.element_A)
assert self._plans_equal(plan_other)
        # Test when specifying all parameters but A and B, using the generic element
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_C=self.element_C,
element_D=self.element_D, element_accumulator=self.element_accumulator,
element=self.element_A)
assert self._plans_equal(plan_other)
# Test without explicit accumulator. Only run if the type of C and the accumulator are equal
if self.element_C == self.element_accumulator:
plan_other = cutlass.op.Conv2d(
kind=self.conv_kind,
element_C=self.element_C,
element_D=self.element_D,
element=self.element_A)
assert self._plans_equal(plan_other)
        # Test with only the generic types. Only run if the types of A, B, C, and D are the same
if (self.element_A == self.element_B and self.element_A == self.element_C and self.element_A == self.element_D
and self.element_A == self.element_accumulator):
plan_other = cutlass.op.Conv2d(kind=self.conv_kind, element=self.element_A)
assert self._plans_equal(plan_other)
def numpy_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using numpy as a frontend
"""
if not datatypes.numpy_available:
return
import numpy as np
type_A = datatypes.numpy_type(self.element_A)
type_B = datatypes.numpy_type(self.element_B)
type_C = datatypes.numpy_type(self.element_C)
type_D = datatypes.numpy_type(self.element_D)
type_accum = datatypes.numpy_type(self.element_accumulator)
size = (2, 2)
A = np.zeros(size, dtype=type_A)
B = np.zeros(size, dtype=type_B)
C = np.zeros(size, dtype=type_C)
D = np.zeros(size, dtype=type_D)
return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D)
def torch_test(self):
"""
Tests the equivalence of various constructions of the Conv2d interface when using torch as a frontend
"""
if not datatypes.torch_available:
return
import torch
type_A = datatypes.torch_type(self.element_A)
type_B = datatypes.torch_type(self.element_B)
type_C = datatypes.torch_type(self.element_C)
type_D = datatypes.torch_type(self.element_D)
type_accum = datatypes.torch_type(self.element_accumulator)
size = (2, 2)
A = torch.empty(size, dtype=type_A)
B = torch.empty(size, dtype=type_B)
C = torch.empty(size, dtype=type_C)
D = torch.empty(size, dtype=type_D)
return self.tensor_test(type_A, type_B, type_C, type_D, type_accum, A, B, C, D)
def tensor_test(self, type_A, type_B, type_C, type_D, type_accum, A, B, C, D):
# Test when specifying all parameters via tensors
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D, element_accumulator=type_accum)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A as tensors
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, B=B, C=C, D=D, element_accumulator=type_accum, element_A=type_A)
assert self._plans_equal(plan_np)
# Test when specifying all parameters but A and B as tensors and using generic element and output
if type_A == type_B:
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, C=C, D=D, element_accumulator=type_accum, element=type_A)
assert self._plans_equal(plan_np)
        # Test without explicit accumulator. Only run if the types of C and the accumulator are the same.
if type_C == type_accum:
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, A=A, B=B, C=C, D=D)
assert self._plans_equal(plan_np)
# Test with only the generic types and layouts. Only run if types and layouts of A, B, C, and D are the same.
if (type_A == type_B and type_A == type_C and type_A == type_D and type_A == type_accum):
plan_np = cutlass.op.Conv2d(kind=self.conv_kind, element=type_A)
assert self._plans_equal(plan_np)
def test_all(self):
"""
Runs all tests on the Gemm interface
"""
self.generic_test()
self.numpy_test()
self.torch_test()
@unittest.skipIf(device_cc() <= 80, 'Device compute capability is insufficient for SM80 tests.')
class ConvEquivalenceTest(unittest.TestCase):
"""
Tests the equivalence of different constructions of the Conv2d interface
"""
pass
type2alignment = {
cutlass.DataType.f16: 8,
cutlass.DataType.f32: 4
}
def add_test(conv_kind, element_A, element_B, element_C, element_D, element_accumulator):
test_name = f"test_conv2d_{conv_kind}_{element_A}_{element_B}_{element_C}_{element_D}_{element_accumulator}"
def run(self):
conv2d_eq = Conv2dEquivalence(
conv_kind=conv_kind,
element_A=element_A, element_B=element_B,
element_C=element_C, element_D=element_D,
element_accumulator=element_accumulator,
alignment_A=type2alignment[element_A], alignment_B=type2alignment[element_B],
alignment_C=type2alignment[element_C]
)
conv2d_eq.test_all()
setattr(ConvEquivalenceTest, test_name, run)
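    # setattr attaches the closure under a descriptive name so unittest discovers it as a
    # regular test method of ConvEquivalenceTest.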
for conv_kind in ["fprop", "wgrad", "dgrad"]:
for types in [
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f16],
[cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32],
[cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32]
]:
add_test(conv_kind, types[0], types[1], types[2], types[3], types[4])
@unittest.skipIf(device_cc() <= 80, 'Device compute capability is insufficient for SM80 tests.')
class Conv2dErrorTests(unittest.TestCase):
"""
Tests various error scenarios that arise with the high-level Gemm interface
"""
def test_alignment(self):
"""
Tests case in which the alignment specified is unsupported
"""
plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16)
with ExpectException(True, 'Alignment 3 is not supported for F16. The construction should fail.'):
op = plan.construct(alignment_A=3, alignment_B=3, alignment_C=3)
def test_invalid_tile_description(self):
"""
Tests scenarios in which an invalid tile description is provided for a given CC
"""
plan = cutlass.op.Conv2d(kind="fprop", element=cutlass.DataType.f16)
td = plan.tile_descriptions()[0]
td.threadblock_shape=[17, 32, 5]
plan.tile_description = td
with ExpectException(True, 'The threadblock shape is invalid. The compilation should fail.'):
plan.compile()
# Clean up the error message
os.remove("./cutlass_python_compilation_device_error.txt")
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/interface/conv2d_interface.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Helper functions & classes for interface test
"""
class ExpectException:
"""
Utility class to assert that an exception was raised when expected
Example:
.. highlight:: python
.. code-block:: python
        with ExpectException(True, 'Division by zero'):
x = 1.0 / 0.0
:param exception_expected: whether an exception is expected to be raised
:type exception_expected: bool
:param message: message to print if an exception is raised when not expected or vice versa
:type message: str
"""
def __init__(self, exception_expected: bool, message: str = ''):
self.exception_expected = exception_expected
self.message = message
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
exception_raised = exc_type is not None
assert self.exception_expected == exception_raised, self.message
# Suppress the exception
return True
| cutlass-main | test/python/interface/utils.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Low-level functionality tests for Conv2d operands on SM80
"""
from conv2d_test_utils import *
import cutlass
import logging
cutlass.set_log_level(logging.WARNING)
cc = 80
@unittest.skipIf(device_cc() != cc, 'Device compute capability is invalid for SM80 tests.')
class Conv2dSm80(unittest.TestCase):
"""
Wrapper class to which tests will be added dynamically in __main__
"""
pass
conv_problems = get_conv_problems()
# Tests for optimized & analytic
for conv_kind in ["fprop", "wgrad", "dgrad"]:
# F16, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=2, instruction_shape=[1, 1, 1])
# F16, tensor op
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, analytic iterator
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f16, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="analytic")
# F16, tensor op, f32 output
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16])
# F16, tensor op, different tile description
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8])
# F32, simt
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="simt", threadblock_shape=[128, 128, 8],
warp_count=[4, 2, 1], stages=4, instruction_shape=[1, 1, 1])
# Tf32, tensorop
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f32, cutlass.DataType.f32, cutlass.DataType.f32,
opclass="tensor_op", threadblock_shape=[128, 128, 16],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8]
)
# Split-K
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="serial",
split_k_slices=2)
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode="parallel",
split_k_slices=5)
# Swizzling functor
add_test(
Conv2dSm80, cc, conv_kind, conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 64, 32],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 8], swizzle=4)
# Tests for few channels and fixed channels
# F16, tensor op, few channels
for c, tb, stage, inst in zip([2, 1],
[[128, 128, 64], [128, 128, 32]],
[3, 2],
[[16, 8, 16], [16, 8, 8]]):
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=tb,
warp_count=[2, 2, 1], stages=stage, instruction_shape=inst, iterator_algorithm="few_channels"
)
# F16, tensor op, fixed channels
for c in [8, 4, 2]:
add_test(
Conv2dSm80, cc, "fprop", conv2d_few_channel_problemsizes(c), cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], iterator_algorithm="fixed_channels"
)
# Test activations
for activation in ["relu", "leaky_relu"]:
for split_k_mode, split_k_slices in zip(["parallel", "serial", "parallel"], [1, 7, 5]):
add_test(
Conv2dSm80, cc, "fprop", conv_problems, cutlass.DataType.f16, cutlass.DataType.f32, cutlass.DataType.f16,
opclass="tensor_op", threadblock_shape=[128, 128, 64],
warp_count=[2, 2, 1], stages=3, instruction_shape=[16, 8, 16], split_k_mode=split_k_mode,
split_k_slices=split_k_slices, activation=activation)
if __name__ == '__main__':
unittest.main()
| cutlass-main | test/python/conv2d/conv2d_sm80.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
"""
Utility functions for Conv2d tests
"""
import torch
import cutlass
import unittest
import cutlass_bindings
from cutlass.utils.datatypes import binding_type, binding_opclass
from cutlass.backend.test.conv2d_testbed import Conv2dLauncher, getTensorRef, getTensorView
from cutlass.backend.utils.device import device_cc
from cutlass.backend.test.utils import get_name_conv2d
import numpy as np
def conv2d_few_channel_problemsizes(channels):
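    # Each problem below lists, in order: input extents (N, H, W, C), filter extents
    # (K, R, S, C), padding, stride, dilation, convolution mode, split-K slices, and groups
    # (argument order assumed from the cutlass_bindings.conv.Conv2dProblemSize signature).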
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 8, 8, channels),
cutlass_bindings.Tensor4DCoord(16, 3, 3, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 16, 16, channels),
cutlass_bindings.Tensor4DCoord(16, 3, 3, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 16, 16, channels),
cutlass_bindings.Tensor4DCoord(16, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(32, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
return problem_sizes
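# Mappings from cutlass.DataType to the equivalent torch / numpy dtypes,
# used by Conv2dLauncherFrontend.uniform_init below.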
torch_dtype = {
cutlass.DataType.f16: torch.float16,
cutlass.DataType.f32: torch.float32,
cutlass.DataType.f64: torch.float64
}
numpy_dtype = {
cutlass.DataType.f16: np.float16,
cutlass.DataType.f32: np.float32,
cutlass.DataType.f64: np.float64
}
def validate_problem_size(ps, conv_kind, split_k_slices):
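    """
    Recompute the output extents (P, Q) from the activation size, padding,
    dilation, and stride, and reject problem sizes whose stored P/Q disagree.
    Split-K dgrad with a non-unit stride is also rejected, as it is unsupported.
    """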
P = (ps.H + 2 * ps.pad_h - ps.dilation_h * (ps.R - 1) - 1) // ps.stride_h + 1
Q = (ps.W + 2 * ps.pad_w - ps.dilation_w * (ps.S - 1) - 1) // ps.stride_w + 1
if P != ps.P or Q != ps.Q:
return False
# Split-K (serial or parallel) is not supported for strided dgrad
if conv_kind == "dgrad" and split_k_slices > 1 and (ps.stride_h > 1 or ps.stride_w > 1):
return False
return True
# Override the backend launcher
class Conv2dLauncherFrontend(Conv2dLauncher):
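    """
    Conv2d launcher that drives a cutlass.Conv2d frontend plan rather than a
    backend operation directly. Input tensors are allocated either with numpy
    (NHWC-shaped arrays) or with torch (NCHW tensors kept in channels_last
    memory format), selected by the `backend` argument.
    """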
def __init__(self, plan: cutlass.Conv2d, seed: int = 80, backend="numpy"):
self.operation = plan
self.conv_kind = plan.conv_kind
self.seed = seed
self.backend = backend
self.dtype_A = plan._element_a
self.dtype_B = plan._element_b
self.dtype_C = plan._element_c
self.dtype_acc = plan._element_accumulator
self.layout_A = cutlass_bindings.TensorNHWC
self.layout_B = cutlass_bindings.TensorNHWC
self.layout_C = cutlass_bindings.TensorNHWC
self.layout_D = cutlass_bindings.TensorNHWC
self.element_compute = cutlass_bindings.float32
self.enable_cached_results = True
# Get randomization_max
if self.dtype_A in [cutlass.DataType.f16, cutlass.DataType.bf16]:
if self.dtype_acc in [cutlass.DataType.f16, cutlass.DataType.bf16]:
self.randomization_max = 2
else:
self.randomization_max = 3
else:
self.randomization_max = 7
self.activation = plan.activation
self.host_conv2d = cutlass_bindings.test.conv.host.conv2d
def set_seed(self):
if self.backend == "numpy":
np.random.seed(self.seed)
else:
torch.manual_seed(self.seed)
def uniform_init(self, size, dtype):
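        # numpy path defers to the base-class initializer; torch path draws
        # uniform values and rounds them up so entries are integers in
        # [-randomization_max, randomization_max], stored in channels_last format.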
if self.backend == "numpy":
return super().uniform_init(size, numpy_dtype[dtype])
else:
tensor = torch.ceil(
torch.empty(size=size, dtype=torch_dtype[dtype], device="cuda").uniform_(-self.randomization_max - 0.5, self.randomization_max - 0.5)
).to(memory_format=torch.channels_last)
return tensor
def zeros_like(self, tensor):
if self.backend == "numpy":
return np.zeros_like(tensor)
else:
return torch.zeros_like(tensor).to(memory_format=torch.channels_last)
def reference(self, ps, A, B, C, alpha, beta, activation):
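        # numpy path reuses the CUTLASS host reference; torch path computes the
        # reference with torch conv2d / conv2d_input / conv2d_weight depending on
        # whether the plan is fprop, dgrad, or wgrad, then applies the activation.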
if self.backend == "numpy":
numpy_result = self.host_reference(ps, A, B, C, alpha, beta, activation)
return numpy_result
else:
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
torch_result = alpha * torch.ops.aten.conv2d(
A,
B,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w)
) + beta * C
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
torch_result = alpha * torch.nn.grad.conv2d_input(
(ps.N, ps.C, ps.H, ps.W),
B,
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
elif self.conv_kind == cutlass_bindings.conv.Operator.wgrad:
torch_result = alpha * torch.nn.grad.conv2d_weight(
B,
(ps.K, ps.C, ps.R, ps.S),
A,
padding=(ps.pad_h, ps.pad_w),
stride=(ps.stride_h, ps.stride_w)
) + beta * C
else:
raise Exception(f"Conv kind {self.conv_kind} is currently unsupported.")
if activation == cutlass.backend.epilogue.relu:
torch_result = torch.nn.functional.relu(torch_result)
elif activation == cutlass.backend.epilogue.leaky_relu:
torch_result = torch.nn.functional.leaky_relu(torch_result, 0.5)
return torch_result
def host_reference(self, problem_size, tensor_A, tensor_B, tensor_C, alpha, beta, activation):
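        # Compute the conv2d reference on the host, or load it from the on-disk
        # result cache; when caching is enabled only a hash of the reference
        # output tensor is stored and returned.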
if self.element_compute == cutlass_bindings.float16:
alpha = cutlass_bindings.float16(alpha)
beta = cutlass_bindings.float16(beta)
elif self.element_compute == cutlass_bindings.int32:
alpha = int(alpha)
beta = int(beta)
else:
alpha = alpha
beta = beta
# If cached result is loaded
cached_result_loaded = False
if self.enable_cached_results:
# Get problem key
cached_test_key = cutlass_bindings.test.conv.host.CreateCachedConv2dTestKey(
self.conv_kind,
problem_size,
alpha,
beta,
getTensorView(
tensor_A, self.layout_A, self.conv_kind, problem_size, "a"
),
getTensorView(
tensor_B, self.layout_B, self.conv_kind, problem_size, "b"
),
getTensorView(
tensor_C, self.layout_C, self.conv_kind, problem_size, "c"
),
)
cached_test_key.problem = cached_test_key.problem + f"_{activation.tag.split('::')[-1]}"
cached_test_result = cutlass_bindings.test.conv.host.CachedTestResult()
conv2d_result_cache_name = "cached_results_SM%d_%d.txt" % (
self.operation.arch,
self.seed,
)
cached_results = cutlass_bindings.test.conv.host.CachedTestResultListing(
conv2d_result_cache_name
)
# CachedTestResultListing cached_results(conv2d_result_cache_name);
cached = cached_results.find(cached_test_key)
cached_result_loaded = cached[0]
if cached_result_loaded:
cached_test_result = cached[1]
if not cached_result_loaded:
# Compute the conv2d on host
tensor_D_ref = np.ones_like(tensor_C)
tensor_ref_A = getTensorRef(
tensor_A, self.layout_A, self.conv_kind, problem_size, "a"
)
tensor_ref_B = getTensorRef(
tensor_B, self.layout_B, self.conv_kind, problem_size, "b"
)
tensor_ref_C = getTensorRef(
tensor_C, self.layout_C, self.conv_kind, problem_size, "c"
)
tensor_ref_D_ref = getTensorRef(
tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d"
)
self.host_conv2d(
self.conv_kind,
problem_size,
tensor_ref_A,
tensor_ref_B,
tensor_ref_C,
tensor_ref_D_ref,
alpha,
beta,
)
if activation == cutlass.backend.epilogue.leaky_relu:
tensor_D_ref = activation.numpy(tensor_D_ref, 0.5)
else:
tensor_D_ref = activation.numpy(tensor_D_ref)
tensor_view_D_ref = getTensorView(
tensor_D_ref, self.layout_D, self.conv_kind, problem_size, "d"
)
if self.enable_cached_results:
cached_test_result.D = cutlass_bindings.test.conv.host.TensorHash(
tensor_view_D_ref
)
cached_results = (
cutlass_bindings.test.conv.host.CachedTestResultListing(
conv2d_result_cache_name
)
)
cached_results.append(cached_test_key, cached_test_result)
cached_results.write(conv2d_result_cache_name)
else:
return tensor_D_ref
return cached_test_result.D
def equal(self, tensor_D, tensor_D_ref, problem_size):
if self.backend == "numpy":
return super().equal(tensor_D, tensor_D_ref, problem_size)
else:
torch.cuda.synchronize()
return torch.equal(tensor_D, tensor_D_ref)
def run(self, ps, split_k_mode=cutlass_bindings.conv.SplitKMode.Serial, split_k_slices=1, alpha=1.0, beta=0.0):
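        """
        Allocate A/B/C/D for the given problem size (layout depends on the
        backend and conv kind), run the frontend plan, and compare the device
        result against the reference implementation.
        """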
#
# Initialize input and output tensors
#
if self.conv_kind == cutlass_bindings.conv.Operator.fprop:
if self.backend == "torch":
tensor_A_size = (ps.N, ps.C, ps.H, ps.W)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.K, ps.P, ps.Q)
else:
tensor_A_size = (ps.N, ps.H, ps.W, ps.C)
tensor_B_size = (ps.K, ps.R, ps.S, ps.C)
tensor_C_size = (ps.N, ps.P, ps.Q, ps.K)
elif self.conv_kind == cutlass_bindings.conv.Operator.dgrad:
if self.backend == "torch":
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.K, ps.C, ps.R, ps.S)
tensor_C_size = (ps.N, ps.C, ps.H, ps.W)
else:
tensor_A_size = (ps.N, ps.P, ps.Q, ps.K)
tensor_B_size = (ps.K, ps.R, ps.S, ps.C)
tensor_C_size = (ps.N, ps.H, ps.W, ps.C)
elif self.conv_kind == cutlass_bindings.conv.Operator.wgrad:
if self.backend == "torch":
tensor_A_size = (ps.N, ps.K, ps.P, ps.Q)
tensor_B_size = (ps.N, ps.C, ps.H, ps.W)
tensor_C_size = (ps.K, ps.C, ps.R, ps.S)
else:
tensor_A_size = (ps.N, ps.P, ps.Q, ps.K)
tensor_B_size = (ps.N, ps.H, ps.W, ps.C)
tensor_C_size = (ps.K, ps.R, ps.S, ps.C)
else:
raise Exception(f"Conv kind {self.conv_kind} is not supported")
self.set_seed()
tensor_A = self.uniform_init(size=tensor_A_size, dtype=self.dtype_A)
tensor_B = self.uniform_init(size=tensor_B_size, dtype=self.dtype_B)
tensor_C = self.uniform_init(size=tensor_C_size, dtype=self.dtype_C)
tensor_D = self.zeros_like(tensor_C)
self.operation.run(tensor_A, tensor_B, tensor_C, tensor_D,
stride=(ps.stride_h, ps.stride_w),
padding=(ps.pad_h, ps.pad_w),
dilation=(ps.dilation_h, ps.dilation_w),
alpha=alpha, beta=beta,
split_k=(split_k_mode, split_k_slices))
tensor_D_ref = self.reference(
ps, tensor_A, tensor_B, tensor_C, alpha, beta, self.activation
)
return self.equal(tensor_D, tensor_D_ref, ps)
def add_test(
cls,
cc,
conv_kind,
problem_sizes,
element,
element_accumulator,
element_output,
opclass,
threadblock_shape,
warp_count,
instruction_shape,
stages,
iterator_algorithm=None,
swizzle=None,
split_k_mode="serial",
split_k_slices=1,
activation = "identity"
):
"""Create a test-running function with the given specification"""
test_name = get_name_conv2d(
cc, conv_kind, element, element_accumulator,
element_output, opclass, threadblock_shape, warp_count, instruction_shape, stages,
iterator_algorithm, swizzle, split_k_mode, split_k_slices, activation)
def run(self):
# Create the plan
plan = cutlass.Conv2d(
kind=conv_kind,
element=element,
element_accumulator=element_accumulator,
element_C=element_output,
element_D=element_output
)
# Set the opclass
plan.opclass = opclass
# Set the tile description
td = {
"threadblock_shape": threadblock_shape,
"warp_count": warp_count,
"stages": stages,
"instruction_shape": instruction_shape,
}
plan.tile_description = td
# Set iterator algorithm
if iterator_algorithm is not None:
plan.iterator_algorithm = iterator_algorithm
# Set swizzling functor
if swizzle is not None:
plan.swizzling_stride = swizzle
if activation != "identity":
if activation == "leaky_relu":
plan.activation = (cutlass.epilogue.leaky_relu, 0.5)
else:
plan.activation = getattr(cutlass.epilogue, activation)
conv2d_launcher = Conv2dLauncherFrontend(plan, 80, backend="numpy")
for ps in problem_sizes:
if not validate_problem_size(ps, conv_kind, split_k_slices): continue
self.assertTrue(
conv2d_launcher.run(ps, split_k_mode, split_k_slices, 1.0, 0.5)
)
setattr(cls, test_name, run)
return run
def get_conv_problems():
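    """Default conv2d testbed problem sizes plus extra alignment-4 and alignment-2 cases."""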
# 64: minimum channel size
conv_problems = list(cutlass_bindings.test.conv.TestbedConv2dProblemSizes(64).conv2d_default_sizes)
# Insert alignment 4 & 2 tests
conv_problems += [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 14),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 14),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 23, 56, 98),
cutlass_bindings.Tensor4DCoord(128, 3, 3, 98),
cutlass_bindings.Tensor4DCoord(4, 0, 5, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
return conv_problems
| cutlass-main | test/python/conv2d/conv2d_test_utils.py |
#################################################################################################
#
# Copyright (c) 2023 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
import unittest
if __name__ == '__main__':
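    # Discover every conv2d_*.py test module in this directory, run the combined
    # suite, and fail loudly if any test case does not pass.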
loader = unittest.TestLoader()
tests = loader.discover('./', 'conv2d_*.py')
testRunner = unittest.runner.TextTestRunner()
results = testRunner.run(tests)
if not results.wasSuccessful():
raise Exception('Test cases failed')
| cutlass-main | test/python/conv2d/run_all_tests.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmTF32nhwcTF32nhwcTF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
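        # Build the operation bottom-up: math instruction -> tensor descriptions
        # -> tile description -> epilogue functor -> Conv2dOperation, then run
        # the full conv2d testbed against it.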
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float32, element_b=cutlass_bindings.float32,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_dgrad_implicit_gemm_tf32nhwc_tf32nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3(self):
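        # Unity-stride dgrad with f32 accumulation; 3-stage variant of the
        # 128x128x32 threadblock tile.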
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=4,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage3_64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_unity_stride_stage4_64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=4,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.cu
from cutlass.backend.conv2d_operation import *
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dDgradImplicitGemmF16nhwcF16nhwcF16nhwcTensorOpF16SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
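        # Dgrad with f16 accumulation and f16 output, analytic iterator algorithm.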
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float16, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float16)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Unity,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_dgrad_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dStridedDgradImplicitGemmF16NHWCF16NHWCF32NHWCTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
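        # Strided dgrad uses StrideSupport.Strided together with the
        # StridedDgradIdentitySwizzle1 swizzling functor, unlike the
        # unity-stride dgrad tests.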
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x256_64x3_64x64x64(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 256, 64], stages=3,
warp_count=[2, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4_128x128_32x3_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Strided_Dgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_128x128_32x3_64x64x32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.dgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.StridedDgradIdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 56, 56, 12),
cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 55, 55, 12),
cutlass_bindings.Tensor4DCoord(8, 1, 1, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_strided_dgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
def conv2d_few_channel_problemsizes(channels):
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 8, 8, channels),
cutlass_bindings.Tensor4DCoord(16, 3, 3, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 16, 16, channels),
cutlass_bindings.Tensor4DCoord(16, 3, 3, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 16, 16, channels),
cutlass_bindings.Tensor4DCoord(16, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(32, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 7, 7, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 224, 224, channels),
cutlass_bindings.Tensor4DCoord(64, 5, 5, channels),
cutlass_bindings.Tensor4DCoord(1, 1, 1, 1),
cutlass_bindings.MatrixCoord(2, 2),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
return problem_sizes
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dFpropFewChannelsF16NHWCF16NHWCF16NHWCTensorOpF32SM80(unittest.TestCase):
def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_2(self):
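        # Few-channels iterator: A/B alignment is set to the small channel count
        # (2 here), matching the problem sizes built by
        # conv2d_few_channel_problemsizes(2).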
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=2)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 64], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.few_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(2)))
def test_SM80_Device_Conv2d_Fprop_Few_Channels_ImplicitGemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_channels_1(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=1)
C = TensorDescription(
element=cutlass_bindings.float16,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
tile_description = TileDescription(
threadblock_shape=[128, 128, 32], stages=2,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.fprop, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.few_channels,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation, conv2d_few_channel_problemsizes(1)))
if __name__ == '__main__':
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_fprop_few_channels_f16nhwc_f16nhwc_f16nhwc_tensor_op_f32_sm80.py |
#################################################################################################
#
# Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################################
# test/unit/conv/device/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.cu
import cutlass.backend
from cutlass.backend import *
from cutlass.backend.test import *
from cutlass.backend.utils.device import device_cc
import unittest
@unittest.skipIf(device_cc() < 80, "Device compute capability is insufficient for SM80 tests.")
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
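        # Wgrad with f16 operands, f32 accumulation, and f32 output, analytic
        # iterator algorithm.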
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
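
    # Non-square 64x256x32 threadblock tile with an m16n8k16 MMA instruction and a
    # 1x4x1 warp arrangement.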
def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 16],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=8)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[64, 256, 32], stages=3,
warp_count=[1, 4, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
self.assertTrue(test_all_conv2d(operation))
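
    # The alignment-4 variants below use a custom problem size whose channel count (12)
    # is divisible by 4 but not by 8, which the default alignment-8 kernels cannot handle.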
def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.analytic,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
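
    # Optimized iterator-algorithm variant of the alignment-4 test above.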
def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
math_inst = MathInstruction(
instruction_shape=[16, 8, 8],
element_a=cutlass_bindings.float16, element_b=cutlass_bindings.float16,
element_accumulator=cutlass_bindings.float32, opcode_class=cutlass_bindings.OpClass.TensorOp,
math_operation=MathOperation.multiply_add
)
A = TensorDescription(
element=math_inst.element_a,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
B = TensorDescription(
element=math_inst.element_b,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
C = TensorDescription(
element=cutlass_bindings.float32,
layout=cutlass_bindings.TensorNHWC,
alignment=4)
tile_description = TileDescription(
threadblock_shape=[128, 128, 16], stages=3,
warp_count=[2, 2, 1],
math_instruction=math_inst
)
epilogue_functor = LinearCombination(
C.element, C.alignment,
math_inst.element_accumulator, cutlass_bindings.float32)
operation = Conv2dOperation(
conv_kind=cutlass_bindings.conv.Operator.wgrad, iterator_algorithm=cutlass_bindings.conv.IteratorAlgorithm.optimized,
arch=80, tile_description=tile_description, A=A, B=B, C=C,
stride_support=StrideSupport.Strided,
epilogue_functor=epilogue_functor,
swizzling_functor=cutlass_bindings.IdentitySwizzle1
)
problem_sizes = [
cutlass_bindings.conv.Conv2dProblemSize(
cutlass_bindings.Tensor4DCoord(1, 4, 4, 12),
cutlass_bindings.Tensor4DCoord(8, 3, 3, 12),
cutlass_bindings.Tensor4DCoord(0, 0, 0, 0),
cutlass_bindings.MatrixCoord(3, 3),
cutlass_bindings.MatrixCoord(1, 1),
cutlass_bindings.conv.Mode.cross_correlation,
1, 1
),
]
self.assertTrue(test_all_conv2d(operation, problem_sizes))
if __name__ == '__main__':
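    # Pre-allocate the device memory pool (2**26 bytes = 64 MiB) used by the
    # operations under test before running the suite.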
cutlass.backend.get_memory_pool(2**26, 2**26)
unittest.main()
| cutlass-main | test/python/backend/conv/conv2d_wgrad_implicit_gemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_sm80.py |