python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149)
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from modulus.sym.geometry.tessellation import Tessellation
from modulus.sym.geometry.primitives_3d import Box, Sphere, Cylinder, VectorizedBoxes
from modulus.sym.utils.io.vtk import var_to_polyvtk
from stl import mesh as np_mesh
import time
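# benchmark how fast boundary and interior point sampling runs for a tessellated
# STL geometry and for several constructive solid geometry (CSG) configurations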
def speed_check(geo, nr_points):
tic = time.time()
s = geo.sample_boundary(nr_points=nr_points)
surface_sample_time = time.time() - tic
var_to_polyvtk(s, "boundary")
tic = time.time()
s = geo.sample_interior(nr_points=nr_points, compute_sdf_derivatives=False)
volume_sample_time = time.time() - tic
var_to_polyvtk(s, "interior")
print(
"Surface sample (seconds per million point): {:.3e}".format(
1000000 * surface_sample_time / nr_points
)
)
print(
"Volume sample (seconds per million point): {:.3e}".format(
1000000 * volume_sample_time / nr_points
)
)
if __name__ == "__main__":
# number of points to sample for speed test
nr_points = 1000000
# tessellated geometry speed test
mesh = np_mesh.Mesh.from_file("./stl_files/tessellated_example.stl")
geo = Tessellation(mesh)
print("Tesselated Speed Test")
print("Number of triangles: {:d}".format(mesh.vectors.shape[0]))
speed_check(geo, nr_points)
# primitives speed test
box = Box(point_1=(-1, -1, -1), point_2=(1, 1, 1))
sphere = Sphere(center=(0, 0, 0), radius=1.2)
cylinder_1 = Cylinder(center=(0, 0, 0), radius=0.5, height=2)
cylinder_2 = cylinder_1.rotate(angle=float(np.pi / 2.0), axis="x")
cylinder_3 = cylinder_1.rotate(angle=float(np.pi / 2.0), axis="y")
all_cylinders = cylinder_1 + cylinder_2 + cylinder_3
box_minus_sphere = box & sphere
geo = box_minus_sphere - all_cylinders
print("CSG Speed Test")
speed_check(geo, nr_points)
# make boxes for many body check
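# box corners are placed quasi-randomly: multiples of irrational numbers taken modulo 0.8
# (shifted by 0.1) spread the 0.05-sized boxes roughly uniformly inside the unit cube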
nr_boxes = [10, 100, 500]
boxes = []
for i in range(max(nr_boxes)):
x_pos = (np.sqrt(5.0) * i % 0.8) + 0.1
y_pos = (np.sqrt(3.0) * i % 0.8) + 0.1
z_pos = (np.sqrt(7.0) * i % 0.8) + 0.1
boxes.append(
np.array(
[[x_pos, x_pos + 0.05], [y_pos, y_pos + 0.05], [z_pos, z_pos + 0.05]]
)
)
boxes = np.array(boxes)
for nr_b in nr_boxes:
# csg many object speed test
geo = Box((0, 0, 0), (1, 1, 1))
for i in range(nr_b):
geo = geo - Box(tuple(boxes[i, :, 0]), tuple(boxes[i, :, 1]))
print("CSG Many Box Speed Test, Number of Boxes " + str(nr_b))
speed_check(geo, nr_points)
| modulus-sym-main | examples/geometry/speed_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import numpy as np
from modulus.sym.geometry.tessellation import Tessellation
from modulus.sym.geometry.discrete_geometry import DiscreteGeometry
from modulus.sym.utils.io.vtk import var_to_polyvtk
from modulus.sym.geometry.parameterization import Parameterization, Parameter
if __name__ == "__main__":
# make geometry for each bracket
bracket_files = glob.glob("./bracket_stl/*.stl")
bracket_files.sort()
brackets = []
radius = []
width = []
for f in bracket_files:
# get param values
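# the radius and width are assumed to be encoded in the STL filename as the 4th and 6th
# underscore-separated fields (the ".stl" suffix is stripped from the width field)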
radius.append(float(f.split("_")[3]))
width.append(float(f.split("_")[5][:-4]))
# make geometry
brackets.append(Tessellation.from_stl(f))
# make discretely parameterized geometry
parameterization = Parameterization(
{
Parameter("radius"): np.array(radius)[:, None],
Parameter("width"): np.array(width)[:, None],
}
)
geo = DiscreteGeometry(brackets, parameterization)
# sample geometry over entire parameter range
s = geo.sample_boundary(nr_points=1000000)
var_to_polyvtk(s, "parameterized_bracket_boundary")
s = geo.sample_interior(nr_points=1000000)
var_to_polyvtk(s, "parameterized_bracket_interior")
| modulus-sym-main | examples/geometry/parameterized_tesselated_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from modulus.sym.geometry.primitives_2d import Rectangle, Circle
from modulus.sym.utils.io.vtk import var_to_polyvtk
from modulus.sym.geometry.parameterization import Parameterization, Parameter
if __name__ == "__main__":
# make plate with parameterized hole
# make parameterized primitives
plate = Rectangle(point_1=(-1, -1), point_2=(1, 1))
y_pos = Parameter("y_pos")
parameterization = Parameterization({y_pos: (-1, 1)})
circle = Circle(center=(0, y_pos), radius=0.3, parameterization=parameterization)
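# y_pos stays symbolic inside the geometry, so boundary/interior sampling draws it from
# (-1, 1) unless a fixed value is supplied via the parameterization argument (as done below)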
geo = plate - circle
# sample geometry over entire parameter range
s = geo.sample_boundary(nr_points=100000)
var_to_polyvtk(s, "parameterized_boundary")
s = geo.sample_interior(nr_points=100000)
var_to_polyvtk(s, "parameterized_interior")
# sample specific parameter
s = geo.sample_boundary(
nr_points=100000, parameterization=Parameterization({y_pos: 0})
)
var_to_polyvtk(s, "y_pos_zero_boundary")
s = geo.sample_interior(
nr_points=100000, parameterization=Parameterization({y_pos: 0})
)
var_to_polyvtk(s, "y_pos_zero_interior")
| modulus-sym-main | examples/geometry/parameterized_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import matplotlib.pyplot as plt
import numpy as np
from sympy import Number, Symbol, Heaviside, atan, sin, cos, sqrt
import os
from modulus.sym.geometry.primitives_2d import Polygon
from modulus.sym.geometry.parameterization import Parameterization, Parameter
from modulus.sym.utils.io.vtk import var_to_polyvtk
# Naca implementation modified from https://stackoverflow.com/questions/31815041/plotting-a-naca-4-series-airfoil
# https://en.wikipedia.org/wiki/NACA_airfoil#Equation_for_a_cambered_4-digit_NACA_airfoil
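# the camber line and its slope are built from sympy Heaviside terms so the piecewise
# fore/aft definitions stay symbolic and can be consumed by the Polygon geometry below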
def camber_line(x, m, p, c):
cl = []
for xi in x:
cond_1 = Heaviside(xi, 0) * Heaviside((c * p) - xi, 0)
cond_2 = Heaviside(-xi, 0) + Heaviside(xi - (c * p), 0)
v_1 = m * (xi / p**2) * (2.0 * p - (xi / c))
v_2 = m * ((c - xi) / (1 - p) ** 2) * (1.0 + (xi / c) - 2.0 * p)
cl.append(cond_1 * v_1 + cond_2 * v_2)
return cl
def dyc_over_dx(x, m, p, c):
dd = []
for xi in x:
cond_1 = Heaviside(xi) * Heaviside((c * p) - xi)
cond_2 = Heaviside(-xi) + Heaviside(xi - (c * p))
v_1 = ((2.0 * m) / p**2) * (p - xi / c)
v_2 = ((2.0 * m) / (1 - p) ** 2) * (p - xi / c)
dd.append(atan(cond_1 * v_1 + cond_2 * v_2))
return dd
def thickness(x, t, c):
th = []
for xi in x:
term1 = 0.2969 * (sqrt(xi / c))
term2 = -0.1260 * (xi / c)
term3 = -0.3516 * (xi / c) ** 2
term4 = 0.2843 * (xi / c) ** 3
term5 = -0.1015 * (xi / c) ** 4
th.append(5 * t * c * (term1 + term2 + term3 + term4 + term5))
return th
def naca4(x, m, p, t, c=1):
th = dyc_over_dx(x, m, p, c)
yt = thickness(x, t, c)
yc = camber_line(x, m, p, c)
line = []
for xi, thi, yti, yci in zip(x, th, yt, yc):
line.append((xi - yti * sin(thi), yci + yti * cos(thi)))
x.reverse()
th.reverse()
yt.reverse()
yc.reverse()
for xi, thi, yti, yci in zip(x, th, yt, yc):
line.append((xi + yti * sin(thi), yci - yti * cos(thi)))
return line
if __name__ == "__main__":
# make parameters for naca airfoil
m = 0.02
p = 0.4
t = 0.12
c = 1.0
# make naca geometry
x = [x for x in np.linspace(0, 0.2, 10)] + [x for x in np.linspace(0.2, 1.0, 10)][
1:
] # higher res in front
line = naca4(x, m, p, t, c)[:-1]
geo = Polygon(line)
# sample different parameters
s = geo.sample_boundary(nr_points=100000)
var_to_polyvtk(s, "naca_boundary")
s = geo.sample_interior(nr_points=100000)
var_to_polyvtk(s, "naca_interior")
| modulus-sym-main | examples/geometry/naca_airfoil.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from modulus.sym.geometry.primitives_3d import Box, Sphere, Cylinder
from modulus.sym.utils.io.vtk import var_to_polyvtk
if __name__ == "__main__":
# number of points to sample
nr_points = 100000
# make standard constructive solid geometry example
# make primitives
box = Box(point_1=(-1, -1, -1), point_2=(1, 1, 1))
sphere = Sphere(center=(0, 0, 0), radius=1.2)
cylinder_1 = Cylinder(center=(0, 0, 0), radius=0.5, height=2)
cylinder_2 = cylinder_1.rotate(angle=float(np.pi / 2.0), axis="x")
cylinder_3 = cylinder_1.rotate(angle=float(np.pi / 2.0), axis="y")
# combine with boolean operations
all_cylinders = cylinder_1 + cylinder_2 + cylinder_3
box_minus_sphere = box & sphere
geo = box_minus_sphere - all_cylinders
# sample geometry for plotting in Paraview
s = geo.sample_boundary(nr_points=nr_points)
var_to_polyvtk(s, "boundary")
print("Surface Area: {:.3f}".format(np.sum(s["area"])))
s = geo.sample_interior(nr_points=nr_points, compute_sdf_derivatives=True)
var_to_polyvtk(s, "interior")
print("Volume: {:.3f}".format(np.sum(s["area"])))
# apply transformations
geo = geo.scale(0.5)
geo = geo.rotate(angle=np.pi / 4, axis="z")
geo = geo.rotate(angle=np.pi / 4, axis="y")
geo = geo.repeat(spacing=4.0, repeat_lower=(-1, -1, -1), repeat_higher=(1, 1, 1))
# sample geometry for plotting in Paraview
s = geo.sample_boundary(nr_points=nr_points)
var_to_polyvtk(s, "repeated_boundary")
print("Repeated Surface Area: {:.3f}".format(np.sum(s["area"])))
s = geo.sample_interior(nr_points=nr_points, compute_sdf_derivatives=True)
var_to_polyvtk(s, "repeated_interior")
print("Repeated Volume: {:.3f}".format(np.sum(s["area"])))
| modulus-sym-main | examples/geometry/csg_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reference: https://www.mathworks.com/help/pde/ug/deflection-analysis-of-a-bracket.html
"""
import os
import warnings
import numpy as np
from sympy import Symbol, Eq, And
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Cylinder
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.linear_elasticity import LinearElasticity
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# Specify parameters
nu = 0.3
E = 100e9
lambda_ = nu * E / ((1 + nu) * (1 - 2 * nu))
mu = E / (2 * (1 + nu))
mu_c = 0.01 * mu
lambda_ = lambda_ / mu_c
mu = mu / mu_c
characteristic_length = 1.0
characteristic_displacement = 1e-4
sigma_normalization = characteristic_length / (characteristic_displacement * mu_c)
T = -4e4 * sigma_normalization
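# the elastic constants and the applied traction are nondimensionalized: stresses are scaled
# by mu_c and displacements by the characteristic displacement, which keeps the network
# outputs roughly of order one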
# make list of nodes to unroll graph on
le = LinearElasticity(lambda_=lambda_, mu=mu, dim=3)
disp_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u"), Key("v"), Key("w")],
cfg=cfg.arch.fully_connected,
)
stress_net = instantiate_arch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[
Key("sigma_xx"),
Key("sigma_yy"),
Key("sigma_zz"),
Key("sigma_xy"),
Key("sigma_xz"),
Key("sigma_yz"),
],
cfg=cfg.arch.fully_connected,
)
nodes = (
le.make_nodes()
+ [disp_net.make_node(name="displacement_network")]
+ [stress_net.make_node(name="stress_network")]
)
# add constraints to solver
# make geometry
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
support_origin = (-1, -1, -1)
support_dim = (0.25, 2, 2)
bracket_origin = (-0.75, -1, -0.1)
bracket_dim = (1.75, 2, 0.2)
cylinder_radius = 0.1
cylinder_height = 2.0
aux_lower_origin = (-0.75, -1, -0.1 - cylinder_radius)
aux_lower_dim = (cylinder_radius, 2, cylinder_radius)
aux_upper_origin = (-0.75, -1, 0.1)
aux_upper_dim = (cylinder_radius, 2, cylinder_radius)
cylinder_lower_center = (-0.75 + cylinder_radius, 0, 0)
cylinder_upper_center = (-0.75 + cylinder_radius, 0, 0)
cylinder_hole_radius = 0.7
cylinder_hole_height = 0.5
cylinder_hole_center = (0.125, 0, 0)
support = Box(
support_origin,
(
support_origin[0] + support_dim[0],
support_origin[1] + support_dim[1],
support_origin[2] + support_dim[2],
),
)
bracket = Box(
bracket_origin,
(
bracket_origin[0] + bracket_dim[0],
bracket_origin[1] + bracket_dim[1],
bracket_origin[2] + bracket_dim[2],
),
)
aux_lower = Box(
aux_lower_origin,
(
aux_lower_origin[0] + aux_lower_dim[0],
aux_lower_origin[1] + aux_lower_dim[1],
aux_lower_origin[2] + aux_lower_dim[2],
),
)
aux_upper = Box(
aux_upper_origin,
(
aux_upper_origin[0] + aux_upper_dim[0],
aux_upper_origin[1] + aux_upper_dim[1],
aux_upper_origin[2] + aux_upper_dim[2],
),
)
cylinder_lower = Cylinder(cylinder_lower_center, cylinder_radius, cylinder_height)
cylinder_upper = Cylinder(cylinder_upper_center, cylinder_radius, cylinder_height)
cylinder_hole = Cylinder(
cylinder_hole_center, cylinder_hole_radius, cylinder_hole_height
)
cylinder_lower = cylinder_lower.rotate(np.pi / 2, "x")
cylinder_upper = cylinder_upper.rotate(np.pi / 2, "x")
cylinder_lower = cylinder_lower.translate([0, 0, -0.1 - cylinder_radius])
cylinder_upper = cylinder_upper.translate([0, 0, 0.1 + cylinder_radius])
curve_lower = aux_lower - cylinder_lower
curve_upper = aux_upper - cylinder_upper
geo = support + bracket + curve_lower + curve_upper - cylinder_hole
# Domain bounds
bounds_x = (-1, 1)
bounds_y = (-1, 1)
bounds_z = (-1, 1)
bounds_support_x = (-1, -0.65)
bounds_support_y = (-1, 1)
bounds_support_z = (-1, 1)
bounds_bracket_x = (-0.65, 1)
bounds_bracket_y = (-1, 1)
bounds_bracket_z = (-0.1, 0.1)
# make domain
domain = Domain()
# back BC
backBC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.backBC,
lambda_weighting={"u": 10, "v": 10, "w": 10},
criteria=Eq(x, support_origin[0]),
)
domain.add_constraint(backBC, "backBC")
# front BC
frontBC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"traction_x": 0, "traction_y": 0, "traction_z": T},
batch_size=cfg.batch_size.frontBC,
criteria=Eq(x, bracket_origin[0] + bracket_dim[0]),
)
domain.add_constraint(frontBC, "frontBC")
# surface BC
surfaceBC = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=geo,
outvar={"traction_x": 0, "traction_y": 0, "traction_z": 0},
batch_size=cfg.batch_size.surfaceBC,
criteria=And((x > support_origin[0]), (x < bracket_origin[0] + bracket_dim[0])),
)
domain.add_constraint(surfaceBC, "surfaceBC")
# support interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"equilibrium_x": 0.0,
"equilibrium_y": 0.0,
"equilibrium_z": 0.0,
"stress_disp_xx": 0.0,
"stress_disp_yy": 0.0,
"stress_disp_zz": 0.0,
"stress_disp_xy": 0.0,
"stress_disp_xz": 0.0,
"stress_disp_yz": 0.0,
},
batch_size=cfg.batch_size.interior_support,
bounds={x: bounds_support_x, y: bounds_support_y, z: bounds_support_z},
lambda_weighting={
"equilibrium_x": Symbol("sdf"),
"equilibrium_y": Symbol("sdf"),
"equilibrium_z": Symbol("sdf"),
"stress_disp_xx": Symbol("sdf"),
"stress_disp_yy": Symbol("sdf"),
"stress_disp_zz": Symbol("sdf"),
"stress_disp_xy": Symbol("sdf"),
"stress_disp_xz": Symbol("sdf"),
"stress_disp_yz": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior_support")
# bracket interior
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={
"equilibrium_x": 0.0,
"equilibrium_y": 0.0,
"equilibrium_z": 0.0,
"stress_disp_xx": 0.0,
"stress_disp_yy": 0.0,
"stress_disp_zz": 0.0,
"stress_disp_xy": 0.0,
"stress_disp_xz": 0.0,
"stress_disp_yz": 0.0,
},
batch_size=cfg.batch_size.interior_bracket,
bounds={x: bounds_bracket_x, y: bounds_bracket_y, z: bounds_bracket_z},
lambda_weighting={
"equilibrium_x": Symbol("sdf"),
"equilibrium_y": Symbol("sdf"),
"equilibrium_z": Symbol("sdf"),
"stress_disp_xx": Symbol("sdf"),
"stress_disp_yy": Symbol("sdf"),
"stress_disp_zz": Symbol("sdf"),
"stress_disp_xy": Symbol("sdf"),
"stress_disp_xz": Symbol("sdf"),
"stress_disp_yz": Symbol("sdf"),
},
)
domain.add_constraint(interior, "interior_bracket")
# add validation data
mapping = {
"X Location (m)": "x",
"Y Location (m)": "y",
"Z Location (m)": "z",
"Directional Deformation (m)": "u",
}
mapping_v = {"Directional Deformation (m)": "v"}
mapping_w = {"Directional Deformation (m)": "w"}
mapping_sxx = {"Normal Stress (Pa)": "sigma_xx"}
mapping_syy = {"Normal Stress (Pa)": "sigma_yy"}
mapping_szz = {"Normal Stress (Pa)": "sigma_zz"}
mapping_sxy = {"Shear Stress (Pa)": "sigma_xy"}
mapping_sxz = {"Shear Stress (Pa)": "sigma_xz"}
mapping_syz = {"Shear Stress (Pa)": "sigma_yz"}
file_path = "commercial_solver"
if os.path.exists(to_absolute_path(file_path)):
commercial_solver_var = csv_to_dict(
to_absolute_path("commercial_solver/deformation_x.txt"),
mapping,
delimiter="\t",
)
commercial_solver_var_v = csv_to_dict(
to_absolute_path("commercial_solver/deformation_y.txt"),
mapping_v,
delimiter="\t",
)
commercial_solver_var_w = csv_to_dict(
to_absolute_path("commercial_solver/deformation_z.txt"),
mapping_w,
delimiter="\t",
)
commercial_solver_var_sxx = csv_to_dict(
to_absolute_path("commercial_solver/normal_x.txt"),
mapping_sxx,
delimiter="\t",
)
commercial_solver_var_syy = csv_to_dict(
to_absolute_path("commercial_solver/normal_y.txt"),
mapping_syy,
delimiter="\t",
)
commercial_solver_var_szz = csv_to_dict(
to_absolute_path("commercial_solver/normal_z.txt"),
mapping_szz,
delimiter="\t",
)
commercial_solver_var_sxy = csv_to_dict(
to_absolute_path("commercial_solver/shear_xy.txt"),
mapping_sxy,
delimiter="\t",
)
commercial_solver_var_sxz = csv_to_dict(
to_absolute_path("commercial_solver/shear_xz.txt"),
mapping_sxz,
delimiter="\t",
)
commercial_solver_var_syz = csv_to_dict(
to_absolute_path("commercial_solver/shear_yz.txt"),
mapping_syz,
delimiter="\t",
)
commercial_solver_var["x"] = commercial_solver_var["x"]
commercial_solver_var["y"] = commercial_solver_var["y"]
commercial_solver_var["z"] = commercial_solver_var["z"]
commercial_solver_var["u"] = (
commercial_solver_var["u"] / characteristic_displacement
)
commercial_solver_var["v"] = (
commercial_solver_var_v["v"] / characteristic_displacement
)
commercial_solver_var["w"] = (
commercial_solver_var_w["w"] / characteristic_displacement
)
commercial_solver_var["sigma_xx"] = (
commercial_solver_var_sxx["sigma_xx"] * sigma_normalization
)
commercial_solver_var["sigma_yy"] = (
commercial_solver_var_syy["sigma_yy"] * sigma_normalization
)
commercial_solver_var["sigma_zz"] = (
commercial_solver_var_szz["sigma_zz"] * sigma_normalization
)
commercial_solver_var["sigma_xy"] = (
commercial_solver_var_sxy["sigma_xy"] * sigma_normalization
)
commercial_solver_var["sigma_xz"] = (
commercial_solver_var_sxz["sigma_xz"] * sigma_normalization
)
commercial_solver_var["sigma_yz"] = (
commercial_solver_var_syz["sigma_yz"] * sigma_normalization
)
commercial_solver_invar = {
key: value
for key, value in commercial_solver_var.items()
if key in ["x", "y", "z"]
}
commercial_solver_outvar = {
key: value
for key, value in commercial_solver_var.items()
if key
in [
"u",
"v",
"w",
"sigma_xx",
"sigma_yy",
"sigma_zz",
"sigma_xy",
"sigma_xz",
"sigma_yz",
]
}
commercial_solver_validator = PointwiseValidator(
nodes=nodes,
invar=commercial_solver_invar,
true_outvar=commercial_solver_outvar,
batch_size=128,
)
domain.add_validator(commercial_solver_validator)
# add inferencer data
grid_inference = PointwiseInferencer(
nodes=nodes,
invar=commercial_solver_invar,
output_names=[
"u",
"v",
"w",
"sigma_xx",
"sigma_yy",
"sigma_zz",
"sigma_xy",
"sigma_xz",
"sigma_yz",
],
batch_size=128,
)
domain.add_inferencer(grid_inference, "inf_data")
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/bracket/bracket.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import torch
import numpy as np
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, GradNormal
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.geometry import Parameterization, Parameter
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# params for domain
channel_length = (-2.5, 2.5)
channel_width = (-0.5, 0.5)
heat_sink_origin = (-1, -0.3)
nr_heat_sink_fins = 3
gap = 0.15 + 0.1
heat_sink_length = 1.0
heat_sink_fin_thickness = 0.1
inlet_vel = 1.5
heat_sink_temp = 350
base_temp = 293.498
nu = 0.01
diffusivity = 0.01 / 5
# define sympy variables to parameterize domain curves
x, y = Symbol("x"), Symbol("y")
# define geometry
channel = Channel2D(
(channel_length[0], channel_width[0]), (channel_length[1], channel_width[1])
)
heat_sink = Rectangle(
heat_sink_origin,
(
heat_sink_origin[0] + heat_sink_length,
heat_sink_origin[1] + heat_sink_fin_thickness,
),
)
for i in range(1, nr_heat_sink_fins):
heat_sink_origin = (heat_sink_origin[0], heat_sink_origin[1] + gap)
fin = Rectangle(
heat_sink_origin,
(
heat_sink_origin[0] + heat_sink_length,
heat_sink_origin[1] + heat_sink_fin_thickness,
),
)
heat_sink = heat_sink + fin
geo = channel - heat_sink
inlet = Line(
(channel_length[0], channel_width[0]), (channel_length[0], channel_width[1]), -1
)
outlet = Line(
(channel_length[1], channel_width[0]), (channel_length[1], channel_width[1]), 1
)
x_pos = Parameter("x_pos")
integral_line = Line(
(x_pos, channel_width[0]),
(x_pos, channel_width[1]),
1,
parameterization=Parameterization({x_pos: channel_length}),
)
# make list of nodes to unroll graph on
ze = ZeroEquation(
nu=nu, rho=1.0, dim=2, max_distance=(channel_width[1] - channel_width[0]) / 2
)
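# the zero-equation turbulence model supplies an effective (laminar + turbulent) viscosity as
# a symbolic expression, which is passed directly into the Navier-Stokes equations below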
ns = NavierStokes(nu=ze.equations["nu"], rho=1.0, dim=2, time=False)
ade = AdvectionDiffusion(T="c", rho=1.0, D=diffusivity, dim=2, time=False)
gn_c = GradNormal("c", dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
heat_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("c")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ ze.make_nodes()
+ ade.make_nodes(detach_names=["u", "v"])
+ gn_c.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
+ [heat_net.make_node(name="heat_network")]
)
# make domain
domain = Domain()
# inlet
inlet_parabola = parabola(
y, inter_1=channel_width[0], inter_2=channel_width[1], height=inlet_vel
)
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"u": inlet_parabola, "v": 0, "c": 0},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "outlet")
# heat_sink wall
hs_wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=heat_sink,
outvar={"u": 0, "v": 0, "c": (heat_sink_temp - base_temp) / 273.15},
batch_size=cfg.batch_size.hs_wall,
)
domain.add_constraint(hs_wall, "heat_sink_wall")
# channel wall
channel_wall = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=channel,
outvar={"u": 0, "v": 0, "normal_gradient_c": 0},
batch_size=cfg.batch_size.channel_wall,
)
domain.add_constraint(channel_wall, "channel_wall")
# interior flow
interior_flow = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior_flow,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": Symbol("sdf"),
"momentum_x": Symbol("sdf"),
"momentum_y": Symbol("sdf"),
},
)
domain.add_constraint(interior_flow, "interior_flow")
# interior heat
interior_heat = PointwiseInteriorConstraint(
nodes=nodes,
geometry=geo,
outvar={"advection_diffusion_c": 0},
batch_size=cfg.batch_size.interior_heat,
lambda_weighting={
"advection_diffusion_c": 1.0,
},
)
domain.add_constraint(interior_heat, "interior_heat")
# integral continuity
def integral_criteria(invar, params):
sdf = geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=nodes,
geometry=integral_line,
outvar={"normal_dot_vel": 1},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=integral_criteria,
)
domain.add_constraint(integral_continuity, "integral_continuity")
# add validation data
file_path = "openfoam/heat_sink_zeroEq_Pr5_mesh20.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"U:0": "u",
"U:1": "v",
"p": "p",
"d": "sdf",
"nuT": "nu",
"T": "c",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_var["nu"] += nu
openfoam_var["c"] += -base_temp
openfoam_var["c"] /= 273.15
openfoam_invar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["x", "y", "sdf"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u", "v", "p", "c"] # Removing "nu"
}
openfoam_validator = PointwiseValidator(
nodes=nodes,
invar=openfoam_invar_numpy,
true_outvar=openfoam_outvar_numpy,
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# monitors for force, residuals and temperature
global_monitor = PointwiseMonitor(
geo.sample_interior(100),
output_names=["continuity", "momentum_x", "momentum_y"],
metrics={
"mass_imbalance": lambda var: torch.sum(
var["area"] * torch.abs(var["continuity"])
),
"momentum_imbalance": lambda var: torch.sum(
var["area"]
* (torch.abs(var["momentum_x"]) + torch.abs(var["momentum_y"]))
),
},
nodes=nodes,
requires_grad=True,
)
domain.add_monitor(global_monitor)
force = PointwiseMonitor(
heat_sink.sample_boundary(100),
output_names=["p"],
metrics={
"force_x": lambda var: torch.sum(var["normal_x"] * var["area"] * var["p"]),
"force_y": lambda var: torch.sum(var["normal_y"] * var["area"] * var["p"]),
},
nodes=nodes,
)
domain.add_monitor(force)
peakT = PointwiseMonitor(
heat_sink.sample_boundary(100),
output_names=["c"],
metrics={"peakT": lambda var: torch.max(var["c"])},
nodes=nodes,
)
domain.add_monitor(peakT)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/three_fin_2d/heat_sink.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import warnings
import torch
import numpy as np
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_2d import Rectangle, Line, Channel2D
from modulus.sym.utils.sympy.functions import parabola
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.eq.pdes.navier_stokes import NavierStokes, GradNormal
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
PointwiseConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym.node import Node
@modulus.sym.main(config_path="conf_inverse", config_name="config")
def run(cfg: ModulusConfig) -> None:
nu, D = Symbol("nu"), Symbol("D")
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nu, rho=1.0, dim=2, time=False)
ade = AdvectionDiffusion(T="c", rho=1.0, D=D, dim=2, time=False)
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
heat_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("c")],
cfg=cfg.arch.fully_connected,
)
invert_net_nu = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("nu")],
cfg=cfg.arch.fully_connected,
)
invert_net_D = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("D")],
cfg=cfg.arch.fully_connected,
)
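# detaching u, v, p, c and their derivatives inside the PDE nodes below means the residual
# losses only update the nu and D inversion networks, while the measured field values train
# the flow and heat networks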
nodes = (
ns.make_nodes(
detach_names=[
"u",
"u__x",
"u__x__x",
"u__y",
"u__y__y",
"v",
"v__x",
"v__x__x",
"v__y",
"v__y__y",
"p",
"p__x",
"p__y",
]
)
+ ade.make_nodes(
detach_names=["u", "v", "c", "c__x", "c__y", "c__x__x", "c__y__y"]
)
+ [flow_net.make_node(name="flow_network")]
+ [heat_net.make_node(name="heat_network")]
+ [invert_net_nu.make_node(name="invert_nu_network")]
+ [invert_net_D.make_node(name="invert_D_network")]
)
base_temp = 293.498
# OpenFOAM data
file_path = "openfoam/heat_sink_Pr5_clipped2.csv"
if not os.path.exists(to_absolute_path(file_path)):
warnings.warn(
f"Directory {file_path} does not exist. Cannot continue. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
sys.exit()
mapping = {
"Points:0": "x",
"Points:1": "y",
"U:0": "u",
"U:1": "v",
"p": "p",
"T": "c",
}
openfoam_var = csv_to_dict(
to_absolute_path("openfoam/heat_sink_Pr5_clipped2.csv"), mapping
)
openfoam_var["c"] = openfoam_var["c"] / base_temp - 1.0
openfoam_invar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value for key, value in openfoam_var.items() if key in ["u", "v", "p", "c"]
}
openfoam_outvar_numpy["continuity"] = np.zeros_like(openfoam_outvar_numpy["u"])
openfoam_outvar_numpy["momentum_x"] = np.zeros_like(openfoam_outvar_numpy["u"])
openfoam_outvar_numpy["momentum_y"] = np.zeros_like(openfoam_outvar_numpy["u"])
openfoam_outvar_numpy["advection_diffusion_c"] = np.zeros_like(
openfoam_outvar_numpy["u"]
)
# make domain
domain = Domain()
# interior
data = PointwiseConstraint.from_numpy(
nodes=nodes,
invar=openfoam_invar_numpy,
outvar=openfoam_outvar_numpy,
batch_size=cfg.batch_size.data,
)
domain.add_constraint(data, "interior_data")
# add monitors
monitor = PointwiseMonitor(
openfoam_invar_numpy,
output_names=["nu"],
metrics={"mean_nu": lambda var: torch.mean(var["nu"])},
nodes=nodes,
)
domain.add_monitor(monitor)
monitor = PointwiseMonitor(
openfoam_invar_numpy,
output_names=["D"],
metrics={"mean_D": lambda var: torch.mean(var["D"])},
nodes=nodes,
)
domain.add_monitor(monitor)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/three_fin_2d/heat_sink_inverse.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from limerock_geometry import LimeRock
# make limerock
limerock = LimeRock()
#############
# Real Params
#############
# fluid params
fluid_viscosity = 1.84e-05 # kg/m-s
fluid_density = 1.1614 # kg/m3
fluid_specific_heat = 1005 # J/(kg K)
fluid_conductivity = 0.0261 # W/(m K)
# copper params
copper_density = 8930 # kg/m3
copper_specific_heat = 385 # J/(kg K)
copper_conductivity = 385 # W/(m K)
# boundary params
inlet_velocity = 5.7 # m/s
inlet_temp = 0 # K
# source
source_term = 2127.71 # K/m
source_origin = (-0.061667, -0.15833, limerock.geo_bounds_lower[2])
source_dim = (0.1285, 0.31667, 0)
################
# Non dim params
################
length_scale = 0.0575 # m
velocity_scale = 5.7 # m/s
time_scale = length_scale / velocity_scale # s
density_scale = 1.1614 # kg/m3
mass_scale = density_scale * length_scale**3 # kg
pressure_scale = mass_scale / (length_scale * time_scale**2) # kg / (m s**2)
temp_scale = 273.15 # K
watt_scale = (mass_scale * length_scale**2) / (time_scale**3) # kg m**2 / s**3
joule_scale = (mass_scale * length_scale**2) / (time_scale**2) # kg * m**2 / s**2
##############################
# Nondimensionalization Params
##############################
# fluid params
nd_fluid_viscosity = fluid_viscosity / (
length_scale**2 / time_scale
) # need to divide by density to get previous viscosity
nd_fluid_density = fluid_density / density_scale
nd_fluid_specific_heat = fluid_specific_heat / (joule_scale / (mass_scale * temp_scale))
nd_fluid_conductivity = fluid_conductivity / (watt_scale / (length_scale * temp_scale))
nd_fluid_diffusivity = nd_fluid_conductivity / (
nd_fluid_specific_heat * nd_fluid_density
)
# copper params
nd_copper_density = copper_density / (mass_scale / length_scale**3)
nd_copper_specific_heat = copper_specific_heat / (
joule_scale / (mass_scale * temp_scale)
)
nd_copper_conductivity = copper_conductivity / (
watt_scale / (length_scale * temp_scale)
)
nd_copper_diffusivity = nd_copper_conductivity / (
nd_copper_specific_heat * nd_copper_density
)
# boundary params
nd_inlet_velocity = inlet_velocity / velocity_scale
nd_volumetric_flow = limerock.inlet_area * nd_inlet_velocity
nd_inlet_temp = inlet_temp / temp_scale
nd_source_term = source_term / (temp_scale / length_scale)
| modulus-sym-main | examples/limerock/limerock_hFTB/limerock_properties.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import tanh, Symbol, Function
from modulus.sym.eq.pde import PDE
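# the solid heat equation is written in first-order (flux) form: a separate network predicts
# the temperature-gradient components, the diffusion equation is imposed on their divergence,
# and compatibility/integration equations tie the fluxes back to theta_s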
class FluxDiffusion(PDE):
name = "FluxDiffusion"
def __init__(self, D=0.01):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# Flux of Temperature
flux_theta_s_x = Function("flux_theta_s_x")(*input_variables)
flux_theta_s_y = Function("flux_theta_s_y")(*input_variables)
flux_theta_s_z = Function("flux_theta_s_z")(*input_variables)
# set equations
self.equations = {}
self.equations["diffusion_theta_s"] = -(
(D * flux_theta_s_x).diff(x)
+ (D * flux_theta_s_y).diff(y)
+ (D * flux_theta_s_z).diff(z)
)
self.equations["compatibility_theta_s_x_y"] = D * (
flux_theta_s_x.diff(y) - flux_theta_s_y.diff(x)
)
self.equations["compatibility_theta_s_x_z"] = D * (
flux_theta_s_x.diff(z) - flux_theta_s_z.diff(x)
)
self.equations["compatibility_theta_s_y_z"] = D * (
flux_theta_s_y.diff(z) - flux_theta_s_z.diff(y)
)
class FluxIntegrateDiffusion(PDE):
name = "IntegrateDiffusion"
def __init__(self):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# Temperature
theta_s = Function("theta_s")(*input_variables)
flux_theta_s_x = Function("flux_theta_s_x")(*input_variables)
flux_theta_s_y = Function("flux_theta_s_y")(*input_variables)
flux_theta_s_z = Function("flux_theta_s_z")(*input_variables)
# set equations
self.equations = {}
self.equations["integrate_diffusion_theta_s_x"] = (
theta_s.diff(x) - flux_theta_s_x
)
self.equations["integrate_diffusion_theta_s_y"] = (
theta_s.diff(y) - flux_theta_s_y
)
self.equations["integrate_diffusion_theta_s_z"] = (
theta_s.diff(z) - flux_theta_s_z
)
class FluxGradNormal(PDE):
def __init__(self):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# variables to set the gradients (example Temperature)
flux_theta_s_x = Function("flux_theta_s_x")(*input_variables)
flux_theta_s_y = Function("flux_theta_s_y")(*input_variables)
flux_theta_s_z = Function("flux_theta_s_z")(*input_variables)
# set equations
self.equations = {}
self.equations["normal_gradient_flux_theta_s"] = (
normal_x * flux_theta_s_x
+ normal_y * flux_theta_s_y
+ normal_z * flux_theta_s_z
)
class FluxRobin(PDE):
def __init__(self, theta_f_conductivity, theta_s_conductivity, h):
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
normal_x = Symbol("normal_x")
normal_y = Symbol("normal_y")
normal_z = Symbol("normal_z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# variables to set the gradients (example Temperature)
theta_s = Function("theta_s")(*input_variables)
flux_theta_s_x = Function("flux_theta_s_x")(*input_variables)
flux_theta_s_y = Function("flux_theta_s_y")(*input_variables)
flux_theta_s_z = Function("flux_theta_s_z")(*input_variables)
theta_f = Function("theta_f_prev_step")(*input_variables)
# set equations
flux_theta_f = -theta_f_conductivity * (
normal_x * theta_f.diff(x)
+ normal_y * theta_f.diff(y)
+ normal_z * theta_f.diff(z)
)
ambient_theta_f = theta_f - (flux_theta_f / h)
flux_theta_s = -theta_s_conductivity * (
normal_x * flux_theta_s_x
+ normal_y * flux_theta_s_y
+ normal_z * flux_theta_s_z
)
self.equations = {}
self.equations["robin_theta_s"] = (
flux_theta_s - h * (theta_s - ambient_theta_f)
) / theta_s_conductivity
class Dirichlet(PDE):
def __init__(self, lhs="theta_s", rhs="theta_f"):
# save name for u
self.lhs = lhs
self.rhs = rhs
# coordinates
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# make input variables
input_variables = {"x": x, "y": y, "z": z}
# functions
rhs = Function(rhs)(*input_variables)
lhs = Function(lhs)(*input_variables)
# set equations
self.equations = {}
self.equations["dirichlet_" + self.rhs + "_" + self.lhs] = rhs - lhs
| modulus-sym-main | examples/limerock/limerock_hFTB/flux_diffusion.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
from torch import Tensor
import copy
import numpy as np
from sympy import Symbol, Eq, tanh, Or, And
from omegaconf import DictConfig, OmegaConf
import hydra
from hydra.utils import to_absolute_path
from typing import Dict
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import SequentialSolver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.models.arch import Arch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.utils.io import (
VTKUniformGrid,
)
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from modulus.sym.eq.pdes.advection_diffusion import AdvectionDiffusion
from modulus.sym.distributed.manager import DistributedManager
from limerock_properties import *
from flux_diffusion import (
FluxDiffusion,
FluxIntegrateDiffusion,
FluxGradNormal,
FluxRobin,
Dirichlet,
)
class hFTBArch(Arch):
def __init__(
self,
arch: Arch,
) -> None:
output_keys = arch.output_keys + [
Key(x.name + "_prev_step") for x in arch.output_keys
]
super().__init__(
input_keys=arch.input_keys,
output_keys=output_keys,
periodicity=arch.periodicity,
)
# set networks for current and prev time window
self.arch_prev_step = arch
self.arch = copy.deepcopy(arch)
for param, param_prev_step in zip(
self.arch.parameters(), self.arch_prev_step.parameters()
):
param_prev_step.requires_grad = False
def forward(self, in_vars: Dict[str, Tensor]) -> Dict[str, Tensor]:
y_prev_step = self.arch_prev_step.forward(in_vars)
y = self.arch.forward(in_vars)
for key, b in y_prev_step.items():
y[key + "_prev_step"] = b
return y
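# called between hFTB cycles: copy the current network weights into the frozen previous-step
# copy so the next cycle's interface conditions use the latest fluid temperature field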
def move_network(self):
for param, param_prev_step in zip(
self.arch.parameters(), self.arch_prev_step.parameters()
):
param_prev_step.data = param.detach().clone().data
param_prev_step.requires_grad = False
@modulus.sym.main(config_path="conf", config_name="conf_thermal")
def run(cfg: ModulusConfig) -> None:
if DistributedManager().distributed:
print("Multi-GPU currently not supported for this example. Exiting.")
return
# make list of nodes to unroll graph on
ad = AdvectionDiffusion(
T="theta_f", rho=nd_fluid_density, D=nd_fluid_diffusivity, dim=3, time=False
)
dif = FluxDiffusion(D=nd_copper_diffusivity)
flow_grad_norm = GradNormal("theta_f", dim=3, time=False)
solid_grad_norm = FluxGradNormal()
integrate_flux_dif = FluxIntegrateDiffusion()
robin_flux = FluxRobin(
theta_f_conductivity=nd_fluid_conductivity,
theta_s_conductivity=nd_copper_conductivity,
h=500.0,
)
dirichlet = Dirichlet(lhs="theta_f", rhs="theta_s")
flow_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
)
f_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")], output_keys=[Key("theta_f")]
)
thermal_f_net = hFTBArch(f_net)
thermal_s_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")], output_keys=[Key("theta_s")]
)
flux_s_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[
Key("flux_theta_s_x"),
Key("flux_theta_s_y"),
Key("flux_theta_s_z"),
],
)
thermal_nodes = (
ad.make_nodes(detach_names=["u", "v", "w"])
+ dif.make_nodes()
+ flow_grad_norm.make_nodes()
+ solid_grad_norm.make_nodes()
+ integrate_flux_dif.make_nodes(
detach_names=["flux_theta_s_x", "flux_theta_s_y", "flux_theta_s_z"]
)
+ robin_flux.make_nodes(
detach_names=[
"theta_f_prev_step",
"theta_f_prev_step__x",
"theta_f_prev_step__y",
"theta_f_prev_step__z",
]
)
+ dirichlet.make_nodes(detach_names=["theta_s"])
+ [flow_net.make_node(name="flow_network", optimize=False, jit=cfg.jit)]
+ [
thermal_f_net.make_node(
name="thermal_fluid_network", optimize=True, jit=cfg.jit
)
]
+ [
thermal_s_net.make_node(
name="thermal_solid_network", optimize=True, jit=cfg.jit
)
]
+ [flux_s_net.make_node(name="flux_solid_network", optimize=True, jit=cfg.jit)]
)
# make domain for first cycle of hFTB
cycle_1_domain = Domain("cycle_1")
# add constraints to solver
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
import time as time
tic = time.time()
# inlet
inlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.inlet,
outvar={"theta_f": nd_inlet_temp},
batch_size=cfg.batch_size.inlet,
batch_per_epoch=50,
lambda_weighting={"theta_f": 1000.0},
)
cycle_1_domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.outlet,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.outlet,
lambda_weighting={"normal_gradient_theta_f": 1.0},
)
cycle_1_domain.add_constraint(outlet, "outlet")
# channel walls insulating
walls = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.geo,
outvar={"normal_gradient_theta_f": 0},
batch_size=cfg.batch_size.no_slip,
criteria=Or(
Or(
Eq(y, limerock.geo_bounds_lower[1]), Eq(z, limerock.geo_bounds_lower[2])
),
Or(
Eq(y, limerock.geo_bounds_upper[1]), Eq(z, limerock.geo_bounds_upper[2])
),
),
lambda_weighting={"normal_gradient_theta_f": 1.0},
)
cycle_1_domain.add_constraint(walls, name="ChannelWalls")
# flow interior low res away from heat sink
lr_interior_f = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=limerock.geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.lr_interior_f,
criteria=Or(
(x < limerock.heat_sink_bounds[0]), (x > limerock.heat_sink_bounds[1])
),
lambda_weighting={"advection_diffusion_theta_f": 1000.0},
)
cycle_1_domain.add_constraint(lr_interior_f, "lr_interior_f")
# flow interior high res near heat sink
hr_interior_f = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=limerock.geo,
outvar={"advection_diffusion_theta_f": 0},
batch_size=cfg.batch_size.hr_interior_f,
lambda_weighting={"advection_diffusion_theta_f": 1000.0},
criteria=And(
(x > limerock.heat_sink_bounds[0]), (x < limerock.heat_sink_bounds[1])
),
)
cycle_1_domain.add_constraint(hr_interior_f, "hr_interior_f")
# fluid solid interface
interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.geo_solid,
outvar={"theta_f": 0.05},
batch_size=cfg.batch_size.interface,
criteria=z > limerock.geo_bounds_lower[2],
lambda_weighting={"theta_f": 100.0},
)
cycle_1_domain.add_constraint(interface, "interface")
# add inferencer data
vtk_obj = VTKUniformGrid(
bounds=[limerock.geo_bounds[x], limerock.geo_bounds[y], limerock.geo_bounds[z]],
npoints=[256, 128, 256],
export_map={"u": ["u", "v", "w"], "p": ["p"], "theta_f": ["theta_f"]},
)
def mask_fn(x, y, z):
sdf = limerock.geo.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"] < 0
grid_inferencer = PointVTKInferencer(
vtk_obj=vtk_obj,
nodes=thermal_nodes,
input_vtk_map={"x": "x", "y": "y", "z": "z"},
output_names=["u", "v", "w", "p", "theta_f"],
mask_fn=mask_fn,
mask_value=np.nan,
requires_grad=False,
batch_size=100000,
)
cycle_1_domain.add_inferencer(grid_inferencer, "grid_inferencer")
# make domain for all other cycles
cycle_n_domain = Domain("cycle_n")
# inlet
cycle_n_domain.add_constraint(inlet, "inlet")
# outlet
cycle_n_domain.add_constraint(outlet, "outlet")
# channel walls insulating
cycle_n_domain.add_constraint(walls, name="ChannelWalls")
# flow interior low res away from heat sink
cycle_n_domain.add_constraint(lr_interior_f, "lr_interior_f")
# flow interior high res near heat sink
cycle_n_domain.add_constraint(hr_interior_f, "hr_interior_f")
# diffusion dictionaries
diff_outvar = {
"diffusion_theta_s": 0,
"compatibility_theta_s_x_y": 0,
"compatibility_theta_s_x_z": 0,
"compatibility_theta_s_y_z": 0,
"integrate_diffusion_theta_s_x": 0,
"integrate_diffusion_theta_s_y": 0,
"integrate_diffusion_theta_s_z": 0,
}
diff_lambda = {
"diffusion_theta_s": 1000000.0,
"compatibility_theta_s_x_y": 1.0,
"compatibility_theta_s_x_z": 1.0,
"compatibility_theta_s_y_z": 1.0,
"integrate_diffusion_theta_s_x": 1.0,
"integrate_diffusion_theta_s_y": 1.0,
"integrate_diffusion_theta_s_z": 1.0,
}
# solid interior
interior_s = PointwiseInteriorConstraint(
nodes=thermal_nodes,
geometry=limerock.geo_solid,
outvar=diff_outvar,
batch_size=cfg.batch_size.interior_s,
lambda_weighting=diff_lambda,
)
cycle_n_domain.add_constraint(interior_s, "interior_s")
# limerock base
sharpen_tanh = 60.0
source_func_xl = (tanh(sharpen_tanh * (x - source_origin[0])) + 1.0) / 2.0
source_func_xh = (
tanh(sharpen_tanh * ((source_origin[0] + source_dim[0]) - x)) + 1.0
) / 2.0
source_func_yl = (tanh(sharpen_tanh * (y - source_origin[1])) + 1.0) / 2.0
source_func_yh = (
tanh(sharpen_tanh * ((source_origin[1] + source_dim[1]) - y)) + 1.0
) / 2.0
gradient_normal = (
nd_source_term
* source_func_xl
* source_func_xh
* source_func_yl
* source_func_yh
)
base = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.geo_solid,
outvar={"normal_gradient_flux_theta_s": gradient_normal},
batch_size=cfg.batch_size.base,
criteria=Eq(z, limerock.geo_bounds_lower[2]),
lambda_weighting={"normal_gradient_flux_theta_s": 10.0},
)
cycle_n_domain.add_constraint(base, "base")
# fluid solid interface
interface = PointwiseBoundaryConstraint(
nodes=thermal_nodes,
geometry=limerock.geo_solid,
outvar={"dirichlet_theta_s_theta_f": 0, "robin_theta_s": 0},
batch_size=cfg.batch_size.interface,
criteria=z > limerock.geo_bounds_lower[2],
lambda_weighting={"dirichlet_theta_s_theta_f": 100.0, "robin_theta_s": 1.0},
)
cycle_n_domain.add_constraint(interface, "interface")
# add fluid inferencer data
cycle_n_domain.add_inferencer(grid_inferencer, "grid_inferencer")
# add solid inferencer data
vtk_obj = VTKUniformGrid(
bounds=[
limerock.geo_hr_bounds[x],
limerock.geo_hr_bounds[y],
limerock.geo_hr_bounds[z],
],
npoints=[128, 128, 512],
export_map={"theta_s": ["theta_s"]},
)
def mask_fn(x, y, z):
sdf = limerock.geo.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"] > 0
grid_inferencer = PointVTKInferencer(
vtk_obj=vtk_obj,
nodes=thermal_nodes,
input_vtk_map={"x": "x", "y": "y", "z": "z"},
output_names=["theta_s"],
mask_fn=mask_fn,
mask_value=np.nan,
requires_grad=False,
batch_size=100000,
)
cycle_n_domain.add_inferencer(grid_inferencer, "grid_inferencer_solid")
# peak temperature monitor
invar_temp = limerock.geo_solid.sample_boundary(
10000, criteria=Eq(z, limerock.geo_bounds_lower[2])
)
peak_temp_monitor = PointwiseMonitor(
invar_temp,
output_names=["theta_s"],
metrics={"peak_temp": lambda var: torch.max(var["theta_s"])},
nodes=thermal_nodes,
)
cycle_n_domain.add_monitor(peak_temp_monitor)
# make solver
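# the first cycle solves the fluid temperature with a fixed guessed interface temperature;
# the following 20 cycles couple fluid and solid through the Dirichlet/Robin interface
# conditions, with move_network refreshing the frozen fluid field between cycles (hFTB)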
slv = SequentialSolver(
cfg,
[(1, cycle_1_domain), (20, cycle_n_domain)],
custom_update_operation=thermal_f_net.move_network,
)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/limerock/limerock_hFTB/limerock_thermal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import Modulus library
from sympy import Symbol, Eq, tanh, Max
import numpy as np
import itertools
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.geometry import Parameterization, Parameter
class LimeRock(object):
def __init__(self):
# scale STL
self.scale = 5 / 0.3
self.translate = (0, 0, -0.055)
# set fins
self.nr_fins = 47
self.fin_gap = 0.0018
# make solids
self.copper = None
# parse file
print("parsing stl file...")
self._parse_file("../stl_files/limerock.stl")
print("finished parsing")
# inlet area
self.inlet_area = (self.geo_bounds_upper[1] - self.geo_bounds_lower[1]) * (
self.geo_bounds_upper[2] - self.geo_bounds_lower[2]
)
# geo
self.heat_sink_bounds = (-0.7, 0.7)
self.geo = self.channel - self.copper
self.geo_solid = self.copper
self.geo_bounds = {
Symbol("x"): (self.geo_bounds_lower[0], self.geo_bounds_upper[0]),
Symbol("y"): (self.geo_bounds_lower[1], self.geo_bounds_upper[1]),
Symbol("z"): (self.geo_bounds_lower[2], self.geo_bounds_upper[2]),
}
self.geo_hr_bounds = {
Symbol("x"): self.heat_sink_bounds,
Symbol("y"): (self.geo_bounds_lower[1], self.geo_bounds_upper[1]),
Symbol("z"): (self.geo_bounds_lower[2], self.geo_bounds_upper[2]),
}
# integral plane
x_pos = Parameter("x_pos")
self.integral_plane = Plane(
(x_pos, self.geo_bounds_lower[1], self.geo_bounds_lower[2]),
(x_pos, self.geo_bounds_upper[1], self.geo_bounds_upper[2]),
1,
parameterization=Parameterization({x_pos: self.heat_sink_bounds}),
)
def solid_names(self):
return list(self.solids.keys())
def _parse_file(self, filename):
# Read file
reader = open(filename)
sdf = 0
while True:
line = reader.readline()
if "solid" == line.split(" ")[0]:
solid_name = line.split(" ")[-1].rstrip()
bounds_lower, bounds_upper = self.read_solid(reader)
if solid_name == "opening.1":
self.inlet = Plane(bounds_lower, bounds_upper, -1)
self.geo_bounds_lower = bounds_lower
elif solid_name == "fan.1":
self.outlet = Plane(bounds_lower, bounds_upper, 1)
self.geo_bounds_upper = bounds_upper
elif solid_name == "FIN":
fin = Box(bounds_lower, bounds_upper)
fin = fin.repeat(
self.scale * self.fin_gap,
repeat_lower=(0, 0, 0),
repeat_higher=(0, 0, self.nr_fins - 1),
center=_center(bounds_lower, bounds_upper),
)
if self.copper is not None:
self.copper = self.copper + fin
else:
self.copper = fin
else:
solid = Box(bounds_lower, bounds_upper)
if self.copper is not None:
self.copper = self.copper + solid
else:
self.copper = solid
else:
break
self.channel = Channel(self.geo_bounds_lower, self.geo_bounds_upper)
def read_solid(self, reader):
# solid pieces
faces = []
while True:
line = reader.readline()
split_line = line.split(" ")
if len(split_line) == 0:
break
elif "endsolid" == split_line[0]:
break
elif "facet" == split_line[0]:
curve = {}
# read outer loop line
_ = reader.readline()
# read 3 vertices
a_0 = [float(x) for x in reader.readline().split(" ")[-3:]]
a_1 = [float(x) for x in reader.readline().split(" ")[-3:]]
a_2 = [float(x) for x in reader.readline().split(" ")[-3:]]
faces.append([a_0, a_1, a_2])
# read end loop/end facet
_ = reader.readline()
_ = reader.readline()
faces = np.array(faces)
bounds_lower = (
np.min(faces[..., 2]),
np.min(faces[..., 1]),
np.min(faces[..., 0]),
) # flip axis
bounds_upper = (
np.max(faces[..., 2]),
np.max(faces[..., 1]),
np.max(faces[..., 0]),
)
bounds_lower = tuple(
[self.scale * (x + t) for x, t in zip(bounds_lower, self.translate)]
)
bounds_upper = tuple(
[self.scale * (x + t) for x, t in zip(bounds_upper, self.translate)]
)
return bounds_lower, bounds_upper
def _center(bounds_lower, bounds_upper):
center_x = bounds_lower[0] + (bounds_upper[0] - bounds_lower[0]) / 2
center_y = bounds_lower[1] + (bounds_upper[1] - bounds_lower[1]) / 2
center_z = bounds_lower[2] + (bounds_upper[2] - bounds_lower[2]) / 2
return center_x, center_y, center_z
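# For reference, _parse_file/read_solid above assume an ASCII STL laid out roughly
# as in the (hypothetical) snippet below: one "solid <name>" block per part, each
# facet holding exactly three "vertex x y z" lines. Only the axis-aligned bounding
# box of each solid is kept, and the coordinate order is flipped (z, y, x) before
# scaling and translating.
#
#   solid FIN
#     facet normal 0 0 1
#       outer loop
#         vertex 0.0 0.0 0.0
#         vertex 1.0 0.0 0.0
#         vertex 1.0 1.0 0.0
#       endloop
#     endfacet
#   endsolid FIN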
| modulus-sym-main | examples/limerock/limerock_hFTB/limerock_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sympy import Symbol, Eq, tanh, Or, And
from omegaconf import DictConfig, OmegaConf
import hydra
from hydra.utils import to_absolute_path
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointVTKInferencer
from modulus.sym.utils.io import (
VTKUniformGrid,
)
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from limerock_properties import *
@modulus.sym.main(config_path="conf", config_name="conf_flow")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ze = ZeroEquation(
nu=nd_fluid_viscosity / nd_fluid_density, dim=3, time=False, max_distance=0.479
)
ns = NavierStokes(nu=ze.equations["nu"], rho=nd_fluid_density, dim=3, time=False)
normal_dot_vel = NormalDotVec()
flow_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
)
flow_nodes = (
ns.make_nodes()
+ ze.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# make flow domain
flow_domain = Domain()
# add constraints to solver
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# inlet
def channel_sdf(x, y, z):
sdf = limerock.channel.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"]
inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.inlet,
outvar={"u": nd_inlet_velocity, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
lambda_weighting={"u": channel_sdf, "v": 1.0, "w": 1.0},
)
flow_domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
)
flow_domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
batch_per_epoch=15000,
)
flow_domain.add_constraint(no_slip, "no_slip")
# flow interior low res away from limerock
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.lr_interior,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": 3 * Symbol("sdf"),
"momentum_x": 3 * Symbol("sdf"),
"momentum_y": 3 * Symbol("sdf"),
"momentum_z": 3 * Symbol("sdf"),
},
criteria=Or(
(x < limerock.heat_sink_bounds[0]), (x > limerock.heat_sink_bounds[1])
),
batch_per_epoch=2000,
)
flow_domain.add_constraint(lr_interior, "lr_interior")
# flow interior high res near limerock
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.hr_interior,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": 3 * Symbol("sdf"),
"momentum_x": 3 * Symbol("sdf"),
"momentum_y": 3 * Symbol("sdf"),
"momentum_z": 3 * Symbol("sdf"),
},
criteria=And(
(x > limerock.heat_sink_bounds[0]), (x < limerock.heat_sink_bounds[1])
),
batch_per_epoch=2000,
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = limerock.geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.integral_plane,
outvar={"normal_dot_vel": nd_volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=integral_criteria,
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
# add inferencer data
vtk_obj = VTKUniformGrid(
bounds=[limerock.geo_bounds[x], limerock.geo_bounds[y], limerock.geo_bounds[z]],
npoints=[256, 128, 256],
export_map={"u": ["u", "v", "w"], "p": ["p"]},
)
def mask_fn(x, y, z):
sdf = limerock.geo.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"] < 0
grid_inference = PointVTKInferencer(
vtk_obj=vtk_obj,
nodes=flow_nodes,
input_vtk_map={"x": "x", "y": "y", "z": "z"},
output_names=["u", "v", "w", "p"],
mask_fn=mask_fn,
mask_value=np.nan,
requires_grad=False,
batch_size=100000,
)
flow_domain.add_inferencer(grid_inference, "grid_inference")
# add monitor
# front pressure
plane_param_ranges = {Symbol("x_pos"): -0.7}
invar_pressure = limerock.integral_plane.sample_boundary(
5000,
parameterization=plane_param_ranges,
)
front_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(front_pressure_monitor)
# back pressure
plane_param_ranges = {Symbol("x_pos"): 0.7}
invar_pressure = limerock.integral_plane.sample_boundary(
5000,
parameterization=plane_param_ranges,
)
back_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(back_pressure_monitor)
# make solver
slv = Solver(cfg, flow_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/limerock/limerock_hFTB/limerock_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from limerock_geometry import LimeRock
# make limerock
limerock = LimeRock()
# Real Params
# fluid params
fluid_viscosity = 1.84e-05 # kg/m-s
fluid_density = 1.1614 # kg/m3
fluid_specific_heat = 1005 # J/kg-K
fluid_conductivity = 0.0261 # W/m-K
# copper params
copper_density = 8930 # kg/m3
copper_specific_heat = 385 # J/kg-K
copper_conductivity = 385 # W/m-K
# boundary params
length_scale = 0.0575 # m
inlet_velocity = 5.7 # m/s
inlet_velocity_normalized = 1.0
power = 120 # W
ambient_temperature = 61 # degree Celsius
# Nondimensionalization Params
# fluid params
nu = limerock.scale * fluid_viscosity / (fluid_density * inlet_velocity)
rho = 1.0
volumetric_flow = limerock.inlet_area * inlet_velocity_normalized
# heat params
D_solid = 0.10
D_fluid = 0.02
source_grad = 1.5
source_area = 0.25**2
source_origin = (-0.061667, -0.15833, limerock.geo_bounds_lower[2])
source_dim = (0.1285, 0.31667, 0)
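# Rough sanity check of the nondimensional viscosity defined above (values are
# approximate): nu = (5 / 0.3) * 1.84e-5 / (1.1614 * 5.7) ~= 4.6e-5, i.e. an
# effective Reynolds number on the order of 1 / nu ~= 2e4 for the unit
# nondimensional inlet velocity used here.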
| modulus-sym-main | examples/limerock/limerock_transfer_learning/limerock_properties.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import libraries
import numpy as np
import chaospy
# define parameter ranges
fin_front_top_cut_angle_ranges = (0.0, np.pi / 6.0)
fin_front_bottom_cut_angle_ranges = (0.0, np.pi / 6.0)
fin_back_top_cut_angle_ranges = (0.0, np.pi / 6.0)
fin_back_bottom_cut_angle_ranges = (0.0, np.pi / 6.0)
# generate samples
samples = chaospy.generate_samples(
order=30,
domain=np.array(
[
fin_front_top_cut_angle_ranges,
fin_front_bottom_cut_angle_ranges,
fin_back_top_cut_angle_ranges,
fin_back_bottom_cut_angle_ranges,
]
).T,
rule="halton",
)
samples = samples.T
np.random.shuffle(samples)
np.savetxt("samples.txt", samples)
| modulus-sym-main | examples/limerock/limerock_transfer_learning/sample_generator.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# import libraries
import numpy as np
import csv
import chaospy
# load data
samples = np.loadtxt("samples.txt")
num_samples = len(samples)
# read monitored values
y_vec = []
for i in range(num_samples):
front_pressure_dir = (
"./outputs/limerock_flow/tl_" + str(i) + "/monitors/front_pressure.csv"
)
back_pressure_dir = (
"./outputs/limerock_flow/tl_" + str(i) + "/monitors/back_pressure.csv"
)
with open(front_pressure_dir, "r", encoding="utf-8", errors="ignore") as scraped:
front_pressure = float(scraped.readlines()[-1].split(",")[1])
with open(back_pressure_dir, "r", encoding="utf-8", errors="ignore") as scraped:
back_pressure = float(scraped.readlines()[-1].split(",")[1])
pressure_drop = front_pressure - back_pressure
y_vec.append(pressure_drop)
y_vec = np.array(y_vec)
# Split data into training and validation
val_portion = 0.15
val_idx = np.random.choice(
np.arange(num_samples, dtype=int), int(val_portion * num_samples), replace=False
)
val_x, val_y = samples[val_idx], y_vec[val_idx]
train_x, train_y = np.delete(samples, val_idx, axis=0).T, np.delete(
y_vec, val_idx
).reshape(-1, 1)
# Construct the PCE
distribution = chaospy.J(
chaospy.Uniform(0.0, np.pi / 6),
chaospy.Uniform(0.0, np.pi / 6),
chaospy.Uniform(0.0, np.pi / 6),
chaospy.Uniform(0.0, np.pi / 6),
)
expansion = chaospy.generate_expansion(2, distribution)
poly = chaospy.fit_regression(expansion, train_x, train_y)
# PCE closed form
print("__________")
print("PCE closd form:")
print(poly)
print("__________")
# Validation
print("PCE evaluatins:")
for i in range(len(val_x)):
pred = poly(val_x[i, 0], val_x[i, 1], val_x[i, 2], val_x[i, 3])[0]
print("Sample:", val_x[i])
print("True val:", val_y[i])
print("Predicted val:", pred)
print("Relative error (%):", abs(pred - val_y[i]) / val_y[i] * 100)
print("__________")
| modulus-sym-main | examples/limerock/limerock_transfer_learning/limerock_pce_surrogate.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Symbol, Eq, tanh, Max
import numpy as np
import itertools
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.geometry import Parameterization, Parameter
fin_front_top_cut_angle = 0.0
fin_front_bottom_cut_angle = 0.0
fin_back_top_cut_angle = 0.0
fin_back_bottom_cut_angle = 0.0
"""
If running for transfer learning:
(1) Run sample_generator.py
(2) Initialize networks with trained checkpoints in the config file
(3) Change the configs for learning rate, decay rate, and max steps
(4) Uncomment the following lines
"""
# sample_id = 0 # choose the sample ID
# samples = np.loadtxt('./samples.txt')
# samples = samples[sample_id,:]
# fin_front_top_cut_angle = samples[0]
# fin_front_bottom_cut_angle = samples[1]
# fin_back_top_cut_angle = samples[2]
# fin_back_bottom_cut_angle = samples[3]
class LimeRock(object):
def __init__(self):
# scale STL
self.scale = 5 / 0.3
self.translate = (0, 0, -0.055)
# set fins
self.nr_fins = 47
self.fin_gap = 0.0018
self.fin_gap_stl = 0.0018
# make solids
self.copper = None
# parse file
print("parsing stl file...")
self._parse_file("../stl_files/limerock.stl")
print("finished parsing")
# inlet area
self.inlet_area = (self.geo_bounds_upper[1] - self.geo_bounds_lower[1]) * (
self.geo_bounds_upper[2] - self.geo_bounds_lower[2]
)
# geo
self.heat_sink_bounds = (-0.7, 0.7)
self.geo = self.channel - self.copper
self.geo_bounds = {
Symbol("x"): (self.geo_bounds_lower[0], self.geo_bounds_upper[0]),
Symbol("y"): (self.geo_bounds_lower[1], self.geo_bounds_upper[1]),
Symbol("z"): (self.geo_bounds_lower[2], self.geo_bounds_upper[2]),
}
self.geo_hr_bounds = {
Symbol("x"): self.heat_sink_bounds,
Symbol("y"): (self.geo_bounds_lower[1], self.geo_bounds_upper[1]),
Symbol("z"): (self.geo_bounds_lower[2], self.geo_bounds_upper[2]),
}
# integral plane
x_pos = Parameter("x_pos")
self.integral_plane = Plane(
(x_pos, self.geo_bounds_lower[1], self.geo_bounds_lower[2]),
(x_pos, self.geo_bounds_upper[1], self.geo_bounds_upper[2]),
1,
parameterization=Parameterization({x_pos: self.heat_sink_bounds}),
)
def solid_names(self):
return list(self.solids.keys())
def _parse_file(self, filename):
# Read file
reader = open(filename)
sdf = 0
while True:
line = reader.readline()
if "solid" == line.split(" ")[0]:
solid_name = line.split(" ")[-1].rstrip()
bounds_lower, bounds_upper = self.read_solid(reader)
if solid_name == "opening.1":
self.inlet = Plane(bounds_lower, bounds_upper, -1)
self.geo_bounds_lower = bounds_lower
elif solid_name == "fan.1":
self.outlet = Plane(bounds_lower, bounds_upper, 1)
self.geo_bounds_upper = bounds_upper
elif solid_name == "FIN":
center_stl = _center(
bounds_lower, bounds_upper
) # center of the fin in stl file
fin = Box(bounds_lower, bounds_upper)
fin = fin.repeat(
self.scale * self.fin_gap,
repeat_lower=(0, 0, 0),
repeat_higher=(0, 0, self.nr_fins - 1),
center=center_stl,
)
fin_top = (
center_stl[2]
+ (self.nr_fins - 1) * self.scale * self.fin_gap_stl
+ (bounds_upper[2] - bounds_lower[2]) / 2.0
) # z value of the top fin upper-bound
cut_box_back_bottom = Box(
(bounds_lower[0], bounds_lower[1], bounds_lower[2] - 1.0),
(
bounds_upper[0] + 1.0,
bounds_upper[1],
bounds_lower[2] - 3 * self.scale * self.fin_gap_stl,
),
)
cut_box_back_bottom = cut_box_back_bottom.rotate(
fin_back_bottom_cut_angle,
axis="y",
center=(
bounds_lower[0],
bounds_lower[1],
bounds_lower[2] - 3 * self.scale * self.fin_gap_stl,
),
)
cut_box_front_bottom = Box(
(bounds_lower[0] - 1.0, bounds_lower[1], bounds_lower[2] - 1.0),
(
bounds_upper[0],
bounds_upper[1],
bounds_lower[2] - 3 * self.scale * self.fin_gap_stl,
),
)
cut_box_front_bottom = cut_box_front_bottom.rotate(
-fin_front_bottom_cut_angle,
axis="y",
center=(
bounds_upper[0],
bounds_upper[1],
bounds_lower[2] - 3 * self.scale * self.fin_gap_stl,
),
)
cut_box_back_top = Box(
(
bounds_lower[0],
bounds_lower[1],
fin_top + 3 * self.scale * self.fin_gap_stl,
),
(bounds_upper[0] + 1.0, bounds_upper[1], fin_top + 1.0),
)
cut_box_back_top = cut_box_back_top.rotate(
-fin_back_top_cut_angle,
axis="y",
center=(
bounds_lower[0],
bounds_lower[1],
fin_top + 3 * self.scale * self.fin_gap_stl,
),
)
cut_box_front_top = Box(
(
bounds_lower[0] - 1.0,
bounds_lower[1],
fin_top + 3 * self.scale * self.fin_gap_stl,
),
(bounds_upper[0], bounds_upper[1], fin_top + 1.0),
)
cut_box_front_top = cut_box_front_top.rotate(
fin_front_top_cut_angle,
axis="y",
center=(
bounds_upper[0],
bounds_upper[1],
fin_top + 3 * self.scale * self.fin_gap_stl,
),
)
fin = (
fin
- cut_box_back_bottom
- cut_box_front_bottom
- cut_box_back_top
- cut_box_front_top
)
if self.copper is not None:
self.copper = self.copper + fin
else:
self.copper = fin
else:
solid = Box(bounds_lower, bounds_upper)
if self.copper is not None:
self.copper = self.copper + solid
else:
self.copper = solid
else:
break
self.channel = Channel(self.geo_bounds_lower, self.geo_bounds_upper)
def read_solid(self, reader):
# solid pieces
faces = []
while True:
line = reader.readline()
split_line = line.split(" ")
if len(split_line) == 0:
break
elif "endsolid" == split_line[0]:
break
elif "facet" == split_line[0]:
curve = {}
# read outer loop line
_ = reader.readline()
# read 3 vertices
a_0 = [float(x) for x in reader.readline().split(" ")[-3:]]
a_1 = [float(x) for x in reader.readline().split(" ")[-3:]]
a_2 = [float(x) for x in reader.readline().split(" ")[-3:]]
faces.append([a_0, a_1, a_2])
# read end loop/end facet
_ = reader.readline()
_ = reader.readline()
faces = np.array(faces)
bounds_lower = (
np.min(faces[..., 2]),
np.min(faces[..., 1]),
np.min(faces[..., 0]),
) # flip axis
bounds_upper = (
np.max(faces[..., 2]),
np.max(faces[..., 1]),
np.max(faces[..., 0]),
)
bounds_lower = tuple(
[self.scale * (x + t) for x, t in zip(bounds_lower, self.translate)]
)
bounds_upper = tuple(
[self.scale * (x + t) for x, t in zip(bounds_upper, self.translate)]
)
return bounds_lower, bounds_upper
def _center(bounds_lower, bounds_upper):
center_x = bounds_lower[0] + (bounds_upper[0] - bounds_lower[0]) / 2
center_y = bounds_lower[1] + (bounds_upper[1] - bounds_lower[1]) / 2
center_z = bounds_lower[2] + (bounds_upper[2] - bounds_lower[2]) / 2
return center_x, center_y, center_z
| modulus-sym-main | examples/limerock/limerock_transfer_learning/limerock_geometry.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
from sympy import Symbol, Eq, tanh, Or, And
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry.primitives_3d import Box, Channel, Plane
from modulus.sym.models.fourier_net import FourierNetArch
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
IntegralBoundaryConstraint,
)
from modulus.sym.domain.monitor import PointwiseMonitor
from modulus.sym.domain.inferencer import PointwiseInferencer
from modulus.sym.key import Key
from modulus.sym.node import Node
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.turbulence_zero_eq import ZeroEquation
from modulus.sym.eq.pdes.basic import NormalDotVec, GradNormal
from limerock_properties import *
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# make list of nodes to unroll graph on
ze = ZeroEquation(nu=nu, dim=3, time=False, max_distance=0.5)
ns = NavierStokes(nu=ze.equations["nu"], rho=rho, dim=3, time=False)
normal_dot_vel = NormalDotVec()
flow_net = FourierNetArch(
input_keys=[Key("x"), Key("y"), Key("z")],
output_keys=[Key("u"), Key("v"), Key("w"), Key("p")],
)
flow_nodes = (
ns.make_nodes()
+ ze.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
)
# make flow domain
flow_domain = Domain()
# add constraints to solver
x, y, z = Symbol("x"), Symbol("y"), Symbol("z")
# inlet
def channel_sdf(x, y, z):
sdf = limerock.channel.sdf({"x": x, "y": y, "z": z}, {})
return sdf["sdf"]
inlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.inlet,
outvar={"u": inlet_velocity_normalized, "v": 0, "w": 0},
batch_size=cfg.batch_size.inlet,
batch_per_epoch=5000,
lambda_weighting={"u": channel_sdf, "v": 1.0, "w": 1.0},
)
flow_domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.outlet,
outvar={"p": 0},
batch_size=cfg.batch_size.outlet,
batch_per_epoch=5000,
)
flow_domain.add_constraint(outlet, "outlet")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"u": 0, "v": 0, "w": 0},
batch_size=cfg.batch_size.no_slip,
batch_per_epoch=15000,
)
flow_domain.add_constraint(no_slip, "no_slip")
# flow interior low res away from limerock
lr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.lr_interior,
batch_per_epoch=5000,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": 3 * Symbol("sdf"),
"momentum_x": 3 * Symbol("sdf"),
"momentum_y": 3 * Symbol("sdf"),
"momentum_z": 3 * Symbol("sdf"),
},
criteria=Or(
(x < limerock.heat_sink_bounds[0]), (x > limerock.heat_sink_bounds[1])
),
)
flow_domain.add_constraint(lr_interior, "lr_interior")
# flow interior high res near limerock
hr_interior = PointwiseInteriorConstraint(
nodes=flow_nodes,
geometry=limerock.geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0, "momentum_z": 0},
batch_size=cfg.batch_size.hr_interior,
batch_per_epoch=5000,
compute_sdf_derivatives=True,
lambda_weighting={
"continuity": 3 * Symbol("sdf"),
"momentum_x": 3 * Symbol("sdf"),
"momentum_y": 3 * Symbol("sdf"),
"momentum_z": 3 * Symbol("sdf"),
},
criteria=And(
(x > limerock.heat_sink_bounds[0]), (x < limerock.heat_sink_bounds[1])
),
)
flow_domain.add_constraint(hr_interior, "hr_interior")
# integral continuity
def integral_criteria(invar, params):
sdf = limerock.geo.sdf(invar, params)
return np.greater(sdf["sdf"], 0)
integral_continuity = IntegralBoundaryConstraint(
nodes=flow_nodes,
geometry=limerock.integral_plane,
outvar={"normal_dot_vel": volumetric_flow},
batch_size=cfg.batch_size.num_integral_continuity,
integral_batch_size=cfg.batch_size.integral_continuity,
lambda_weighting={"normal_dot_vel": 0.1},
criteria=integral_criteria,
)
flow_domain.add_constraint(integral_continuity, "integral_continuity")
print("finished generating points")
"""# add inferencer data
invar_flow_numpy = limerock.geo.sample_interior(10000, bounds=limerock.geo_bounds)
point_cloud_inference = PointwiseInferencer(invar_flow_numpy, ["u", "v", "w", "p"], flow_nodes)
flow_domain.add_inferencer(point_cloud_inference, "inf_data")"""
# add monitor
# front pressure
plane_param_ranges = {Symbol("x_pos"): -0.7}
invar_pressure = limerock.integral_plane.sample_boundary(
5000,
parameterization=plane_param_ranges,
)
front_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={"front_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(front_pressure_monitor)
# back pressure
plane_param_ranges = {Symbol("x_pos"): 0.7}
invar_pressure = limerock.integral_plane.sample_boundary(
5000,
parameterization=plane_param_ranges,
)
back_pressure_monitor = PointwiseMonitor(
invar_pressure,
output_names=["p"],
metrics={"back_pressure": lambda var: torch.mean(var["p"])},
nodes=flow_nodes,
)
flow_domain.add_monitor(back_pressure_monitor)
# make solver
slv = Solver(cfg, flow_domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/limerock/limerock_transfer_learning/limerock_flow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
import numpy as np
from sympy import Symbol, Eq
import modulus.sym
from modulus.sym.hydra import to_absolute_path, instantiate_arch, ModulusConfig
from modulus.sym.utils.io import csv_to_dict
from modulus.sym.solver import Solver
from modulus.sym.domain import Domain
from modulus.sym.geometry import Bounds
from modulus.sym.geometry.primitives_2d import Line, Circle, Channel2D
from modulus.sym.eq.pdes.navier_stokes import NavierStokes
from modulus.sym.eq.pdes.basic import NormalDotVec
from modulus.sym.domain.constraint import (
PointwiseBoundaryConstraint,
PointwiseInteriorConstraint,
)
from modulus.sym.domain.validator import PointwiseValidator
from modulus.sym.key import Key
from modulus.sym import quantity
from modulus.sym.eq.non_dim import NonDimensionalizer, Scaler
@modulus.sym.main(config_path="conf", config_name="config")
def run(cfg: ModulusConfig) -> None:
# physical quantities
nu = quantity(0.02, "kg/(m*s)")
rho = quantity(1.0, "kg/m^3")
inlet_u = quantity(1.0, "m/s")
inlet_v = quantity(0.0, "m/s")
noslip_u = quantity(0.0, "m/s")
noslip_v = quantity(0.0, "m/s")
outlet_p = quantity(0.0, "pa")
velocity_scale = inlet_u
density_scale = rho
length_scale = quantity(20, "m")
nd = NonDimensionalizer(
length_scale=length_scale,
time_scale=length_scale / velocity_scale,
mass_scale=density_scale * (length_scale**3),
)
# geometry
channel_length = (quantity(-10, "m"), quantity(30, "m"))
channel_width = (quantity(-10, "m"), quantity(10, "m"))
cylinder_center = (quantity(0, "m"), quantity(0, "m"))
cylinder_radius = quantity(0.5, "m")
channel_length_nd = tuple(map(lambda x: nd.ndim(x), channel_length))
channel_width_nd = tuple(map(lambda x: nd.ndim(x), channel_width))
cylinder_center_nd = tuple(map(lambda x: nd.ndim(x), cylinder_center))
cylinder_radius_nd = nd.ndim(cylinder_radius)
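# With length_scale = 20 m and velocity_scale = 1 m/s, the nondimensional values
# above work out to roughly: channel_length_nd = (-0.5, 1.5),
# channel_width_nd = (-0.5, 0.5), cylinder_radius_nd = 0.025, and nd.ndim(inlet_u) = 1.0.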
channel = Channel2D(
(channel_length_nd[0], channel_width_nd[0]),
(channel_length_nd[1], channel_width_nd[1]),
)
inlet = Line(
(channel_length_nd[0], channel_width_nd[0]),
(channel_length_nd[0], channel_width_nd[1]),
normal=1,
)
outlet = Line(
(channel_length_nd[1], channel_width_nd[0]),
(channel_length_nd[1], channel_width_nd[1]),
normal=1,
)
wall_top = Line(
(channel_length_nd[1], channel_width_nd[0]),
(channel_length_nd[1], channel_width_nd[1]),
normal=1,
)
cylinder = Circle(cylinder_center_nd, cylinder_radius_nd)
volume_geo = channel - cylinder
# make list of nodes to unroll graph on
ns = NavierStokes(nu=nd.ndim(nu), rho=nd.ndim(rho), dim=2, time=False)
normal_dot_vel = NormalDotVec(["u", "v"])
flow_net = instantiate_arch(
input_keys=[Key("x"), Key("y")],
output_keys=[Key("u"), Key("v"), Key("p")],
cfg=cfg.arch.fully_connected,
)
nodes = (
ns.make_nodes()
+ normal_dot_vel.make_nodes()
+ [flow_net.make_node(name="flow_network")]
+ Scaler(
["u", "v", "p"],
["u_scaled", "v_scaled", "p_scaled"],
["m/s", "m/s", "m^2/s^2"],
nd,
).make_node()
)
# make domain
domain = Domain()
x, y = Symbol("x"), Symbol("y")
# inlet
inlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=inlet,
outvar={"u": nd.ndim(inlet_u), "v": nd.ndim(inlet_v)},
batch_size=cfg.batch_size.inlet,
)
domain.add_constraint(inlet, "inlet")
# outlet
outlet = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=outlet,
outvar={"p": nd.ndim(outlet_p)},
batch_size=cfg.batch_size.outlet,
)
domain.add_constraint(outlet, "outlet")
# full slip (channel walls)
walls = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=channel,
outvar={"u": nd.ndim(inlet_u), "v": nd.ndim(inlet_v)},
batch_size=cfg.batch_size.walls,
)
domain.add_constraint(walls, "walls")
# no slip
no_slip = PointwiseBoundaryConstraint(
nodes=nodes,
geometry=cylinder,
outvar={"u": nd.ndim(noslip_u), "v": nd.ndim(noslip_v)},
batch_size=cfg.batch_size.no_slip,
)
domain.add_constraint(no_slip, "no_slip")
# interior constraints
interior = PointwiseInteriorConstraint(
nodes=nodes,
geometry=volume_geo,
outvar={"continuity": 0, "momentum_x": 0, "momentum_y": 0},
batch_size=cfg.batch_size.interior,
bounds=Bounds({x: channel_length_nd, y: channel_width_nd}),
)
domain.add_constraint(interior, "interior")
# Loading validation data from CSV
file_path = "openfoam/cylinder_nu_0.020.csv"
if os.path.exists(to_absolute_path(file_path)):
mapping = {
"Points:0": "x",
"Points:1": "y",
"U:0": "u_scaled",
"U:1": "v_scaled",
"p": "p_scaled",
}
openfoam_var = csv_to_dict(to_absolute_path(file_path), mapping)
openfoam_invar_numpy = {
key: value / length_scale.magnitude
for key, value in openfoam_var.items()
if key in ["x", "y"]
}
openfoam_outvar_numpy = {
key: value
for key, value in openfoam_var.items()
if key in ["u_scaled", "v_scaled", "p_scaled"]
}
openfoam_validator = PointwiseValidator(
nodes=nodes, invar=openfoam_invar_numpy, true_outvar=openfoam_outvar_numpy
)
domain.add_validator(openfoam_validator)
else:
warnings.warn(
f"Directory {file_path} does not exist. Will skip adding validators. Please download the additional files from NGC https://catalog.ngc.nvidia.com/orgs/nvidia/teams/modulus/resources/modulus_sym_examples_supplemental_materials"
)
# make solver
slv = Solver(cfg, domain)
# start solver
slv.solve()
if __name__ == "__main__":
run()
| modulus-sym-main | examples/cylinder/cylinder_2d.py |
#!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract strings from command files and externalize into translation files.
Expects to be run from the root directory of the repository.
Usage:
extract.py pkg/kubectl/cmd/apply.go
"""
import fileinput
import sys
import re
class MatchHandler(object):
""" Simple holder for a regular expression and a function
to run if that regular expression matches a line.
The function should expect (re.match, file, linenumber) as parameters
"""
def __init__(self, regex, replace_fn):
self.regex = re.compile(regex)
self.replace_fn = replace_fn
def short_replace(match, file, line_number):
"""Replace a Short: ... cobra command description with an internationalization
"""
sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
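# For example, SHORT_MATCH rewrites a line such as
#   Short: "Apply a configuration to a resource by filename or stdin",
# into
#   Short: i18n.T("Apply a configuration to a resource by filename or stdin"),
# (the example command text is illustrative, not taken from a specific file).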
def import_replace(match, file, line_number):
"""Add an extra import for the i18n library.
Doesn't try to be smart and detect if it's already present, assumes a
gofmt round will fix things.
"""
sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
def string_flag_replace(match, file, line_number):
"""Replace a cmd.Flags().String("...", "", "...") with an internationalization
"""
sys.stdout.write('{}i18n.T("{}"))\n'.format(match.group(1), match.group(2)))
STRING_FLAG_MATCH = MatchHandler(r'(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
def long_string_replace(match, file, line_number):
return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
LONG_DESC_MATCH = MatchHandler(r'(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
EXAMPLE_MATCH = MatchHandler(r'(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
def replace(filename, matchers, multiline_matchers):
"""Given a file and a set of matchers, run those matchers
across the file and replace it with the results.
"""
# Run all the matchers
line_number = 0
for line in fileinput.input(filename, inplace=True):
line_number += 1
matched = False
for matcher in matchers:
match = matcher.regex.match(line)
if match:
matcher.replace_fn(match, filename, line_number)
matched = True
break
if not matched:
sys.stdout.write(line)
sys.stdout.flush()
with open(filename, 'r') as datafile:
content = datafile.read()
for matcher in multiline_matchers:
match = matcher.regex.search(content)
while match:
rep = matcher.replace_fn(match, filename, 0)
# Escape back references in the replacement string
# (And escape for Python)
# (And escape for regex)
rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep)
content = matcher.regex.sub(rep, content, 1)
match = matcher.regex.search(content)
sys.stdout.write(content)
# gofmt the file again
from subprocess import call
call(["goimports", "-w", filename])
replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH])
| k8s-driver-manager-master | vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Instantiate the TAO-API docker container for developers."""
import argparse
from distutils.version import LooseVersion
import json
import os
import subprocess
import sys
ROOT_DIR = os.getenv("NV_TAO_API_TOP", os.getcwd())
with open(os.path.join(ROOT_DIR, "docker/manifest.json"), "r") as m_file:
docker_config = json.load(m_file)
DOCKER_REGISTRY = docker_config["registry"]
DOCKER_REPOSITORY = docker_config["repository"]
DOCKER_DIGEST = docker_config["digest"]
DOCKER_COMMAND = "docker"
HOME_PATH = os.path.expanduser("~")
MOUNTS_PATH = os.path.join(HOME_PATH, ".tao_mounts.json")
def get_docker_mounts_from_file(mounts_file=MOUNTS_PATH):
"""Check for docker mounts in ~/.tao_mounts.json."""
if not os.path.exists(mounts_file):
return []
with open(mounts_file, 'r') as mfile:
data = json.load(mfile)
assert "Mounts" in list(data.keys()), "Invalid json file. Requires Mounts key."
return data["Mounts"]
def format_mounts(mount_points):
"""Format mount points to docker recognizable commands."""
formatted_mounts = []
# Traverse through mount points and format them for the docker command.
for mount_point in mount_points:
assert "source" in list(mount_point.keys()), "destination" in list(mount_point.keys())
mount = "{}:{}".format(mount_point["source"], mount_point["destination"])
formatted_mounts.append(mount)
return formatted_mounts
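# For reference, get_docker_mounts_from_file/format_mounts above expect
# ~/.tao_mounts.json to look roughly like the hypothetical example below
# (paths are placeholders):
#
#   {
#       "Mounts": [
#           {"source": "/home/<user>/tao-experiments", "destination": "/workspace/tao-experiments"}
#       ]
#   }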
def check_image_exists(docker_image):
"""Check if the image exists locally."""
check_command = '{} images | grep "\\<{}\\>" >/dev/null 2>&1'.format(DOCKER_COMMAND, docker_image)
rc = subprocess.call(check_command, stdout=sys.stderr, shell=True)
return rc == 0
def pull_base_container(docker_image):
"""Pull the default base container."""
pull_command = "{} pull {}@{}".format(DOCKER_COMMAND, docker_image, DOCKER_DIGEST)
rc = subprocess.call(pull_command, stdout=sys.stderr, shell=True)
return rc == 0
def get_formatted_mounts(mount_file):
"""Simple function to get default mount points."""
default_mounts = get_docker_mounts_from_file(mount_file)
return format_mounts(default_mounts)
def check_mounts(formatted_mounts):
"""Check the formatted mount commands."""
assert type(formatted_mounts) == list
for mounts in formatted_mounts:
source_path = mounts.split(":")[0]
if not os.path.exists(source_path):
raise ValueError("Path doesn't exist: {}".format(source_path))
return True
def get_docker_gpus_prefix(gpus):
"""Get the docker command gpu's prefix."""
docker_version = (
subprocess.check_output(
["docker", "version", "--format={{ .Server.APIVersion }}"]
)
.strip()
.decode()
)
if LooseVersion(docker_version) > LooseVersion("1.40"):
# You are using the latest version of docker using
# --gpus instead of the nvidia runtime.
gpu_string = "--gpus "
if gpus == "all":
gpu_string += "all"
else:
gpu_string += "\'\"device={}\"\'".format(gpus)
else:
# Stick to the older version of getting the gpu's using runtime=nvidia
gpu_string = "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all "
if gpus != "none":
gpu_string += "-e NVIDIA_VISIBLE_DEVICES={}".format(gpus)
return gpu_string
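# For example (approximate, depending on the local docker version): with a server
# API version newer than 1.40, gpus="all" yields "--gpus all" and gpus="0,1"
# yields --gpus '"device=0,1"'; on older versions gpus="0,1" falls back to
# "--runtime=nvidia -e NVIDIA_DRIVER_CAPABILITIES=all -e NVIDIA_VISIBLE_DEVICES=0,1".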
def instantiate_dev_docker(gpus, mount_file,
mount_cli_list,
env_var_list,
tag, command, ulimit=None,
shm_size="16G", run_as_user=False):
"""Instiate the docker container."""
docker_image = "{}/{}@{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, DOCKER_DIGEST)
if tag is not None:
docker_image = "{}/{}:{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY, tag)
# Invoking the nvidia docker.
gpu_string = get_docker_gpus_prefix(gpus)
# Prefix for the run command.
run_command = "{} run -it --rm".format(DOCKER_COMMAND)
# get default mount points.
formatted_mounts = get_formatted_mounts(MOUNTS_PATH)
# get mounts from cli mount file.
formatted_mounts += get_formatted_mounts(mount_file)
if mount_cli_list is not None:
formatted_mounts.extend(mount_cli_list)
assert check_mounts(formatted_mounts), "Mounts don't exist. Please make sure the paths all exist."
mount_string = "-v {}:/tao-api ".format(os.getenv("NV_TAO_API_TOP", os.getcwd()))
# Defining env variables.
env_variables = "-e PYTHONPATH={}:$PYTHONPATH ".format("/tao-api/api")
for env in env_var_list:
if "=" not in env:
print(f"invalid env variable definition. skipping this {env}")
continue
env_variables += "-e {} ".format(env)
for path in formatted_mounts:
mount_string += "-v {} ".format(path)
# Setting shared memory.
shm_option = "--shm-size {}".format(shm_size)
# Setting ulimits for host
ulimit_options = ""
if ulimit is not None:
for param in ulimit:
ulimit_options += "--ulimit {} ".format(param)
user_option = ""
if run_as_user:
user_option = "--user {}:{}".format(os.getuid(), os.getgid())
final_command = "{} {} {} {} {} {} {} {} {}".format(
run_command, gpu_string,
mount_string, env_variables,
shm_option, ulimit_options, user_option,
docker_image, " ".join(command)
)
return subprocess.check_call(final_command, stdout=sys.stderr, shell=True)
def parse_cli_args(args=None):
"""Parse run container command line."""
parser = argparse.ArgumentParser(prog="tao_api", description="Tool to run the API container.", add_help=True)
parser.add_argument(
"--gpus", default="all", type=str, help="Comma separated GPU indices to be exposed to the docker."
)
parser.add_argument("--volume", action="append", type=str, default=[], help="Volumes to bind.")
parser.add_argument("--env", action="append", type=str, default=[], help="Environment variables to bind.")
parser.add_argument("--mounts_file", help="Path to the mounts file.", default="", type=str)
parser.add_argument("--shm_size", help="Shared memory size for docker", default="16G", type=str)
parser.add_argument("--run_as_user", help="Flag to run as user", action="store_true", default=False)
parser.add_argument("--tag", help="The tag value for the local dev docker.", default=None, type=str)
parser.add_argument("--ulimit", action='append', help="Docker ulimits for the host machine." )
args = vars(parser.parse_args(args))
return args
def main(cl_args=None):
"""Start docker container."""
index = cl_args.index("--")
# Split args to the tao docker wrapper and the command to be run inside the docker.
tao_api_args = cl_args[:index]
command_args = cl_args[index + 1:]
# parse command line args.
args = parse_cli_args(tao_api_args)
docker_image = "{}/{}".format(DOCKER_REGISTRY, DOCKER_REPOSITORY)
if args["tag"] is not None:
docker_image = "{}:{}".format(docker_image, args["tag"])
if not check_image_exists(docker_image):
assert pull_base_container(docker_image), "The base container doesn't exist locally and " "the pull failed."
try:
instantiate_dev_docker(
args["gpus"], args["mounts_file"],
args["volume"], args["env"],
args["tag"], command_args,
args["ulimit"], args["shm_size"],
args["run_as_user"]
)
except subprocess.CalledProcessError:
# Do nothing - the errors are printed in entrypoint launch.
pass
if __name__ == "__main__":
main(sys.argv[1:])
| tao_front_end_services-main | runner/tao_api.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launcher ."""
"""TAO-Client SDK version"""
MAJOR = "5"
MINOR = "0.0"
PATCH = "1"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
# required since setup.py runs a version string and global imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "../build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r', encoding="utf-8") as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao-client"
__description__ = "NVIDIA's package for using REST-API via TAO-Client."
__keywords__ = "nvidia, tao, api"
__contact_names__ = "Varun Praveen"
__contact_emails__ = "[email protected]"
__license__ = "NVIDIA Proprietary Software"
| tao_front_end_services-main | cli/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tao-toolkit API module"""
| tao_front_end_services-main | cli/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script to build the TAO Toolkit client package."""
import os
import setuptools
import sys
CLI_SOURCE_PATH = os.getcwd()
def get_version_details():
"""Simple function to get packages for setup.py."""
# Get current __version__.
version_locals = {}
with open('version.py', 'r', encoding="utf-8") as version_file:
exec(version_file.read(), {}, version_locals) # pylint: disable=W0122
return version_locals
def get_requirements(package_root):
"""Simple function to get packages."""
with open(os.path.join(package_root, "requirements-pip.txt"), 'r', encoding="utf-8") as req_file:
requirements = [r.replace('\n', '') for r in req_file.readlines()]
return requirements
def find_packages(package_name):
"""List of packages.
Args:
package_name (str): Name of the package.
Returns:
packages (list): List of packages.
"""
packages = setuptools.find_packages(package_name)
packages = [f"{package_name}.{f}" for f in packages]
packages.append(package_name)
return packages
def main(args=sys.argv[1:]):
"""Main wrapper to run setup.py"""
# Get package related information.
version_locals = get_version_details()
install_requirements = get_requirements(CLI_SOURCE_PATH)
print(f'Building wheel with version number {version_locals["__version__"]}')
PACKAGE_LIST = [
"tao_cli"
]
setuptools_packages = []
for package_name in PACKAGE_LIST:
setuptools_packages.extend(find_packages(package_name))
# TODO: Modify script entry points
setuptools.setup(
name=version_locals["__package_name__"],
version=version_locals['__version__'],
description=version_locals["__description__"],
author=version_locals["__contact_names__"],
author_email=version_locals["__contact_emails__"],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Intended Audience :: Developers',
# Indicate what your project relates to
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Environment :: Console',
'License :: Other/Proprietary License',
f'Programming Language :: Python :: {sys.version_info.major}',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords=version_locals["__keywords__"],
license="NVIDIA Proprietary Software",
packages=setuptools_packages,
package_data={
'': ['*.pyc', "*.yaml", "*.so"]
},
install_requires=install_requirements,
zip_safe=False,
entry_points={
'console_scripts': [
'tao-client=tao_cli.tao:cli',
]
}
)
if __name__ == "__main__":
main()
| tao_front_end_services-main | cli/setup.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defining enums for dataset and model formats and types"""
import enum
class dataset_format(str, enum.Enum):
"""Class defining dataset formats in enum"""
default = "default"
kitti = "kitti"
lprnet = "lprnet"
coco = "coco"
raw = "raw"
coco_raw = "coco_raw"
custom = "custom"
unet = "unet"
classification_pyt = "classification_pyt"
class dataset_type(str, enum.Enum):
"""Class defining dataset types in enum"""
semantic_segmentation = "semantic_segmentation"
image_classification = "image_classification"
object_detection = "object_detection"
character_recognition = "character_recognition"
instance_segmentation = "instance_segmentation"
bpnet = "bpnet"
fpenet = "fpenet"
action_recognition = "action_recognition"
ml_recog = "ml_recog"
ocdnet = "ocdnet"
ocrnet = "ocrnet"
optical_inspection = "optical_inspection"
pointpillars = "pointpillars"
pose_classification = "pose_classification"
re_identification = "re_identification"
class network_type(str, enum.Enum):
"""Class defining network types in enum"""
# TF1 CV networks
detectnet_v2 = "detectnet_v2"
dssd = "dssd"
efficientdet_tf1 = "efficientdet_tf1"
lprnet = "lprnet"
unet = "unet"
multitask_classification = "multitask_classification"
classification_tf1 = "classification_tf1"
mask_rcnn = "mask_rcnn"
ssd = "ssd"
retinanet = "retinanet"
faster_rcnn = "faster_rcnn"
yolo_v3 = "yolo_v3"
yolo_v4 = "yolo_v4"
yolo_v4_tiny = "yolo_v4_tiny"
# TF1 DRIVEIX networks
bpnet = "bpnet"
fpenet = "fpenet"
# TF2 CV networks
classification_tf2 = "classification_tf2"
efficientdet_tf2 = "efficientdet_tf2"
# PYTORCH CV networks
action_recognition = "action_recognition"
classification_pyt = "classification_pyt"
deformable_detr = "deformable_detr"
dino = "dino"
mal = "mal"
ml_recog = "ml_recog"
ocdnet = "ocdnet"
ocrnet = "ocrnet"
optical_inspection = "optical_inspection"
pointpillars = "pointpillars"
pose_classification = "pose_classification"
segformer = "segformer"
re_identification = "re_identification"
# PYTORCH TTS
spectro_gen = "spectro_gen"
vocoder = "vocoder"
# Data Services
annotations = "annotations"
analytics = "analytics"
auto_label = "auto_label"
augmentation = "augmentation"
| tao_front_end_services-main | cli/tao_cli/constants.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Add click network groups to the cli command"""
import click
from tao_cli.login import login
# TF1 CV networks
from tao_cli.networks.detectnet_v2 import detectnet_v2
from tao_cli.networks.dssd import dssd
from tao_cli.networks.efficientdet_tf1 import efficientdet_tf1
from tao_cli.networks.lprnet import lprnet
from tao_cli.networks.unet import unet
from tao_cli.networks.multi_task_classification import multitask_classification
from tao_cli.networks.multi_class_classification_tf1 import classification_tf1
from tao_cli.networks.mask_rcnn import mask_rcnn
from tao_cli.networks.ssd import ssd
from tao_cli.networks.retinanet import retinanet
from tao_cli.networks.faster_rcnn import faster_rcnn
from tao_cli.networks.yolo_v3 import yolo_v3
from tao_cli.networks.yolo_v4 import yolo_v4
from tao_cli.networks.yolo_v4_tiny import yolo_v4_tiny
# TF1 DRIVEIX networks
from tao_cli.networks.bpnet import bpnet
from tao_cli.networks.fpenet import fpenet
# TF2 CV networks
from tao_cli.networks.multi_class_classification_tf2 import classification_tf2
from tao_cli.networks.efficientdet_tf2 import efficientdet_tf2
# PYTORCH CV networks
from tao_cli.networks.action_recognition import action_recognition
from tao_cli.networks.multi_class_classification_pyt import classification_pyt
from tao_cli.networks.mal import mal
from tao_cli.networks.ml_recog import ml_recog
from tao_cli.networks.ocdnet import ocdnet
from tao_cli.networks.ocrnet import ocrnet
from tao_cli.networks.optical_inspection import optical_inspection
from tao_cli.networks.pointpillars import pointpillars
from tao_cli.networks.pose_classification import pose_classification
from tao_cli.networks.re_identification import re_identification
from tao_cli.networks.segformer import segformer
from tao_cli.networks.deformable_detr import deformable_detr
from tao_cli.networks.dino import dino
# Data Services
from tao_cli.networks.annotations import annotations
from tao_cli.networks.analytics import analytics
from tao_cli.networks.auto_label import auto_label
from tao_cli.networks.augmentation import augmentation
@click.group()
@click.version_option(package_name='nvidia-tao-client')
@click.pass_context
def cli(ctx):
"""Create base nvidia-tao-client group"""
pass
cli.add_command(login)
# TF1 CV networks
cli.add_command(detectnet_v2)
cli.add_command(dssd)
cli.add_command(efficientdet_tf1)
cli.add_command(lprnet)
cli.add_command(unet)
cli.add_command(multitask_classification)
cli.add_command(classification_tf1)
cli.add_command(mask_rcnn)
cli.add_command(ssd)
cli.add_command(retinanet)
cli.add_command(faster_rcnn)
cli.add_command(yolo_v3)
cli.add_command(yolo_v4)
cli.add_command(yolo_v4_tiny)
# TF1 DRIVEIX networks
cli.add_command(bpnet)
cli.add_command(fpenet)
# TF2 CV networks
cli.add_command(classification_tf2)
cli.add_command(efficientdet_tf2)
# PYTORCH CV networks
cli.add_command(action_recognition)
cli.add_command(classification_pyt)
cli.add_command(mal)
cli.add_command(ml_recog)
cli.add_command(ocdnet)
cli.add_command(ocrnet)
cli.add_command(optical_inspection)
cli.add_command(pointpillars)
cli.add_command(pose_classification)
cli.add_command(re_identification)
cli.add_command(segformer)
cli.add_command(deformable_detr)
cli.add_command(dino)
# Data Services
cli.add_command(annotations)
cli.add_command(analytics)
cli.add_command(auto_label)
cli.add_command(augmentation)
if __name__ == '__main__':
cli()
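# Example invocation (a sketch; assumes the tao_cli package is importable or the
# nvidia-tao-client package is installed):
#   python -m tao_cli.tao --help      # lists login plus every registered network group
#   python -m tao_cli.tao --version   # reports the nvidia-tao-client package version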
| tao_front_end_services-main | cli/tao_cli/tao.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Client CLI module"""
| tao_front_end_services-main | cli/tao_cli/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Login modules"""
import requests
import click
import json
import os
@click.command()
@click.option('--ngc-api-key', prompt='ngc_api_key', help='Your NGC API KEY.', required=True)
def login(ngc_api_key):
"""User login method"""
base_url = os.getenv('BASE_URL', 'http://localhost/api/v1')
endpoint = base_url + "/login/" + ngc_api_key
response = requests.get(endpoint)
click.echo(json.dumps(response.json()))
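# Usage sketch (hypothetical values): with the API reachable at BASE_URL,
#   BASE_URL=http://<server>/api/v1 python -m tao_cli.tao login --ngc-api-key <NGC_API_KEY>
# issues GET <BASE_URL>/login/<NGC_API_KEY> and echoes the JSON response, which is
# expected to carry the token that the other CLI actions read from the TOKEN
# environment variable.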
| tao_front_end_services-main | cli/tao_cli/login.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO-Client base actions module"""
import json
import requests
import os
class Actions:
"""Base class which defines API functions for general actions"""
def __init__(self):
"""Initialize the actions base class"""
self.user = os.getenv('USER', 'nobody')
        self.base_url = os.getenv('BASE_URL', 'http://localhost/api/v1') + f"/user/{self.user}"
self.token = os.getenv('TOKEN', 'invalid')
self.headers = {"Authorization": f"Bearer {self.token}"}
self.sub_action = self.__class__.__name__.lower()
def get_action_spec(self, id, action):
"""Return spec dictionary for the action passed"""
endpoint = self.base_url + f"/{self.sub_action}/{id}/specs/{action}/schema"
response = requests.get(endpoint, headers=self.headers)
data = response.json()["default"]
return data
def get_automl_defaults(self, id, action):
"""Return automl parameters enabled for a network"""
endpoint = self.base_url + f"/{self.sub_action}/{id}/specs/{action}/schema"
response = requests.get(endpoint, headers=self.headers)
data = response.json()["automl_default_parameters"]
return data
def run_action(self, id, job, action):
"""Submit post request for an action"""
data = json.dumps({"job": job, "actions": action})
endpoint = self.base_url + f"/{self.sub_action}/{id}/job"
response = requests.post(endpoint, data=data, headers=self.headers)
job_id = response.json()[0]
return job_id
def model_job_cancel(self, id, job):
"""Pause a running job"""
endpoint = self.base_url + f"/{self.sub_action}/{id}/job/{job}/cancel"
requests.post(endpoint, headers=self.headers)
def model_job_resume(self, id, job):
"""Resume a paused job"""
endpoint = self.base_url + f"/{self.sub_action}/{id}/job/{job}/resume"
requests.post(endpoint, headers=self.headers)
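    # Note: the per-network CLI modules call dataset_obj.dataset_job_cancel(...),
    # which is not defined on Dataset or on this base class in the original source.
    # The helper below is a hedged sketch that mirrors model_job_cancel above and
    # assumes the API exposes the symmetric /dataset/<id>/job/<job>/cancel endpoint
    # (self.sub_action resolves to the lowercased subclass name, e.g. "dataset").
    def dataset_job_cancel(self, id, job):
        """Pause or cancel a running dataset job (assumed endpoint, mirroring model_job_cancel)"""
        endpoint = self.base_url + f"/{self.sub_action}/{id}/job/{job}/cancel"
        requests.post(endpoint, headers=self.headers)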
| tao_front_end_services-main | cli/tao_cli/cli_actions/actions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO client cli actions module"""
| tao_front_end_services-main | cli/tao_cli/cli_actions/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO-Client model module"""
import json
import requests
from tao_cli.cli_actions.actions import Actions
class Model(Actions):
"""Class which defines API functions for model specific actions"""
# def __init__(self):
# """Intialize Model class"""
# super().__init__()
def model_create(self, network_arch, encryption_key):
"""Create a model and return the id"""
data = json.dumps({"network_arch": network_arch, "encryption_key": encryption_key})
endpoint = self.base_url + "/model"
response = requests.post(endpoint, data=data, headers=self.headers)
id = response.json()["id"]
return id
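# Usage sketch (hypothetical architecture name and key; USER, BASE_URL and TOKEN are
# read from the environment by the Actions base class):
#   model_id = Model().model_create("classification_tf1", "<encryption_key>")
# The returned id is what the per-network CLI commands pass as --id.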
| tao_front_end_services-main | cli/tao_cli/cli_actions/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO-Client dataset module"""
import json
import requests
from tao_cli.cli_actions.actions import Actions
class Dataset(Actions):
"""Class which defines API functions for dataset specific actions"""
# def __init__(self):
# """Intialize Dataset class"""
# super().__init__()
def dataset_create(self, dataset_type, dataset_format):
"""Create a dataset and return the id"""
data = json.dumps({"type": dataset_type, "format": dataset_format})
endpoint = self.base_url + "/dataset"
response = requests.post(endpoint, data=data, headers=self.headers)
id = response.json()["id"]
return id
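# Usage sketch (hypothetical type/format strings; the CLI restricts these to the
# choices defined in tao_cli.constants):
#   dataset_id = Dataset().dataset_create("object_detection", "kitti")
# The returned id is then used by the per-network dataset and model commands.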
| tao_front_end_services-main | cli/tao_cli/cli_actions/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-class classification tf1 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def classification_tf1():
"""Create Multi-class classification tf1 model click group"""
pass
@classification_tf1.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@classification_tf1.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@classification_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/multi_class_classification_tf1.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet tf1 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def efficientdet_tf1():
"""Create EfficientDet tf1 model click group"""
pass
@efficientdet_tf1.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@efficientdet_tf1.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@efficientdet_tf1.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/efficientdet_tf1.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-class classification pyt tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def classification_pyt():
    """Create Multi-class classification pyt model click group"""
    pass
@classification_pyt.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@classification_pyt.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@classification_pyt.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/multi_class_classification_pyt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def ocdnet():
"""Create DetectNet V2 model click group"""
pass
@ocdnet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@ocdnet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@ocdnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/ocdnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LPRNET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def lprnet():
"""Create LPRNET model click group"""
pass
@lprnet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@lprnet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@lprnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/lprnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auto-label tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def auto_label():
    """Create auto-label model click group"""
    pass
@auto_label.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@auto_label.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
def model_create(network_arch):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, "")
click.echo(f"{id}")
@auto_label.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_action_defaults(id):
"""Return default action spec"""
data = model_obj.get_action_spec(id, "generate")
click.echo(json.dumps(data, indent=2))
@auto_label.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def execute_action(id):
"""Run action"""
job_id = model_obj.run_action(id, None, ["generate"])
click.echo(f"{job_id}")
| tao_front_end_services-main | cli/tao_cli/networks/auto_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO v3 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def yolo_v3():
"""Create YOLO v3 model click group"""
pass
@yolo_v3.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v3.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v3.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/yolo_v3.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SSD tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def ssd():
"""Create SSD model click group"""
pass
@ssd.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@ssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@ssd.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@ssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/ssd.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unet tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def unet():
"""Create Unet model click group"""
pass
@unet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@unet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@unet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@unet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/unet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Services - Data Augmentation tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def augmentation():
    """Create Data Augmentation click group"""
    pass
@augmentation.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@augmentation.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
def model_create(network_arch):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, "")
click.echo(f"{id}")
@augmentation.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_action_defaults(id):
"""Return default action spec"""
data = model_obj.get_action_spec(id, "generate")
click.echo(json.dumps(data, indent=2))
@augmentation.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def execute_action(id):
"""Run action"""
job_id = model_obj.run_action(id, None, ["generate"])
click.echo(f"{job_id}")
| tao_front_end_services-main | cli/tao_cli/networks/augmentation.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BPNet tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def bpnet():
"""Create BPNet model click group"""
pass
@bpnet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@bpnet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_trtexec_defaults(id):
"""Return default trtexec spec"""
data = model_obj.get_action_spec(id, "trtexec")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_trtexec(id, job):
"""Run trtexec action"""
job_id = model_obj.run_action(id, job, ["trtexec"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@bpnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/bpnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FasterRCNN tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def faster_rcnn():
"""Create FasterRCNN model click group"""
pass
@faster_rcnn.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@faster_rcnn.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@faster_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/faster_rcnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def ml_recog():
"""Create DetectNet V2 model click group"""
pass
@ml_recog.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@ml_recog.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@ml_recog.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/ml_recog.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO-Client individual network modules"""
| tao_front_end_services-main | cli/tao_cli/networks/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Action Recognition tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def action_recognition():
"""Create model click group"""
pass
@action_recognition.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@action_recognition.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@action_recognition.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/action_recognition.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-task classification tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def multitask_classification():
"""Create Multi-task classification model click group"""
pass
@multitask_classification.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@multitask_classification.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@multitask_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/multi_task_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose Classification tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def pose_classification():
"""Create Pose Classification model click group"""
pass
@pose_classification.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@pose_classification.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@pose_classification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/pose_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deformable-DETR tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def deformable_detr():
"""Create Deformable-DETR model click group"""
pass
@deformable_detr.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@deformable_detr.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@deformable_detr.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/deformable_detr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FPENET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def fpenet():
"""Create FPENET model click group"""
pass
@fpenet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@fpenet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@fpenet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/fpenet.py |
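The dataset-side commands above reduce to two calls on the Dataset wrapper: dataset_create to register the dataset and run_action to launch a named convert action against it. The sketch below drives that flow directly from Python; the type, format, and action strings are placeholders, and the calls only succeed when the environment is configured the way the CLI expects (a reachable TAO API service and credentials).

from tao_cli.cli_actions.dataset import Dataset

dataset_obj = Dataset()

# Placeholders: use values accepted by the deployed TAO service.
ds_id = dataset_obj.dataset_create("<dataset-type>", "<dataset-format>")

# Launch a dataset_convert job; there is no parent job, so job=None,
# mirroring how the CLI commands above invoke run_action.
convert_job = dataset_obj.run_action(id=ds_id, job=None, action=["<convert-action>"])
print(ds_id, convert_job)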
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MAL tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def mal():
"""Create MAL model click group"""
pass
@mal.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@mal.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@mal.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/mal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DSSD tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def dssd():
"""Create DSSD model click group"""
pass
@dssd.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@dssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@dssd.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@dssd.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/dssd.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectNet V2 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def detectnet_v2():
"""Create DetectNet V2 model click group"""
pass
@detectnet_v2.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@detectnet_v2.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@detectnet_v2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/detectnet_v2.py |
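A pattern worth noting in these modules is that every action command takes an optional --job naming the parent job whose artifacts it consumes (train feeds prune, prune feeds retrain, retrain feeds export), and run_action returns the new job's ID so it can be threaded into the next step. The sketch below chains three actions through the Model wrapper used above; the model ID is a placeholder, and whether each call waits for the previous job to finish is left to the Model implementation rather than shown here.

from tao_cli.cli_actions.model import Model

model_obj = Model()

model_id = "<model-uuid>"  # placeholder: a model created earlier with model_create

# No parent job for the initial training run.
train_job = model_obj.run_action(model_id, None, ["train"])

# Prune consumes the train job, retrain consumes the prune job.
prune_job = model_obj.run_action(model_id, train_job, ["prune"])
retrain_job = model_obj.run_action(model_id, prune_job, ["retrain"])

print(train_job, prune_job, retrain_job)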
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pointpillars tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def pointpillars():
"""Create Pointpillars model click group"""
pass
@pointpillars.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@pointpillars.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@pointpillars.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/pointpillars.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MaskRCNN tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def mask_rcnn():
"""Create MaskRCNN model click group"""
pass
@mask_rcnn.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@mask_rcnn.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@mask_rcnn.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/mask_rcnn.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def ocrnet():
    """Create OCRNET model click group"""
pass
@ocrnet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@ocrnet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@ocrnet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/ocrnet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Services - Analytics tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def analytics():
    """Create Data Services Analytics click group"""
    pass
@analytics.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@analytics.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
def model_create(network_arch):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, "")
click.echo(f"{id}")
@analytics.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--action', prompt='action', help='The action to be performed.', required=True)
def model_action_defaults(id, action):
"""Return default action spec"""
data = model_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@analytics.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--action', prompt='action', help='The action to be performed.', required=True)
def execute_action(id, action):
"""Run action"""
job_id = model_obj.run_action(id, None, [action])
click.echo(f"{job_id}")
| tao_front_end_services-main | cli/tao_cli/networks/analytics.py |
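Unlike the per-network groups, the data-services analytics group exposes one generic execute_action command, so the action name travels as data instead of as a dedicated sub-command and new analytics actions need no new CLI code. The sketch below mirrors that flow directly against the Model wrapper; the network architecture and action strings are placeholders, the empty encryption key matches what the CLI above passes, and a real run needs a reachable TAO API backend.

from tao_cli.cli_actions.model import Model
import json

model_obj = Model()

# Data-services models are created without an encryption key (the CLI above
# passes an empty string); the action name is supplied at run time.
model_id = model_obj.model_create("<analytics-network-arch>", "")

# Fetch the default spec for the chosen action, as model_action_defaults does.
spec = model_obj.get_action_spec(model_id, "<action>")
print(json.dumps(spec, indent=2))

# Launch the action with no parent job, as execute_action does.
job_id = model_obj.run_action(model_id, None, ["<action>"])
print(job_id)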
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def retinanet():
"""Create RetinaNet model click group"""
pass
@retinanet.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@retinanet.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@retinanet.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/retinanet.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO v4 tiny tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def yolo_v4_tiny():
"""Create YOLO v4 tiny model click group"""
pass
@yolo_v4_tiny.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v4_tiny.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v4_tiny.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/yolo_v4_tiny.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLO v4 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def yolo_v4():
"""Create YOLO v4 model click group"""
pass
@yolo_v4.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v4.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@yolo_v4.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/yolo_v4.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OCRNET tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def optical_inspection():
"""Create DetectNet V2 model click group"""
pass
@optical_inspection.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@optical_inspection.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@optical_inspection.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/optical_inspection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dino tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def dino():
"""Create Dino model click group"""
pass
@dino.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@dino.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@dino.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/dino.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Services - Data Format Conversion tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def annotations():
    """Create data-format conversion (annotations) click group"""
    pass
@annotations.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@annotations.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
def model_create(network_arch):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, "")
click.echo(f"{id}")
@annotations.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_action_defaults(id):
"""Return default action spec"""
data = model_obj.get_action_spec(id, "convert")
click.echo(json.dumps(data, indent=2))
@annotations.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def execute_action(id):
"""Run action"""
job_id = model_obj.run_action(id, None, ["convert"])
click.echo(f"{job_id}")
| tao_front_end_services-main | cli/tao_cli/networks/annotations.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-class classification tf2 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def classification_tf2():
"""Create Multi-class classification tf2 model click group"""
pass
@classification_tf2.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@classification_tf2.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@classification_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/multi_class_classification_tf2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""EfficientDet tf2 tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def efficientdet_tf2():
"""Create EfficientDet tf2 model click group"""
pass
@efficientdet_tf2.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert_defaults(id, action):
"""Return default dataset convert spec"""
data = dataset_obj.get_action_spec(id, action)
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--action', prompt='action', help='The dataset convert action.', required=True)
def dataset_convert(id, action):
"""Run dataset_convert action"""
job_id = dataset_obj.run_action(id=id, job=None, action=[action])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The dataset ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def dataset_job_cancel(id, job):
"""Pause/Cancel a running dataset job"""
job = dataset_obj.dataset_job_cancel(id, job)
click.echo(f"{job}")
@efficientdet_tf2.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or gen_trt_engine job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_prune_defaults(id):
"""Return default prune spec"""
data = model_obj.get_action_spec(id, "prune")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train job ID.', required=False, default=None)
def model_prune(id, job):
"""Run prune action"""
job_id = model_obj.run_action(id, job, ["prune"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_retrain_defaults(id):
"""Return default retrain spec"""
data = model_obj.get_action_spec(id, "retrain")
click.echo(json.dumps(data, indent=2))
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The prune job ID.', required=False, default=None)
def model_retrain(id, job):
"""Run retrain action"""
job_id = model_obj.run_action(id, job, ["retrain"])
click.echo(f"{job_id}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@efficientdet_tf2.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/efficientdet_tf2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Segformer tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def segformer():
"""Create Segformer model click group"""
pass
@segformer.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@segformer.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_gen_trt_engine_defaults(id):
"""Return default gen_trt_engine spec"""
data = model_obj.get_action_spec(id, "gen_trt_engine")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The export job ID.', required=False, default=None)
def model_gen_trt_engine(id, job):
"""Run gen_trt_engine action"""
job_id = model_obj.run_action(id, job, ["gen_trt_engine"])
click.echo(f"{job_id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@segformer.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/segformer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Re-Identification tao-client modules"""
import click
import json
from tao_cli.cli_actions.dataset import Dataset
from tao_cli.cli_actions.model import Model
from tao_cli.constants import dataset_format, dataset_type, network_type
dataset_obj = Dataset()
model_obj = Model()
@click.group()
def re_identification():
"""Create Re-Identification model click group"""
pass
@re_identification.command()
@click.option('--dataset_type', prompt='dataset_type', type=click.Choice(dataset_type), help='The dataset type.', required=True)
@click.option('--dataset_format', prompt='dataset_format', type=click.Choice(dataset_format), help='The dataset format.', required=True)
def dataset_create(dataset_type, dataset_format):
"""Create a dataset and return the id"""
id = dataset_obj.dataset_create(dataset_type, dataset_format)
click.echo(f"{id}")
@re_identification.command()
@click.option('--network_arch', prompt='network_arch', type=click.Choice(network_type), help='Network architecture.', required=True)
@click.option('--encryption_key', prompt='encryption_key', help='Encryption_key.', required=True)
def model_create(network_arch, encryption_key):
"""Create a model and return the id"""
id = model_obj.model_create(network_arch, encryption_key)
click.echo(f"{id}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_train_defaults(id):
"""Return default train spec"""
data = model_obj.get_action_spec(id, "train")
click.echo(json.dumps(data, indent=2))
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_automl_defaults(id):
"""Return default automl parameters"""
data = model_obj.get_automl_defaults(id, "train")
click.echo(json.dumps(data, indent=2))
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The dataset convert job ID.', required=False, default=None)
def model_train(id, job):
"""Run train action"""
job_id = model_obj.run_action(id, job, ["train"])
click.echo(f"{job_id}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_evaluate_defaults(id):
"""Return default evaluate spec"""
data = model_obj.get_action_spec(id, "evaluate")
click.echo(json.dumps(data, indent=2))
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_evaluate(id, job):
"""Run evaluate action"""
job_id = model_obj.run_action(id, job, ["evaluate"])
click.echo(f"{job_id}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_export_defaults(id):
"""Return default export spec"""
data = model_obj.get_action_spec(id, "export")
click.echo(json.dumps(data, indent=2))
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune or retrain job ID.', required=False, default=None)
def model_export(id, job):
"""Run export action"""
job_id = model_obj.run_action(id, job, ["export"])
click.echo(f"{job_id}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
def model_inference_defaults(id):
"""Return default inference spec"""
data = model_obj.get_action_spec(id, "inference")
click.echo(json.dumps(data, indent=2))
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', help='The train, prune, retrain, export or convert job ID.', required=False, default=None)
def model_inference(id, job):
"""Run inference action"""
job_id = model_obj.run_action(id, job, ["inference"])
click.echo(f"{job_id}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_cancel(id, job):
"""Pause a running job"""
model_obj.model_job_cancel(id, job)
click.echo(f"{job}")
@re_identification.command()
@click.option('--id', prompt='id', help='The model ID.', required=True)
@click.option('--job', prompt='job', help='The job ID.', required=True)
def model_job_resume(id, job):
"""Resume a paused job"""
model_obj.model_job_resume(id, job)
click.echo(f"{job}")
| tao_front_end_services-main | cli/tao_cli/networks/re_identification.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Download metadata info for all ptm models supported"""
import os
import csv
import copy
import json
import uuid
import subprocess
import datetime
from handlers.utilities import read_network_config
def __get_existing_models(rootdir):
existing_models = []
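    # Each previously synced model lives in its own subdirectory containing a metadata.json file.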
for subdir in os.listdir(rootdir):
with open(rootdir + '/' + subdir + '/metadata.json', 'r', encoding='utf-8') as infile:
existing_models.append(json.load(infile))
return existing_models
def __model_exists(models, ngc_path):
return bool(next(filter(lambda x: x.get('ngc_path', None) == ngc_path, models), None))
def __get_pretrained_models_from_ngc():
ngc_models = []
cached_commands = {}
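    # Cache NGC registry lookups so rows that share the same ngc_path trigger only one subprocess call.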
with open('pretrained_models.csv', 'r', encoding='utf-8') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
model = dict(row)
command = f"ngc registry model list {model['ngc_path']} --format_type json"
print(command)
cached_command = cached_commands.get(command)
if cached_command is None:
model_details = json.loads(subprocess.run(['/bin/bash', '-c', command], stdout=subprocess.PIPE, check=False).stdout.decode('utf-8'))
assert model_details
cached_commands[command] = model_details = model_details[0]
else:
model_details = cached_command
metadata = {}
metadata['public'] = True
metadata['read_only'] = True
metadata['network_arch'] = model['network_arch']
metadata['dataset_type'] = read_network_config(metadata["network_arch"])["api_params"]["dataset_type"]
metadata['actions'] = read_network_config(metadata["network_arch"])["api_params"]["actions"]
metadata['name'] = model['displayName']
metadata['description'] = model_details.get('description', 'TAO Pretrained Model')
metadata['logo'] = 'https://www.nvidia.com'
metadata['ptm'] = []
metadata['train_datasets'] = []
metadata['eval_dataset'] = None
metadata['calibration_dataset'] = None
metadata['inference_dataset'] = None
metadata['version'] = model_details.get('versionId', '')
metadata['created_on'] = metadata['last_modified'] = model_details.get('createdDate', datetime.datetime.now().isoformat())
metadata['ngc_path'] = model['ngc_path']
metadata['additional_id_info'] = None
ngc_models.append(metadata.copy())
return ngc_models
def __create_model(rootdir, metadata):
metadata['id'] = str(uuid.uuid4())
ptm_metadatas = [metadata]
if metadata["network_arch"] == "lprnet":
ptm_metadatas = []
for model_type in ("us", "ch"):
pc_metadata = copy.deepcopy(metadata)
pc_metadata['id'] = str(uuid.uuid4())
pc_metadata['additional_id_info'] = model_type
ptm_metadatas.append(pc_metadata)
if metadata["network_arch"] == "action_recognition" and metadata["ngc_path"] == "nvidia/tao/actionrecognitionnet:trainable_v1.0":
ptm_metadatas = []
for model_type in ("3d", "2d"):
ac_metadata = copy.deepcopy(metadata)
ac_metadata['id'] = str(uuid.uuid4())
ac_metadata['additional_id_info'] = model_type
ptm_metadatas.append(ac_metadata)
if metadata["network_arch"] == "action_recognition" and metadata["ngc_path"] == "nvidia/tao/actionrecognitionnet:trainable_v2.0":
ptm_metadatas = []
for platform in ("a100", "xavier"):
for model_type in ("3d", "2d"):
ac_metadata = copy.deepcopy(metadata)
ac_metadata['id'] = str(uuid.uuid4())
ac_metadata['additional_id_info'] = platform + "," + model_type
ptm_metadatas.append(ac_metadata)
for ptm_metadata in ptm_metadatas:
ptm_id = ptm_metadata['id']
path = rootdir + '/' + ptm_id
if not os.path.exists(path):
os.makedirs(path)
with open(path + '/metadata.json', 'w', encoding='utf-8') as outfile:
json.dump(ptm_metadata, outfile, indent=2, sort_keys=False)
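# A hypothetical sketch of the on-disk layout produced by __create_model above,
# assuming rootdir is "/shared/users/<admin-uuid>/models" as set up in sync() below:
#
#   /shared/users/00000000-0000-0000-0000-000000000000/models/
#       <generated-ptm-uuid-1>/metadata.json   # e.g. lprnet "us" variant
#       <generated-ptm-uuid-2>/metadata.json   # e.g. lprnet "ch" variant
#
# Each metadata.json holds one entry built in __get_pretrained_models_from_ngc, with
# "additional_id_info" distinguishing the per-network variants created above.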
def sync(path='/shared'):
"""Downloads metadata info for ngc hosted ptm models"""
admin_uuid = uuid.UUID(int=0)
rootdir = path + '/users/' + str(admin_uuid) + '/models'
if not os.path.exists(rootdir):
os.makedirs(rootdir)
existing_models = __get_existing_models(rootdir)
pretrained_models = __get_pretrained_models_from_ngc()
for ptm in pretrained_models:
if not __model_exists(existing_models, ptm.get('ngc_path', '')):
__create_model(rootdir, ptm)
if __name__ == '__main__':
sync('shared')
| tao_front_end_services-main | api/pretrained_models.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML main handler"""
from automl.controller import Controller
from automl.bayesian import Bayesian
from automl.hyperband import HyperBand
from automl.params import generate_hyperparams_to_search
from handlers.utilities import JobContext
import ast
import argparse
def automl_start(root, network, jc, resume, automl_algorithm, automl_max_recommendations, automl_delete_intermediate_ckpt, automl_R, automl_nu, metric, epoch_multiplier, automl_add_hyperparameters, automl_remove_hyperparameters):
"""Starts the automl controller"""
parameters = generate_hyperparams_to_search(jc.network, automl_add_hyperparameters, automl_remove_hyperparameters, "/".join(root.split("/")[0:-2]))
if resume:
if automl_algorithm.lower() in ("hyperband", "h"):
brain = HyperBand.load_state(root=root, parameters=parameters, R=int(automl_R), nu=int(automl_nu), network=network, epoch_multiplier=int(epoch_multiplier))
elif automl_algorithm.lower() in ("bayesian", "b"):
brain = Bayesian.load_state(root, parameters)
controller = Controller.load_state(root, network, brain, jc, automl_max_recommendations, automl_delete_intermediate_ckpt, metric, automl_algorithm.lower())
controller.start()
else:
if automl_algorithm.lower() in ("hyperband", "h"):
brain = HyperBand(root=root, parameters=parameters, R=int(automl_R), nu=int(automl_nu), network=network, epoch_multiplier=int(epoch_multiplier))
elif automl_algorithm.lower() in ("bayesian", "b"):
brain = Bayesian(root, parameters)
controller = Controller(root, network, brain, jc, automl_max_recommendations, automl_delete_intermediate_ckpt, metric, automl_algorithm.lower())
controller.start()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='AutoML controller', description='Run AutoML.')
parser.add_argument(
'--root',
type=str,
)
parser.add_argument(
'--automl_job_id',
type=str,
)
parser.add_argument(
'--network',
type=str,
)
parser.add_argument(
'--model_id',
type=str,
)
parser.add_argument(
'--resume',
type=str,
)
parser.add_argument(
'--automl_algorithm',
type=str,
)
parser.add_argument(
'--automl_max_recommendations',
type=str,
)
parser.add_argument(
'--automl_delete_intermediate_ckpt',
type=str,
)
parser.add_argument(
'--automl_R',
type=str,
)
parser.add_argument(
'--automl_nu',
type=str,
)
parser.add_argument(
'--metric',
type=str,
)
parser.add_argument(
'--epoch_multiplier',
type=str,
)
parser.add_argument(
'--automl_add_hyperparameters',
type=str,
)
parser.add_argument(
'--automl_remove_hyperparameters',
type=str,
)
args = parser.parse_args()
root = args.root
automl_job_id = args.automl_job_id
network = args.network
handler_id = args.model_id
jc = JobContext(automl_job_id, None, network, "train", handler_id)
resume = args.resume == "True"
automl_algorithm = args.automl_algorithm
automl_max_recommendations = args.automl_max_recommendations
automl_delete_intermediate_ckpt = args.automl_delete_intermediate_ckpt
automl_R = args.automl_R
automl_nu = args.automl_nu
metric = args.metric
epoch_multiplier = args.epoch_multiplier
automl_add_hyperparameters = ast.literal_eval(args.automl_add_hyperparameters)
automl_remove_hyperparameters = ast.literal_eval(args.automl_remove_hyperparameters)
automl_start(
root=root,
network=network,
jc=jc,
resume=resume,
automl_algorithm=automl_algorithm,
automl_max_recommendations=automl_max_recommendations,
automl_delete_intermediate_ckpt=automl_delete_intermediate_ckpt,
automl_R=automl_R,
automl_nu=automl_nu,
metric=metric,
epoch_multiplier=epoch_multiplier,
automl_add_hyperparameters=automl_add_hyperparameters,
automl_remove_hyperparameters=automl_remove_hyperparameters)
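# A minimal invocation sketch with hypothetical values (the real command line is
# assembled by the job handlers that launch this controller; flag names match the
# argparse definitions above):
#
#   python3 automl_start.py \
#       --root /shared/users/<user-id>/models/<model-id>/<automl-job-id> \
#       --automl_job_id <automl-job-id> --network detectnet_v2 --model_id <model-id> \
#       --resume False --automl_algorithm Bayesian \
#       --automl_max_recommendations 20 --automl_delete_intermediate_ckpt True \
#       --automl_R 27 --automl_nu 3 --metric map --epoch_multiplier 1 \
#       --automl_add_hyperparameters "[]" --automl_remove_hyperparameters "[]"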
| tao_front_end_services-main | api/automl_start.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API modules defining schemas and endpoints"""
import sys
import json
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
from flask import Flask, request, jsonify, make_response, render_template, send_from_directory
from marshmallow import Schema, fields
from marshmallow_enum import EnumField, Enum
from filter_utils import filtering, pagination
from auth_utils import credentials, authentication, access_control
from health_utils import health_check
from handlers.app_handler import AppHandler as app_handler
from handlers.utilities import validate_uuid
from job_utils.workflow import Workflow
flask_plugin = FlaskPlugin()
marshmallow_plugin = MarshmallowPlugin()
#
# Create an APISpec
#
spec = APISpec(
title='TAO Toolkit API',
version='v5.0.0',
openapi_version='3.0.3',
info={"description": 'TAO Toolkit API document'},
tags=[
{"name": 'DATASET', "description": 'Endpoints related to Dataset'},
{"name": 'MODEL', "description": 'Endpoints related to Model'}
],
plugins=[flask_plugin, marshmallow_plugin],
)
#
# Enum stuff for APISpecs
#
def enum_to_properties(self, field, **kwargs):
"""
Add an OpenAPI extension for marshmallow_enum.EnumField instances
"""
if isinstance(field, EnumField):
return {'type': 'string', 'enum': [m.name for m in field.enum]}
return {}
marshmallow_plugin.converter.add_attribute_function(enum_to_properties)
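# For illustration only: with the converter function registered above, an EnumField such
# as status = EnumField(JobStatusEnum) is rendered in the generated OpenAPI document as
#   {"type": "string", "enum": ["Done", "Running", "Error", "Pending"]}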
#
# Global schemas and enums
#
class DatasetUploadSchema(Schema):
"""Class defining dataset upload schema"""
message = fields.Str(allow_none=True)
class ErrorRspSchema(Schema):
"""Class defining error response schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
error_desc = fields.Str()
error_code = fields.Int()
class JobStatusEnum(Enum):
"""Class defining job status enum"""
Done = 'Done'
Running = 'Running'
Error = 'Error'
Pending = "Pending"
#
# Flask app
#
app = Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
#
# JobResultSchema
#
class DetailedStatusSchema(Schema):
"""Class defining Status schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
date = fields.Str()
time = fields.Str()
message = fields.Str()
status = fields.Str()
class GraphSchema(Schema):
"""Class defining Graph schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
metric = fields.Str(allow_none=True)
x_min = fields.Int(allow_none=True)
x_max = fields.Int(allow_none=True)
y_min = fields.Float(allow_none=True)
y_max = fields.Float(allow_none=True)
values = fields.Dict(keys=fields.Int(allow_none=True), values=fields.Float(allow_none=True))
units = fields.Str(allow_none=True)
class CategoryWiseSchema(Schema):
"""Class defining CategoryWise schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
category = fields.Str()
value = fields.Float(allow_none=True)
class CategorySchema(Schema):
"""Class defining Category schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
metric = fields.Str()
category_wise_values = fields.List(fields.Nested(CategoryWiseSchema, allow_none=True))
class KPISchema(Schema):
"""Class defining KPI schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
metric = fields.Str(allow_none=True)
values = fields.Dict(keys=fields.Int(allow_none=True), values=fields.Float(allow_none=True))
class AutoMLResultsSchema(Schema):
"""Class defining AutoML results schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
metric = fields.Str(allow_none=True)
value = fields.Float(allow_none=True)
class StatsSchema(Schema):
"""Class defining results stats schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
metric = fields.Str(allow_none=True)
value = fields.Str(allow_none=True)
class JobResultSchema(Schema):
"""Class defining job results schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
detailed_status = fields.Nested(DetailedStatusSchema, allow_none=True)
graphical = fields.List(fields.Nested(GraphSchema, allow_none=True))
categorical = fields.List(fields.Nested(CategorySchema, allow_none=True))
kpi = fields.List(fields.Nested(KPISchema, allow_none=True))
automl_result = fields.List(fields.Nested(AutoMLResultsSchema, allow_none=True))
stats = fields.List(fields.Nested(StatsSchema, allow_none=True))
epoch = fields.Int(allow_none=True)
max_epoch = fields.Int(allow_none=True)
time_per_epoch = fields.Str(allow_none=True)
time_per_iter = fields.Str(allow_none=True)
cur_iter = fields.Int(allow_none=True)
eta = fields.Str(allow_none=True)
#
# DATASET API
#
class DatasetActions(Schema):
"""Class defining dataset actions schema"""
job = fields.UUID(allow_none=True)
actions = fields.List(fields.Str())
class DatasetTypeEnum(Enum):
"""Class defining dataset type enum"""
object_detection = 'object_detection'
semantic_segmentation = 'semantic_segmentation'
image_classification = 'image_classification'
instance_segmentation = 'instance_segmentation'
character_recognition = 'character_recognition'
bpnet = 'bpnet'
fpenet = 'fpenet'
action_recognition = 'action_recognition'
pointpillars = 'pointpillars'
pose_classification = 'pose_classification'
ml_recog = 'ml_recog'
ocdnet = 'ocdnet'
ocrnet = 'ocrnet'
optical_inspection = 'optical_inspection'
re_identification = 're_identification'
class DatasetFormatEnum(Enum):
"""Class defining dataset format enum"""
kitti = 'kitti'
pascal_voc = 'pascal_voc'
raw = 'raw'
coco_raw = 'coco_raw'
unet = 'unet'
coco = 'coco'
lprnet = 'lprnet'
default = 'default'
custom = 'custom'
classification_pyt = 'classification_pyt'
class DatasetReqSchema(Schema):
"""Class defining dataset request schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
name = fields.Str()
description = fields.Str()
version = fields.Str()
logo = fields.URL()
type = EnumField(DatasetTypeEnum)
format = EnumField(DatasetFormatEnum)
class DatasetJobResultCategoriesSchema(Schema):
"""Class defining dataset job result categories schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
category = fields.Str()
count = fields.Int()
class DatasetJobResultTotalSchema(Schema):
"""Class defining dataset job result total schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
images = fields.Int()
labels = fields.Int()
class DatasetJobSchema(Schema):
"""Class defining dataset job result total schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
id = fields.UUID()
parent_id = fields.UUID(allow_none=True)
created_on = fields.DateTime()
last_modified = fields.DateTime()
action = fields.Str()
status = EnumField(JobStatusEnum)
result = fields.Nested(JobResultSchema)
class DatasetRspSchema(Schema):
"""Class defining dataset response schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
id = fields.UUID()
created_on = fields.DateTime()
last_modified = fields.DateTime()
name = fields.Str()
description = fields.Str()
version = fields.Str()
logo = fields.URL(allow_none=True)
type = EnumField(DatasetTypeEnum)
format = EnumField(DatasetFormatEnum)
actions = fields.List(fields.Str())
jobs = fields.List(fields.Nested(DatasetJobSchema))
class DatasetListRspSchema(Schema):
"""Class defining dataset list response schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
datasets = fields.List(fields.Nested(DatasetRspSchema))
class DatasetJobListSchema(Schema):
"""Class defining dataset list schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
jobs = fields.List(fields.Nested(DatasetJobSchema))
@app.route('/api/v1/user/<user_id>/dataset', methods=['GET'])
def dataset_list(user_id):
"""List Datasets.
---
get:
tags:
- DATASET
summary: List Datasets
description: Returns the list of Datasets
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: skip
in: query
description: Optional skip for pagination
required: false
schema:
type: integer
- name: size
in: query
description: Optional size for pagination
required: false
schema:
type: integer
- name: sort
in: query
description: Optional sort
required: false
schema:
type: string
enum: ["date-descending", "date-ascending", "name-descending", "name-ascending" ]
- name: name
in: query
description: Optional name filter
required: false
schema:
type: string
- name: type
in: query
description: Optional type filter
required: false
schema:
type: string
enum: [ "object_detection", "semantic_segmentation", "image_classification" ]
responses:
200:
description: Returned list of Datasets
content:
application/json:
schema:
type: array
items: DatasetRspSchema
"""
message = validate_uuid(user_id=user_id)
if message:
return make_response(jsonify(message), 400)
datasets = app_handler.list_datasets(user_id)
filtered_datasets = filtering.apply(request.args, datasets)
paginated_datasets = pagination.apply(request.args, filtered_datasets)
pagination_total = len(filtered_datasets)
metadata = {"datasets": paginated_datasets}
schema = DatasetListRspSchema()
response = make_response(jsonify(schema.dump(schema.load(metadata))['datasets']))
response.headers['X-Pagination-Total'] = str(pagination_total)
return response
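# A hypothetical client-side sketch of calling this endpoint; host, port and IDs are
# placeholders, and any authentication the deployment requires is omitted:
#
#   import requests
#   base = "http://<api-host>/api/v1/user/<user-id>"
#   rsp = requests.get(f"{base}/dataset", params={"skip": 0, "size": 10, "sort": "date-descending"})
#   datasets = rsp.json()                        # list of DatasetRspSchema dicts
#   total = rsp.headers["X-Pagination-Total"]    # total matches before pagination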
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>', methods=['GET'])
def dataset_retrieve(user_id, dataset_id):
"""Retrieve Dataset.
---
get:
tags:
- DATASET
summary: Retrieve Dataset
description: Returns the Dataset
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset to return
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned Dataset
content:
application/json:
schema: DatasetRspSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.retrieve_dataset(user_id, dataset_id)
# Get schema
schema = None
if response.code == 200:
schema = DatasetRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>', methods=['DELETE'])
def dataset_delete(user_id, dataset_id):
"""Delete Dataset.
---
delete:
tags:
- DATASET
summary: Delete Dataset
description: Cancels all related running jobs and returns the deleted Dataset
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset to delete
required: true
schema:
type: string
format: uuid
responses:
200:
description: Deleted Dataset
content:
application/json:
schema: DatasetRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.delete_dataset(user_id, dataset_id)
# Get schema
schema = None
if response.code == 200:
schema = DatasetRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset', methods=['POST'])
def dataset_create(user_id):
"""Create new Dataset.
---
post:
tags:
- DATASET
summary: Create new Dataset
description: Returns the new Dataset
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: DatasetReqSchema
description: Initial metadata for new Dataset (type and format required)
required: true
responses:
201:
          description: Returned the new Dataset
content:
application/json:
schema: DatasetRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.create_dataset(user_id, request_dict)
# Get schema
schema = None
if response.code == 201:
schema = DatasetRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>', methods=['PUT'])
def dataset_update(user_id, dataset_id):
"""Update Dataset.
---
put:
tags:
- DATASET
summary: Update Dataset
description: Returns the updated Dataset
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset to update
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: DatasetReqSchema
description: Updated metadata for Dataset
required: true
responses:
200:
description: Returned the updated Dataset
content:
application/json:
schema: DatasetRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.update_dataset(user_id, dataset_id, request_dict)
# Get schema
schema = None
if response.code == 200:
schema = DatasetRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>', methods=['PATCH'])
def dataset_partial_update(user_id, dataset_id):
"""Partial update Dataset.
---
patch:
tags:
- DATASET
summary: Partial update Dataset
description: Returns the updated Dataset
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset to update
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: DatasetReqSchema
description: Updated metadata for Dataset
required: true
responses:
200:
description: Returned the updated Dataset
content:
application/json:
schema: DatasetRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.update_dataset(user_id, dataset_id, request_dict)
# Get schema
schema = None
if response.code == 200:
schema = DatasetRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route("/api/v1/user/<user_id>/dataset/<dataset_id>/upload", methods=["POST"])
def dataset_upload(user_id, dataset_id):
"""Upload Dataset.
---
post:
tags:
- DATASET
summary: Upload Dataset
description: Upload training and testing data
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
requestBody:
content:
multipart/form-data:
schema: DatasetUploadSchema
description: Data file to upload (a tar.gz file)
required: true
responses:
201:
          description: Upload successful
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
file_tgz = request.files.get("file", None)
# Get response
print("Triggering API call to upload data to server", file=sys.stderr)
response = app_handler.upload_dataset(user_id, dataset_id, file_tgz)
print("API call to upload data to server complete", file=sys.stderr)
# Get schema
schema = None
if response.code == 201:
schema = DatasetUploadSchema()
print("Returning success response", file=sys.stderr)
schema_dict = schema.dump({"message": "Data successfully uploaded"})
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
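# A hypothetical upload sketch, assuming the dataset archive is a local tar.gz file;
# the form field name "file" matches request.files.get("file") above, and host/IDs are
# placeholders:
#
#   import requests
#   url = "http://<api-host>/api/v1/user/<user-id>/dataset/<dataset-id>/upload"
#   with open("my_dataset.tar.gz", "rb") as f:
#       rsp = requests.post(url, files={"file": f})
#   # 201 -> {"message": "Data successfully uploaded"}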
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/specs/<action>/schema', methods=['GET'])
def dataset_specs_schema(user_id, dataset_id, action):
"""Retrieve Specs schema.
---
get:
tags:
- DATASET
summary: Retrieve Specs schema
description: Returns the Specs schema for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID for Dataset
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
responses:
200:
description: Returned the Specs schema for given action
content:
application/json:
schema:
type: object
404:
description: User, Dataset or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.get_spec_schema(user_id, dataset_id, action, "dataset")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/specs/<action>', methods=['GET'])
def dataset_specs_retrieve(user_id, dataset_id, action):
"""Retrieve Dataset Specs.
---
get:
tags:
- DATASET
summary: Retrieve Dataset Specs
description: Returns the saved Dataset Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
responses:
200:
description: Returned the saved Dataset Specs for specified action
content:
application/json:
schema:
type: object
404:
description: User, Dataset or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.get_spec(user_id, dataset_id, action, "dataset")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/specs/<action>', methods=['POST'])
def dataset_specs_save(user_id, dataset_id, action):
"""Save Dataset Specs.
---
post:
tags:
- DATASET
summary: Save Dataset Specs
description: Save the Dataset Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
type: object
description: Dataset Specs
required: true
responses:
201:
description: Returned the saved Dataset Specs for specified action
content:
application/json:
schema:
type: object
400:
description: Invalid specs
content:
application/json:
schema: ErrorRspSchema
404:
description: User, Dataset or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.save_spec(user_id, dataset_id, action, request_dict, "dataset")
# Get schema
schema = None
if response.code == 201:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/specs/<action>', methods=['PUT'])
def dataset_specs_update(user_id, dataset_id, action):
"""Update Dataset Specs.
---
put:
tags:
- DATASET
summary: Update Dataset Specs
description: Update the Dataset Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
type: object
description: Dataset Specs
required: true
responses:
200:
description: Returned the updated Dataset Specs for specified action
content:
application/json:
schema:
type: object
400:
description: Invalid specs
content:
application/json:
schema: ErrorRspSchema
404:
description: User, Dataset or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.update_spec(user_id, dataset_id, action, request_dict, "dataset")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job', methods=['POST'])
def dataset_job_run(user_id, dataset_id):
"""Run Dataset Jobs.
---
post:
tags:
- DATASET
summary: Run Dataset Jobs
description: Asynchronously starts a list of Dataset Actions and returns corresponding Job IDs
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID for Dataset
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema:
type: array
items: DatasetActions
responses:
201:
description: Returned the list of Job IDs corresponding to requested Dataset Actions
content:
application/json:
schema:
type: array
items:
type: string
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
request_data = request.get_json(force=True).copy()
request_schema_data = DatasetActions().load(request_data)
requested_job = request_schema_data.get('job', None)
if requested_job:
requested_job = str(requested_job)
requested_actions = request_schema_data.get('actions', [])
# Get response
response = app_handler.job_run(user_id, dataset_id, requested_job, requested_actions, "dataset")
# Get schema
schema = None
if response.code == 201:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
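# A hypothetical request body as loaded by DatasetActions above (the action name is a
# placeholder; valid actions depend on the dataset type, and "job" may be null when the
# action has no parent job):
#
#   {"job": null, "actions": ["convert"]}
#
# On success the response body is the list of Job IDs created for the requested actions.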
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job', methods=['GET'])
def dataset_job_list(user_id, dataset_id):
"""List Jobs for Dataset.
---
get:
tags:
- DATASET
summary: List Jobs for Dataset
description: Returns the list of Jobs
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID for Dataset
required: true
schema:
type: string
format: uuid
- name: skip
in: query
description: Optional skip for pagination
required: false
schema:
type: integer
- name: size
in: query
description: Optional size for pagination
required: false
schema:
type: integer
- name: sort
in: query
description: Optional sort
required: false
schema:
type: string
enum: ["date-descending", "date-ascending" ]
responses:
200:
description: Returned list of Jobs
content:
application/json:
schema:
type: array
items: DatasetJobSchema
404:
description: User or Dataset not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_list(user_id, dataset_id, "dataset")
# Get schema
schema = None
if response.code == 200:
pagination_total = 0
metadata = {"jobs": response.data}
schema = DatasetJobListSchema()
response = make_response(jsonify(schema.dump(schema.load(metadata))['jobs']))
response.headers['X-Pagination-Total'] = str(pagination_total)
return response
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job/<job_id>', methods=['GET'])
def dataset_job_retrieve(user_id, dataset_id, job_id):
"""Retrieve Job for Dataset.
---
get:
tags:
- DATASET
summary: Retrieve Job for Dataset
description: Returns the Job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: Job ID
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned Job
content:
application/json:
schema: DatasetJobSchema
404:
description: User, Dataset or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_retrieve(user_id, dataset_id, job_id, "dataset")
# Get schema
schema = None
if response.code == 200:
schema = DatasetJobSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job/<job_id>/cancel', methods=['POST'])
def dataset_job_cancel(user_id, dataset_id, job_id):
"""Cancel Dataset Job.
---
post:
tags:
- DATASET
summary: Cancel Dataset Job
description: Cancel Dataset Job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID for Dataset
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: ID for Job
required: true
schema:
type: string
format: uuid
responses:
200:
          description: Successfully requested cancellation of specified Job ID (asynchronous)
404:
description: User, Dataset or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_cancel(user_id, dataset_id, job_id, "dataset")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify({}), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job/<job_id>', methods=['DELETE'])
def dataset_job_delete(user_id, dataset_id, job_id):
"""Delete Dataset Job.
---
delete:
tags:
- DATASET
summary: Delete Dataset Job
      description: Delete Dataset Job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID for Dataset
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: ID for Job
required: true
schema:
type: string
format: uuid
responses:
200:
description: Successfully requested deletion of specified Job ID
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User, Dataset or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_delete(user_id, dataset_id, job_id, "dataset")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify({}), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/dataset/<dataset_id>/job/<job_id>/download', methods=['GET'])
def dataset_job_download(user_id, dataset_id, job_id):
"""Download Job Artifacts.
---
get:
tags:
- DATASET
summary: Download Job Artifacts
description: Download the Artifacts produced by a given job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: dataset_id
in: path
description: ID of Dataset
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: Job ID
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned Job Artifacts
content:
application/octet-stream:
schema:
type: string
format: binary
404:
          description: User, Dataset or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, dataset_id=dataset_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_download(user_id, dataset_id, job_id, "dataset")
# Get schema
schema = None
if response.code == 200:
file_path = response.data # Response is assumed to have the file path
file_dir = "/".join(file_path.split("/")[:-1])
file_name = file_path.split("/")[-1] # infer the name
return send_from_directory(file_dir, file_name, as_attachment=True)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
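# A hypothetical download sketch (host, IDs and the output filename are placeholders);
# the endpoint returns the job artifacts as a single attachment via send_from_directory:
#
#   import requests
#   url = "http://<api-host>/api/v1/user/<user-id>/dataset/<dataset-id>/job/<job-id>/download"
#   with requests.get(url, stream=True) as rsp, open("artifacts.tar.gz", "wb") as out:
#       for chunk in rsp.iter_content(chunk_size=8192):
#           out.write(chunk)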
#
# MODEL API
#
class ModelActions(Schema):
"""Class defining model actions schema"""
job = fields.UUID(allow_none=True)
actions = fields.List(fields.Str())
class ModelNetworkArchEnum(Enum):
"""Class defining model network architecure enum"""
# OD Networks tf
detectnet_v2 = 'detectnet_v2'
faster_rcnn = 'faster_rcnn'
yolo_v4 = 'yolo_v4'
yolo_v4_tiny = 'yolo_v4_tiny'
yolo_v3 = 'yolo_v3'
ssd = 'ssd'
dssd = 'dssd'
retinanet = 'retinanet'
# Other tf networks
unet = 'unet'
lprnet = 'lprnet'
classification_tf1 = 'classification_tf1'
classification_tf2 = 'classification_tf2'
efficientdet_tf1 = 'efficientdet_tf1'
efficientdet_tf2 = 'efficientdet_tf2'
mask_rcnn = 'mask_rcnn'
multitask_classification = 'multitask_classification'
# DriveIX networks
bpnet = 'bpnet'
fpenet = 'fpenet'
# PyT CV networks
action_recognition = 'action_recognition'
classification_pyt = 'classification_pyt'
mal = 'mal'
ml_recog = 'ml_recog'
ocdnet = 'ocdnet'
ocrnet = 'ocrnet'
optical_inspection = 'optical_inspection'
pointpillars = 'pointpillars'
pose_classification = 'pose_classification'
re_identification = 're_identification'
deformable_detr = 'deformable_detr'
dino = 'dino'
segformer = 'segformer'
# Data analytics networks
annotations = "annotations"
analytics = "analytics"
augmentation = "augmentation"
auto_label = "auto_label"
class ModelReqSchema(Schema):
"""Class defining model request schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
name = fields.Str()
description = fields.Str()
version = fields.Str()
logo = fields.URL()
ngc_path = fields.Str()
additional_id_info = fields.Str()
encryption_key = fields.Str(required=True)
network_arch = EnumField(ModelNetworkArchEnum)
ptm = fields.List(fields.UUID())
eval_dataset = fields.UUID()
inference_dataset = fields.UUID()
calibration_dataset = fields.UUID()
train_datasets = fields.List(fields.UUID())
read_only = fields.Bool()
public = fields.Bool()
class ModelJobSchema(Schema):
"""Class defining model job schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
id = fields.UUID()
parent_id = fields.UUID(allow_none=True)
created_on = fields.DateTime()
last_modified = fields.DateTime()
action = fields.Str()
status = EnumField(JobStatusEnum)
result = fields.Nested(JobResultSchema)
class ModelRspSchema(Schema):
"""Class defining model respnse schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
id = fields.UUID()
created_on = fields.DateTime()
last_modified = fields.DateTime()
name = fields.Str()
description = fields.Str()
version = fields.Str()
logo = fields.URL(allow_none=True)
ngc_path = fields.Str(allow_none=True)
additional_id_info = fields.Str(allow_none=True)
encryption_key = fields.Str()
network_arch = EnumField(ModelNetworkArchEnum)
ptm = fields.List(fields.UUID())
dataset_type = EnumField(DatasetTypeEnum)
eval_dataset = fields.UUID(allow_none=True)
inference_dataset = fields.UUID(allow_none=True)
calibration_dataset = fields.UUID(allow_none=True)
train_datasets = fields.List(fields.UUID())
read_only = fields.Bool()
public = fields.Bool()
actions = fields.List(fields.Str())
jobs = fields.List(fields.Nested(ModelJobSchema))
automl_enabled = fields.Bool(allow_none=True)
automl_algorithm = fields.Str(allow_none=True)
automl_max_recommendations = fields.Int(allow_none=True)
automl_delete_intermediate_ckpt = fields.Bool(allow_none=True)
automl_R = fields.Int(allow_none=True)
automl_nu = fields.Int(allow_none=True)
metric = fields.Str(allow_none=True)
epoch_multiplier = fields.Int(allow_none=True)
automl_add_hyperparameters = fields.Str(allow_none=True)
automl_remove_hyperparameters = fields.Str(allow_none=True)
class ModelListRspSchema(Schema):
"""Class defining model list response schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
models = fields.List(fields.Nested(ModelRspSchema))
class ModelJobListSchema(Schema):
"""Class defining model job list schema"""
class Meta:
"""Class enabling sorting field values by the order in which they are declared"""
ordered = True
jobs = fields.List(fields.Nested(ModelJobSchema))
@app.route('/api/v1/user/<user_id>/model', methods=['GET'])
def model_list(user_id):
"""List Models.
---
get:
tags:
- MODEL
summary: List Models
description: Returns the list of Models
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: skip
in: query
description: Optional skip for pagination
required: false
schema:
type: integer
- name: size
in: query
description: Optional size for pagination
required: false
schema:
type: integer
- name: sort
in: query
description: Optional sort
required: false
schema:
type: string
enum: ["date-descending", "date-ascending", "name-descending", "name-ascending" ]
- name: name
in: query
description: Optional name filter
required: false
schema:
type: string
- name: arch
in: query
description: Optional network architecture filter
required: false
schema:
type: string
enum: [ "detectnet_v2" ]
- name: read_only
in: query
description: Optional read_only filter
required: false
allowEmptyValue: true
schema:
type: boolean
responses:
200:
description: Returned the list of Models
content:
application/json:
schema:
type: array
items: ModelRspSchema
"""
message = validate_uuid(user_id=user_id)
if message:
return make_response(jsonify(message), 400)
models = app_handler.list_models(user_id)
filtered_models = filtering.apply(request.args, models)
paginated_models = pagination.apply(request.args, filtered_models)
pagination_total = len(filtered_models)
metadata = {"models": paginated_models}
schema = ModelListRspSchema()
response = make_response(jsonify(schema.dump(schema.load(metadata))['models']))
response.headers['X-Pagination-Total'] = str(pagination_total)
return response
@app.route('/api/v1/user/<user_id>/model/<model_id>', methods=['GET'])
def model_retrieve(user_id, model_id):
"""Retrieve Model.
---
get:
tags:
- MODEL
summary: Retrieve Model
description: Returns the Model
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model to return
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned the Model
content:
application/json:
schema: ModelRspSchema
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.retrieve_model(user_id, model_id)
# Get schema
schema = None
if response.code == 200:
schema = ModelRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>', methods=['DELETE'])
def model_delete(user_id, model_id):
"""Delete Model.
---
delete:
tags:
- MODEL
summary: Delete Model
description: Cancels all related running jobs and returns the deleted Model
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model to delete
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned the deleted Model
content:
application/json:
schema: ModelRspSchema
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.delete_model(user_id, model_id)
# Get schema
schema = None
if response.code == 200:
schema = ModelRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model', methods=['POST'])
def model_create(user_id):
"""Create new Model.
---
post:
tags:
- MODEL
summary: Create new Model
description: Returns the new Model
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: ModelReqSchema
description: Initial metadata for new Model (ptm or network_arch required)
required: true
responses:
201:
description: Returned the new Model
content:
application/json:
schema: ModelRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.create_model(user_id, request_dict)
# Get schema
schema = None
if response.code == 201:
schema = ModelRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>', methods=['PUT'])
def model_update(user_id, model_id):
"""Update Model.
---
put:
tags:
- MODEL
summary: Update Model
description: Returns the updated Model
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model to update
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: ModelReqSchema
description: Updated metadata for Model
required: true
responses:
200:
description: Returned the updated Model
content:
application/json:
schema: ModelRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.update_model(user_id, model_id, request_dict)
# Get schema
schema = None
if response.code == 200:
schema = ModelRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>', methods=['PATCH'])
def model_partial_update(user_id, model_id):
"""Partial update Model.
---
patch:
tags:
- MODEL
summary: Partial update Model
description: Returns the updated Model
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model to update
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema: ModelReqSchema
description: Updated metadata for Model
required: true
responses:
200:
description: Returned the updated Model
content:
application/json:
schema: ModelRspSchema
400:
description: Bad request, see reply body for details
content:
application/json:
schema: ErrorRspSchema
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.update_model(user_id, model_id, request_dict)
# Get schema
schema = None
if response.code == 200:
schema = ModelRspSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/specs/<action>/schema', methods=['GET'])
def model_specs_schema(user_id, model_id, action):
"""Retrieve Specs schema.
---
get:
tags:
- MODEL
summary: Retrieve Specs schema
description: Returns the Specs schema for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
responses:
200:
description: Returned the Specs schema for given action
content:
application/json:
schema:
type: object
404:
          description: User, Model or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.get_spec_schema(user_id, model_id, action, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/specs/<action>', methods=['GET'])
def model_specs_retrieve(user_id, model_id, action):
"""Retrieve Model Specs.
---
get:
tags:
- MODEL
summary: Retrieve Model Specs
description: Returns the Model Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
responses:
200:
description: Returned the Model Specs for specified action
content:
application/json:
schema:
type: object
404:
description: User, Model or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.get_spec(user_id, model_id, action, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/specs/<action>', methods=['POST'])
def model_specs_save(user_id, model_id, action):
"""Save Model Specs.
---
post:
tags:
- MODEL
summary: Save Model Specs
description: Save the Model Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
type: object
description: Model Specs
required: true
responses:
201:
description: Returned the saved Model Specs for specified action
content:
application/json:
schema:
type: object
400:
description: Invalid specs
content:
application/json:
schema: ErrorRspSchema
404:
description: User, Model or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.save_spec(user_id, model_id, action, request_dict, "model")
# Get schema
schema = None
if response.code == 201:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/specs/<action>', methods=['PUT'])
def model_specs_update(user_id, model_id, action):
"""Update Model Specs.
---
put:
tags:
- MODEL
summary: Update Model Specs
description: Update the Model Specs for a given action
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model
required: true
schema:
type: string
format: uuid
- name: action
in: path
description: Action name
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
type: object
description: Model Specs
required: true
responses:
200:
description: Returned the updated Model Specs for specified action
content:
application/json:
schema:
type: object
400:
description: Invalid specs
content:
application/json:
schema: ErrorRspSchema
404:
description: User, Model or Action not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
request_dict = request.get_json(force=True)
# Get response
response = app_handler.save_spec(user_id, model_id, action, request_dict, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job', methods=['POST'])
def model_job_run(user_id, model_id):
"""Run Model Jobs.
---
post:
tags:
- MODEL
summary: Run Model Jobs
description: Asynchronously starts a list of Model Actions and returns corresponding Job IDs
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
requestBody:
content:
application/json:
schema:
type: array
items: ModelActions
responses:
201:
description: Returned the list of Job IDs corresponding to requested Model Actions
content:
application/json:
schema:
type: array
items:
type: string
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
request_data = request.get_json(force=True).copy()
request_schema_data = ModelActions().load(request_data)
requested_job = request_schema_data.get('job', None)
if requested_job:
requested_job = str(requested_job)
requested_actions = request_schema_data.get('actions', [])
# Get response
response = app_handler.job_run(user_id, model_id, requested_job, requested_actions, "model")
# Get schema
schema = None
if response.code == 201:
return make_response(jsonify(response.data), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
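# Illustrative only: the request body for this endpoint is a ModelActions payload; a minimal
# hypothetical example (action names depend on the network and handler configuration):
#   POST /api/v1/user/<user_id>/model/<model_id>/job
#   {"job": null, "actions": ["train"]}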
@app.route('/api/v1/user/<user_id>/model/<model_id>/job', methods=['GET'])
def model_job_list(user_id, model_id):
"""List Jobs for Model.
---
get:
tags:
- MODEL
summary: List Jobs for Model
description: Returns the list of Jobs
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
- name: skip
in: query
description: Optional skip for pagination
required: false
schema:
type: integer
- name: size
in: query
description: Optional size for pagination
required: false
schema:
type: integer
- name: sort
in: query
description: Optional sort
required: false
schema:
type: string
enum: ["date-descending", "date-ascending" ]
responses:
200:
description: Returned list of Jobs
content:
application/json:
schema:
type: array
items: ModelJobSchema
404:
description: User or Model not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_list(user_id, model_id, "model")
# Get schema
schema = None
if response.code == 200:
pagination_total = 0
metadata = {"jobs": response.data}
schema = ModelJobListSchema()
response = make_response(jsonify(schema.dump(schema.load(metadata))['jobs']))
response.headers['X-Pagination-Total'] = str(pagination_total)
return response
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job/<job_id>', methods=['GET'])
def model_job_retrieve(user_id, model_id, job_id):
"""Retrieve Job for Model.
---
get:
tags:
- MODEL
summary: Retrieve Job for Model
description: Returns the Job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: Job ID
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned Job
content:
application/json:
schema: ModelJobSchema
404:
description: User, Model or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_retrieve(user_id, model_id, job_id, "model")
# Get schema
schema = None
if response.code == 200:
schema = ModelJobSchema()
else:
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job/<job_id>/cancel', methods=['POST'])
def model_job_cancel(user_id, model_id, job_id):
"""Cancel Model Job (or pause training).
---
post:
tags:
- MODEL
summary: Cancel Model Job or pause training
description: Cancel Model Job or pause training
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: ID for Job
required: true
schema:
type: string
format: uuid
responses:
200:
description: Successfully requested cancelation or training pause of specified Job ID (asynchronous)
404:
description: User, Model or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_cancel(user_id, model_id, job_id, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify({}), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job/<job_id>', methods=['DELETE'])
def model_job_delete(user_id, model_id, job_id):
"""Cancel Model Job (or pause training).
---
delete:
tags:
- MODEL
summary: Delete Model Job
description: Delete a Model Job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: ID for Job
required: true
schema:
type: string
format: uuid
responses:
200:
description: Successfully requested deletion of specified Job ID
404:
description: User, Model or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_delete(user_id, model_id, job_id, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify({}), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job/<job_id>/resume', methods=['POST'])
def model_job_resume(user_id, model_id, job_id):
"""Resume Model Job - train/retrain only.
---
post:
tags:
- MODEL
summary: Resume Model Job
description: Resume Model Job - train/retrain only
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID for Model
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: ID for Job
required: true
schema:
type: string
format: uuid
responses:
200:
description: Successfully requested resume of specified Job ID (asynchronous)
404:
description: User, Model or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.resume_model_job(user_id, model_id, job_id, "model")
# Get schema
schema = None
if response.code == 200:
return make_response(jsonify({}), response.code)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
@app.route('/api/v1/user/<user_id>/model/<model_id>/job/<job_id>/download', methods=['GET'])
def model_job_download(user_id, model_id, job_id):
"""Download Job Artifacts.
---
get:
tags:
- MODEL
summary: Download Job Artifacts
description: Download the Artifacts produced by a given job
parameters:
- name: user_id
in: path
description: User ID
required: true
schema:
type: string
format: uuid
- name: model_id
in: path
description: ID of Model
required: true
schema:
type: string
format: uuid
- name: job_id
in: path
description: Job ID
required: true
schema:
type: string
format: uuid
responses:
200:
description: Returned Job Artifacts
content:
application/octet-stream:
schema:
type: string
format: binary
404:
description: User, Model or Job not found
content:
application/json:
schema: ErrorRspSchema
"""
message = validate_uuid(user_id=user_id, model_id=model_id, job_id=job_id)
if message:
return make_response(jsonify(message), 400)
# Get response
response = app_handler.job_download(user_id, model_id, job_id, "model")
# Get schema
schema = None
if response.code == 200:
file_path = response.data # Response is assumed to have the file path
file_dir = "/".join(file_path.split("/")[:-1])
file_name = file_path.split("/")[-1] # infer the name
return send_from_directory(file_dir, file_name, as_attachment=True)
schema = ErrorRspSchema()
# Load metadata in schema and return
schema_dict = schema.dump(schema.load(response.data))
return make_response(jsonify(schema_dict), response.code)
#
# HEALTH API
#
@app.route('/api/v1/health', methods=['GET'])
def api_health():
"""api health endpoint"""
return make_response(jsonify(['liveness', 'readiness']))
@app.route('/api/v1/health/liveness', methods=['GET'])
def liveness():
"""api liveness endpoint"""
live_state = health_check.check_logging()
if live_state:
return make_response(jsonify("OK"), 201)
return make_response(jsonify("Error"), 400)
@app.route('/api/v1/health/readiness', methods=['GET'])
def readiness():
"""api readiness endpoint"""
ready_state = health_check.check_logging() and health_check.check_k8s() and Workflow.healthy()
if ready_state:
return make_response(jsonify("OK"), 201)
return make_response(jsonify("Error"), 400)
#
# BASIC API
#
@app.route('/', methods=['GET'])
def root():
"""api root endpoint"""
return make_response(jsonify(['api', 'openapi.yaml', 'openapi.json', 'redoc', 'swagger']))
@app.route('/api', methods=['GET'])
def version_list():
"""version list endpoint"""
return make_response(jsonify(['v1']))
@app.route('/api/v1', methods=['GET'])
def version_v1():
"""version endpoint"""
return make_response(jsonify(['login', 'user', 'auth', 'health']))
@app.route('/api/v1/user', methods=['GET'])
def user_list():
"""user list endpoint"""
error = {"error_desc": "Listing users is not authorized: Missing User ID", "error_code": 1}
return make_response(jsonify(ErrorRspSchema().dump(error)), 403)
@app.route('/api/v1/user/<user_id>', methods=['GET'])
def user(user_id):
"""user endpoint"""
message = validate_uuid(user_id=user_id)
if message:
return make_response(jsonify(message), 400)
return make_response(jsonify(['dataset', 'model']))
@app.route('/api/v1/auth', methods=['GET'])
def auth():
"""authentication endpoint"""
# retrieve jwt from headers
token = ''
url = request.headers.get('X-Original-Url', '')
print('URL: ' + str(url), flush=True)
authorization = request.headers.get('Authorization', '')
authorization_parts = authorization.split()
if len(authorization_parts) == 2 and authorization_parts[0].lower() == 'bearer':
token = authorization_parts[1]
print('Token: ...' + str(token)[-10:], flush=True)
# authentication
user_id, err = authentication.validate(token)
if err:
print("Unauthorized: " + err, flush=True)
return make_response(jsonify({}), 401)
# access control
err = access_control.validate(url, user_id)
if err:
print("Forbidden: " + err, flush=True)
return make_response(jsonify({}), 403)
return make_response(jsonify({'user_id': user_id}), 200)
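# Illustrative request headers consumed by this endpoint (assumption: an auth proxy forwards them):
#   X-Original-Url: the URL being authorized, e.g. http://<server>/api/v1/user/<user_id>/model
#   Authorization: Bearer <JWT issued via NGC or Starfleet login>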
@app.route('/openapi.yaml', methods=['GET'])
def openapi_yaml():
"""openapi_yaml endpoint"""
r = make_response(spec.to_yaml())
r.mimetype = 'text/x-yaml'
return r
@app.route('/openapi.json', methods=['GET'])
def openapi_json():
"""openapi_json endpoint"""
r = make_response(json.dumps(spec.to_dict(), indent=2))
r.mimetype = 'application/json'
return r
@app.route('/redoc', methods=['GET'])
def redoc():
"""redoc endpoint"""
return render_template('redoc.html')
@app.route('/swagger', methods=['GET'])
def swagger():
"""swagger endpoint"""
return render_template('swagger.html')
@app.route('/api/v1/login/<key>', methods=['GET'])
def login(key):
"""Login endpoint"""
creds, err = credentials.get_from_ngc(key)
if err:
print("Unauthorized: " + err, flush=True)
return make_response(jsonify({}), 401)
return make_response(jsonify(creds), 200)
#
# End of APIs
#
with app.test_request_context():
spec.path(view=login)
spec.path(view=dataset_list)
spec.path(view=dataset_retrieve)
spec.path(view=dataset_delete)
spec.path(view=dataset_create)
spec.path(view=dataset_update)
spec.path(view=dataset_partial_update)
spec.path(view=dataset_upload)
spec.path(view=dataset_specs_schema)
spec.path(view=dataset_specs_retrieve)
spec.path(view=dataset_specs_save)
spec.path(view=dataset_specs_update)
spec.path(view=dataset_job_run)
spec.path(view=dataset_job_list)
spec.path(view=dataset_job_retrieve)
spec.path(view=dataset_job_cancel)
spec.path(view=dataset_job_delete)
spec.path(view=dataset_job_download)
spec.path(view=model_list)
spec.path(view=model_retrieve)
spec.path(view=model_delete)
spec.path(view=model_create)
spec.path(view=model_update)
spec.path(view=model_partial_update)
spec.path(view=model_specs_schema)
spec.path(view=model_specs_retrieve)
spec.path(view=model_specs_save)
spec.path(view=model_specs_update)
spec.path(view=model_job_run)
spec.path(view=model_job_list)
spec.path(view=model_job_retrieve)
spec.path(view=model_job_cancel)
spec.path(view=model_job_delete)
spec.path(view=model_job_resume)
spec.path(view=model_job_download)
if __name__ == '__main__':
# app.run(host='0.0.0.0', port=8000)
app.run()
| tao_front_end_services-main | api/app.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Start API workflow"""
import threading
from job_utils.workflow import Workflow
Workflow.start()
for thread in threading.enumerate():
if thread.name == "WorkflowThreadTAO":
thread.join()
| tao_front_end_services-main | api/workflow.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function to convert spec csv to json schema"""
import csv
import json
__type_mapping = {
'collection': 'object',
'list': 'array',
'float': 'number',
'bool': 'boolean',
'integer': 'integer',
'string': 'string',
'str': 'string',
'int': 'integer',
'dict': 'object',
'const': 'const',
'ordered': 'ordered',
'categorical': 'categorical',
'ordered_int': 'ordered_int',
'enum': 'string'
}
def __basic_type_fix(value_type, value):
"""Converts spec values based on their datatype"""
if value in (None, ''):
return None
if value in ('inf', '-inf'):
return float(value)
if value_type in ('integer', 'ordered_int'):
return int(value)
if value_type == 'number':
return float(value)
if value_type == 'boolean':
return str(value).lower() == "true"
if value_type == 'array':
return json.loads(value)
if value_type == 'object':
return json.loads(value)
return value
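# Minimal sketch of the conversions above (illustrative values):
#   __basic_type_fix("integer", "5")     -> 5
#   __basic_type_fix("number", "-inf")   -> float("-inf")
#   __basic_type_fix("boolean", "True")  -> True
#   __basic_type_fix("array", "[1, 2]")  -> [1, 2]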
def __array_type_fix(value_type, value):
"""Converts spec values in an array based on their datatype"""
if value in (None, ''):
return None
# We don't need this for list / dict (examples, valid_options only for simple types)
# if value_type in ["array","object"]:
# return None
values = value.replace(' ', '').split(',')
if value_type == 'integer':
return [int(i) for i in values]
if value_type == 'number':
return [float(i) for i in values]
if value_type == 'boolean':
return [str(i).lower() == "true" for i in values]
if value_type == 'array':
return [json.loads(i) for i in values]
if value_type == 'object':
return [json.loads(i) for i in values]
return values
def __merge(d1, d2):
"""Merges two dictionaries"""
for key in d2.keys():
if key not in d1:
d1[key] = d2[key]
elif d1[key] is None:
d1[key] = d2[key]
elif type(d1[key]) is list and type(d2[key]) is list:
if d1[key] != [] and type(d1[key][0]) is dict:
for i in range(0, min(len(d1[key]), len(d2[key]))):
__merge(d1[key][i], d2[key][i])
else:
d1[key] = d1[key] + [i for i in d2[key] if i not in d1[key]]
elif type(d2[key]) is not dict:
d1[key] = d2[key]
else:
__merge(d1[key], d2[key])
return d1
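# Minimal sketch: d2 fills gaps in d1, overrides scalars and unions plain lists, e.g.
#   __merge({"a": 1, "b": None, "c": [1]}, {"b": 2, "c": [1, 3], "d": 4})
#   -> {"a": 1, "b": 2, "c": [1, 3], "d": 4}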
def harden_parameter_name(parameter):
"""Fix parameter names by removing flanking "." and remove all spaces"""
if not parameter:
return None
return parameter.rstrip(".").lstrip(".").replace(" ", "")
def harden_value_type(value_type):
"""If value type is an unknown, make it string"""
if value_type not in __type_mapping.keys():
return "string"
return value_type
def harden_numerical_value(value):
"""If the value cannot become a float, then return None"""
if value:
try:
float(value)
return value
except:
return None
else:
return None
def convert(path, classes=[]):
"""Convert csv spec to json schema"""
array_parameters = []
schema = {}
with open(path, mode='r', encoding='utf-8-sig') as f:
reader = csv.DictReader(f)
for row in reader:
# get row data
parameter = row.get('parameter')
# if parameter:
# parameter = harden_parameter_name(parameter)
display_name = row.get('display_name')
value_type = row.get('value_type')
# if value_type:
# value_type = harden_value_type(value_type)
description = row.get('description')
default_value = row.get('default_value')
examples = row.get('examples')
valid_min = row.get('valid_min')
# if valid_min:
# valid_min = harden_numerical_value(valid_min)
valid_max = row.get('valid_max')
# if valid_max:
# valid_max = harden_numerical_value(valid_max)
valid_options = row.get('valid_options')
required = row.get('required')
popular = row.get('popular')
automl_enabled = row.get('automl_enabled')
regex = row.get('regex')
link = row.get('link')
# convert value type
value_type = __type_mapping.get(value_type)
if value_type is None:
continue
if value_type == 'array':
array_parameters.append(parameter)
# fix data types
default_value = __basic_type_fix(value_type, default_value)
valid_min = __basic_type_fix(value_type, valid_min)
valid_max = __basic_type_fix(value_type, valid_max)
valid_options = __array_type_fix(value_type, valid_options)
examples = __array_type_fix(value_type, examples)
# compose object
params = parameter.split('.')
last_param = params.pop()
if value_type == 'const':
obj = {'type': 'object',
'properties': {last_param: {'const': default_value}}, 'default': {last_param: default_value}}
else:
obj = {'type': 'object',
'properties': {last_param: {'type': value_type}}}
# add known object details
props = obj['properties'][last_param]
if display_name not in (None, ''):
props['title'] = display_name
if description not in (None, ''):
props['description'] = description
if examples not in (None, []):
props['examples'] = examples
if default_value not in (None, ''):
props['default'] = default_value
obj['default'] = {last_param: default_value}
if valid_min is not None:
props['minimum'] = valid_min
if valid_max is not None:
props['maximum'] = valid_max
if valid_options not in (None, []):
props['enum'] = valid_options
if regex not in (None, '') and value_type == 'string':
props['pattern'] = regex
if link is not None and link.startswith('http'):
props['link'] = link
if required is not None and required.lower() == 'yes':
if obj.get('required') is None:
obj['required'] = []
obj['required'].append(last_param)
if popular is not None and popular.lower() == 'yes':
if obj.get('popular') is None:
obj['popular'] = []
obj['popular'].append(last_param)
if automl_enabled is not None and automl_enabled.lower() == 'true':
if obj.get('automl_default_parameters') is None:
obj['automl_default_parameters'] = []
obj['automl_default_parameters'].append(parameter)
# special override of default with array of class strings
isArray = parameter in array_parameters
if classes != [] and isArray:
if parameter == 'inferencer_config.target_classes':
obj['default'] = {last_param: classes}
# add object hierarchy
while len(params) > 0:
joined_params = '.'.join(params)
isArray = joined_params in array_parameters
isRequired = obj.get('required') is not None
isPopular = obj.get('popular') is not None
hasDefault = obj.get('default') is not None
isAutomlenabled = obj.get('automl_default_parameters') is not None
param = params.pop()
if isArray:
default = []
if hasDefault:
default = [obj['default']]
if classes != []:
# dynamic patching of default for given dataset classes
if joined_params == 'classwise_config':
if hasDefault:
default = [__merge({'key': c}, obj['default']) for c in classes]
else:
default = [{'key': c} for c in classes]
elif joined_params == 'bbox_handler_config.classwise_bbox_handler_config':
if hasDefault:
default = [__merge({'key': c, 'value': {'output_map': c}}, obj['default']) for c in classes]
else:
default = [{'key': c, 'value': {'output_map': c}} for c in classes]
elif joined_params == 'dataset_config.target_class_mapping':
default = [{'key': c, 'value': c} for c in classes]
obj = {
'type': 'object',
'properties': {
param: {
'type': 'array',
'items': obj,
'default': default
}
}
}
if hasDefault or default != []:
obj['default'] = {param: default}
else:
default = obj.get('default')
obj = {
'type': 'object',
'properties': {param: obj}
}
if hasDefault:
obj['default'] = {param: default}
if isRequired:
obj['required'] = [param]
if isPopular:
obj['popular'] = [param]
if isAutomlenabled:
if not schema.get('automl_default_parameters'):
schema['automl_default_parameters'] = [parameter]
else:
schema['automl_default_parameters'].append(parameter)
# update schema with obj
__merge(schema, obj)
# return json schema
return schema
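# Minimal usage sketch (assumes a spec CSV with the columns referenced above exists at that path):
#   schema = convert("specs/detectnet_v2/detectnet_v2 - train.csv")
#   schema["properties"]  # per-parameter JSON-schema entries
#   schema["default"]     # nested default spec assembled from the default_value cells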
| tao_front_end_services-main | api/specs_utils/csv_to_json_schema.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Json to yaml file conversion"""
import yaml
import json
def yml(data):
"""Writes the dictionary data into yaml file"""
if type(data) is dict:
data.pop("version", None)
return yaml.safe_dump(data)
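# Minimal sketch: the "version" key is dropped before dumping, e.g.
#   yml({"version": 1, "random_seed": 42}) -> "random_seed: 42\n"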
def convert(path):
"""Reads from json and dumps into yaml file"""
data = '{}'
with open(path, mode='r', encoding='utf-8-sig') as f:
data = json.load(f)
return yml(data)
| tao_front_end_services-main | api/specs_utils/json_to_yaml.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API specs utils module"""
| tao_front_end_services-main | api/specs_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Json to kitti file conversion"""
import json
def kitti(data, level=0):
"""Writes the dictionary data into kitti file"""
if type(data) is dict and level == 0:
data.pop("version", None)
specs = []
level_space = ''
for _ in range(level):
level_space += ' '
for key in data:
if data[key] is None:
continue
if type(data[key]) is dict:
specs.append(level_space + key + ' {')
specs.append(kitti(data[key], level + 1))
specs.append(level_space + '}')
elif type(data[key]) is list:
for d in data[key]:
t = type(d)
s = str(d)
isEnum = bool(s.startswith('__') and s.endswith('__'))
if type(d) is dict:
specs.append(level_space + key + ' {')
specs.append(kitti(d, level + 1))
specs.append(level_space + '}')
# WARNING: LIST OF LIST NOT SUPPORTED
else:
if isEnum:
specs.append(level_space + key + ': ' + s[2:-2])
elif t in [bool, int, float]:
specs.append(level_space + key + ': ' + s)
else:
specs.append(level_space + key + ': "' + s + '"')
else:
t = type(data[key])
s = str(data[key])
isEnum = bool(s.startswith('__') and s.endswith('__'))
if isEnum:
specs.append(level_space + key + ': ' + s[2:-2])
elif t in [bool, int, float]:
specs.append(level_space + key + ': ' + s)
else:
specs.append(level_space + key + ': "' + s + '"')
return '\n'.join(specs)
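# Minimal sketch of the output (protobuf-style text; "__x__" strings are emitted as bare enums):
#   kitti({"model_config": {"arch": "resnet", "num_layers": 18, "enabled": True}})
# returns:
#   model_config {
#    arch: "resnet"
#    num_layers: 18
#    enabled: True
#   }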
def convert(path): # NOTE: Not calling this function. Just using kitti() in current workflow.
"""Reads from json and dumps into kitti file"""
data = '{}'
with open(path, mode='r', encoding='utf-8-sig') as f:
data = json.load(f)
# remove version from schema for now since containers do not yet support it
return kitti(data)
| tao_front_end_services-main | api/specs_utils/json_to_kitti.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Json spec schema hardening modules"""
import copy
from specs_utils import csv_to_json_schema
from jsonschema import validate as validationDriver, exceptions
def __merge(d1, d2):
"""Merge 2 dictionaries"""
for key in d2.keys():
if key not in d1:
d1[key] = d2[key]
elif d1[key] is None:
d1[key] = d2[key]
elif type(d1[key]) is list and type(d2[key]) is list:
if d1[key] != [] and type(d1[key][0]) is dict:
for i in range(0, min(len(d1[key]), len(d2[key]))):
__merge(d1[key][i], d2[key][i])
else:
d1[key] = d1[key] + [i for i in d2[key] if i not in d1[key]]
elif type(d2[key]) is not dict:
d1[key] = d2[key]
else:
__merge(d1[key], d2[key])
return d1
def harden(data, schema):
"""Harden the schema provided"""
return __merge(copy.deepcopy(schema['default']), data)
def validate(data, schema):
"""Validate the schema provided"""
try:
validationDriver(instance=data, schema=schema)
except exceptions.ValidationError as e:
return e.message
return None
# test code
if __name__ == '__main__':
schema = csv_to_json_schema.convert("specs/detectnet_v2/detectnet_v2 - train.csv")
# positive test
hardened_data = harden(data={'random_seed': 99}, schema=schema)
err = validate(data=hardened_data, schema=schema)
if err:
print(err)
# negative test
err = validate(data={'random_seed': 99}, schema=schema)
if err:
print(err)
| tao_front_end_services-main | api/specs_utils/hardening.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication utils session modules"""
import os
import yaml
import functools
import threading
def synchronized(wrapped):
"""Decorator function for thread synchronization"""
lock = threading.Lock()
@functools.wraps(wrapped)
def _wrap(*args, **kwargs):
with lock:
return wrapped(*args, **kwargs)
return _wrap
@synchronized
def set_session(creds):
"""Append/Pop sessions"""
sessions = []
session = creds
filename = os.path.join('/', 'shared', 'users', "sessions.yaml")
try:
with open(filename, "r", encoding='utf-8') as infile:
sessions = yaml.safe_load(infile)
if type(sessions) != list: # corrupted file?
sessions = []
if len(sessions) > 1000: # keep a max of 1000 active sessions
sessions.pop(0) # remove oldest known session
except:
pass
sessions.append(session)
with open(filename, "w", encoding='utf-8') as outfile:
yaml.dump(sessions, outfile, sort_keys=False)
@synchronized
def get(token):
"""Read session from sessions.yaml file"""
sessions = []
user = None
filename = os.path.join('/', 'shared', 'users', "sessions.yaml")
try:
with open(filename, "r", encoding='utf-8') as infile:
sessions = yaml.safe_load(infile)
if type(sessions) != list: # corrupted file?
sessions = []
except:
pass
for session in reversed(sessions):
if str(session.get('token')) == str(token):
user = session.get('user_id')
break
return user
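# Note: sessions.yaml is a flat list of {"user_id": ..., "token": ...} records appended by
# set_session(); get() scans it newest-first so the latest session for a token wins.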
| tao_front_end_services-main | api/auth_utils/sessions.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication utils credential modules"""
import jwt
import sys
import uuid
import requests
from auth_utils import __ngc_jwks_client, sessions
def get_from_ngc(key):
"""Get signing key from token"""
err = None
creds = None
try:
r = requests.get('https://authn.nvidia.com/token?service=ngc', headers={'Accept': 'application/json', 'Authorization': 'ApiKey ' + key})
if r.status_code != 200:
err = 'Credentials error: Invalid NGC_API_KEY: ' + key
return creds, err
token = r.json().get('token')
user = None
payload = {}
signing_key = __ngc_jwks_client.get_signing_key_from_jwt(token)
payload = jwt.decode(
token,
signing_key.key,
audience="ngc",
algorithms=["RS256"]
)
user = uuid.uuid5(uuid.UUID(int=0), payload.get('sub'))
creds = {'user_id': str(user), 'token': token}
except Exception as e:
err = 'Credentials error: ' + str(e)
if not err:
print('Adding trusted user: ' + str(creds.get('user_id')), file=sys.stderr)
sessions.set_session(creds)
return creds, err
| tao_front_end_services-main | api/auth_utils/credentials.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API authentication utils moduke"""
import jwt
__ngc_jwks_client = jwt.PyJWKClient("https://authn.nvidia.com/pubJWKS")
__starfleet_jwks_client = jwt.PyJWKClient("https://login.nvidia.com/.well-known/jwks.json")
| tao_front_end_services-main | api/auth_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Authentication utils validation modules"""
import jwt
import uuid
import sys
import os
from auth_utils import __ngc_jwks_client, __starfleet_jwks_client, sessions
def validate(token):
"""Validate Authentication"""
err = None
user = sessions.get(token)
if user is not None:
print('Found trusted user: ' + str(user), file=sys.stderr)
else:
user, err = _validate_ngc(token)
if not err:
print('Adding trusted user: ' + str(user), file=sys.stderr)
sessions.set_session({'user_id': str(user), 'token': token})
else:
user, err = _validate_starfleet(token)
if not err:
print('Adding trusted user: ' + str(user), file=sys.stderr)
sessions.set_session({'user_id': str(user), 'token': token})
return user, err
def _validate_ngc(token):
"""Validate Authentication via ngc"""
err = None
user = None
payload = {}
try:
signing_key = __ngc_jwks_client.get_signing_key_from_jwt(token)
payload = jwt.decode(
token,
signing_key.key,
audience="ngc",
algorithms=["RS256"]
)
user = uuid.uuid5(uuid.UUID(int=0), payload.get('sub'))
except Exception as e:
err = 'Token error: ' + str(e)
return user, err
def _validate_starfleet(token):
"""Validate Authentication via starfleet"""
client_id = os.getenv('AUTH_CLIENT_ID', default='bnSePYullXlG-504nOZeNAXemGF6DhoCdYR8ysm088w')
payload = {}
err = None
try:
signing_key = __starfleet_jwks_client.get_signing_key_from_jwt(token)
payload = jwt.decode(
token,
signing_key.key,
algorithms=["ES256"],
audience=client_id
)
except Exception as e:
err = 'Token error: ' + str(e)
user = payload.get('external_id')
if not err and not user:
err = 'Token error: unknown user'
return user, err
| tao_front_end_services-main | api/auth_utils/authentication.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# http://<server>:<port>/<namespace>/api/v1/user/<user_id>/model?<params>
# ['', '<namespace', 'api', 'v1', 'user', '<user_id>', 'model']
"""Authentication utils access control modeules"""
def _remove_prefix(text, prefix):
"""Removes prefix from given text and returns it"""
if text.startswith(prefix):
return text[len(prefix):]
return text
def validate(url, user_id):
"""Validate URL format"""
user_id = str(user_id)
err = "Invalid URI path for user " + user_id
tmp = _remove_prefix(url, 'http://')
tmp = _remove_prefix(tmp, 'https://')
tmp = _remove_prefix(tmp, tmp.split('/')[0])
tmp = tmp.split('?')[0]
parts = tmp.split('/')
# check for user ID match in URL path, with or without domain name in path
if (len(parts) >= 5 and parts[3] == 'user' and parts[4] == user_id):
err = None
elif (len(parts) >= 6 and parts[4] == 'user' and parts[5] == user_id):
err = None
return err
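# Minimal sketch: the user ID must appear right after the "user" path segment, e.g.
#   validate("http://server:8000/api/v1/user/1234/model?skip=0", "1234") -> None (allowed)
#   validate("http://server:8000/api/v1/user/other/model", "1234")       -> "Invalid URI path ..."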
| tao_front_end_services-main | api/auth_utils/access_control.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML read parameters modules"""
from handlers.utilities import Code
from handlers.utilities import AUTOML_DISABLED_NETWORKS
from specs_utils import csv_to_json_schema
import os
import json
import pandas as pd
_VALID_TYPES = ["int", "integer",
"float",
"ordered_int", "bool",
"ordered", "categorical"]
def get_flatten_specs(dict_spec, flat_specs, parent=""):
"""Flatten nested dictionary"""
for key, value in dict_spec.items():
if isinstance(value, dict):
get_flatten_specs(value, flat_specs, parent + key + ".")
else:
flat_key = parent + key
flat_specs[flat_key] = value
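# Minimal sketch: nested spec keys are flattened into dot-separated paths, e.g.
#   flat = {}
#   get_flatten_specs({"train": {"lr": 0.01, "optim": {"name": "sgd"}}}, flat)
#   flat -> {"train.lr": 0.01, "train.optim.name": "sgd"}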
def generate_hyperparams_to_search(network_arch, automl_add_hyperparameters, automl_remove_hyperparameters, handler_root):
"""Use train.csv spec of the network to choose the parameters of AutoML
Returns: a list of dict for AutoML supported networks
"""
if network_arch not in AUTOML_DISABLED_NETWORKS:
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CSV_PATH = os.path.join(DIR_PATH, "specs_utils", "specs", network_arch, f"{network_arch} - train.csv")
if not os.path.exists(CSV_PATH):
return Code(404, {}, "Default specs do not exist for action")
original_train_spec = csv_to_json_schema.convert(CSV_PATH)["default"]
original_spec_with_keys_flattened = {}
get_flatten_specs(original_train_spec, original_spec_with_keys_flattened)
with open(f"{handler_root}/specs/train.json", "r", encoding='utf-8') as f:
updated_train_spec = json.load(f)
updated_spec_with_keys_flattened = {}
get_flatten_specs(updated_train_spec, updated_spec_with_keys_flattened)
deleted_params = original_spec_with_keys_flattened.keys() - updated_spec_with_keys_flattened
data_frame = pd.read_csv(CSV_PATH)
data_frame = data_frame[data_frame['value_type'].isin(_VALID_TYPES)]
if network_arch == "faster_rcnn":
automl_default_params = data_frame.loc[data_frame['automl_enabled'] == True]['parameter'].tolist() # pylint: disable=C0121 # noqa: E712
if "model_config.input_image_config.size_height_width.height" in automl_default_params or "model_config.input_image_config.size_height_width.height" in automl_add_hyperparameters:
if "augmentation_config.preprocessing.output_image_height" in automl_remove_hyperparameters:
automl_remove_hyperparameters.remove("augmentation_config.preprocessing.output_image_height")
data_frame.loc[data_frame.parameter.isin(['augmentation_config.preprocessing.output_image_height']), 'automl_enabled'] = True
if "model_config.input_image_config.size_height_width.width" in automl_default_params or "model_config.input_image_config.size_height_width.width" in automl_add_hyperparameters:
if "augmentation_config.preprocessing.output_image_width" in automl_remove_hyperparameters:
automl_remove_hyperparameters.remove("augmentation_config.preprocessing.output_image_width")
data_frame.loc[data_frame.parameter.isin(['augmentation_config.preprocessing.output_image_width']), 'automl_enabled'] = True
data_frame = data_frame.loc[data_frame['automl_enabled'] != False] # pylint: disable=C0121 # noqa: E712
# Push params that are dependent on other params to the bottom
data_frame = data_frame.sort_values(by=['depends_on'])
data_frame = data_frame[::-1]
data_frame.loc[data_frame.parameter.isin(automl_remove_hyperparameters), 'automl_enabled'] = False
data_frame.loc[data_frame.parameter.isin(automl_add_hyperparameters), 'automl_enabled'] = True
automl_params = data_frame.loc[data_frame['automl_enabled'] == True] # pylint: disable=C0121 # noqa: E712
automl_params = automl_params.loc[~automl_params['parameter'].isin(deleted_params)]
automl_params = automl_params[["parameter", "value_type", "default_value", "valid_min", "valid_max", "valid_options", "math_cond", "parent_param", "depends_on"]]
return automl_params.to_dict('records')
return {}
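# Illustrative shape of one record returned via to_dict('records'); parameter names and values
# are hypothetical and empty CSV cells come through as pandas NaN:
#   {"parameter": "training_config.learning_rate", "value_type": "float",
#    "default_value": 0.0005, "valid_min": 1e-06, "valid_max": 0.01,
#    "valid_options": nan, "math_cond": nan, "parent_param": nan, "depends_on": nan}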
| tao_front_end_services-main | api/automl/params.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML controller modules"""
import os
import time
import json
import sys
from copy import deepcopy
from automl.utils import Recommendation, ResumeRecommendation, JobStates, report_healthy
from handlers.utilities import StatusParser, _ITER_MODELS, _PURPOSE_BUILT_MODELS, read_network_config
from handlers.stateless_handlers import update_job_status
from job_utils.automl_job_utils import on_new_automl_job, on_delete_automl_job, on_cancel_automl_job
import uuid
import shutil
import glob
time_per_epoch = 0
time_per_epoch_counter = 0
class Controller:
"""
Abstractly, just a collection of threads and a switch to start and stop them
- start(): Start all threads needed to run AutoML
- stop(): Stop all threads started by start()
- generate_recommendations(): Runs the automl algorithm to generate and analyze recommendations
- read_results(): Listens to experiments
- write_results(): Routinely updates automl_metadata.json to help Handlers
"""
def __init__(self, root, network, brain, automl_context, max_recommendations, delete_intermediate_ckpt, metric, automl_algorithm):
"""Initialize the Automl Controller class
Args:
root: handler root
network: model name
brain: Bayesian/Hyperband class object
automl_context: job context with regards to automl
max_recommendations: max_recommendation parameter value (for Bayesian)
delete_intermediate_ckpt: boolean value to delete/not-delete checkpoints which don't correspond to the best model
metric: metric name which will be used to choose best models
automl_algorithm: automl algorithm name
"""
self.brain = brain
self.recommendations = []
self.root = root
self.network = network
self.completed_recommendations = 0
self.max_recommendations = int(max_recommendations)
self.delete_intermediate_ckpt = bool(delete_intermediate_ckpt)
self.automl_algorithm = automl_algorithm
self.metric = metric
self.network_metric_mapping = {"action_recognition": "val_acc",
"bpnet": "loss",
"classification_pyt": "loss",
"classification_tf1": "validation_accuracy",
"classification_tf2": "val_accuracy",
"deformable_detr": "val_mAP50",
"detectnet_v2": "mean average precision",
"dino": "val_mAP50",
"dssd": "mean average precision",
"efficientdet_tf1": "AP50",
"efficientdet_tf2": "AP50",
"faster_rcnn": "mean average precision",
"fpenet": "evaluation_cost ",
"lprnet": "validation_accuracy",
"ml_recog": "val Precision at Rank 1",
"multitask_classification": "mean accuracy",
"mask_rcnn": "mask_AP",
"ocdnet": "hmean",
"ocrnet": "val_acc",
"optical_inspection": "val_acc",
"pointpillars": "loss",
"pose_classification": "val_acc",
"re_identification": "cmc_rank_1",
"retinanet": "mean average precision",
"ssd": "mean average precision",
"segformer": "Mean IOU",
"unet": "loss",
"yolo_v3": "mean average precision",
"yolo_v4": "mean average precision",
"yolo_v4_tiny": "mean average precision"}
if self.automl_algorithm in ("hyperband", "h") and self.network in ("bpnet", "multitask_classification", "unet"):
self.metric_key = "loss"
self.metric = "loss"
elif self.metric == "kpi":
self.metric_key = self.network_metric_mapping[self.network]
else:
self.metric_key = self.metric
self.brain.reverse_sort = True
self.min_max = max
if self.metric == "loss" or self.metric_key in ("loss", "evaluation_cost "):
self.brain.reverse_sort = False
self.min_max = min
self.best_epoch_number = 0
self.best_model_copied = False
self.ckpt_path = {}
self.old_bracket = 0
self.hyperband_cancel_condition_seen = False
self.eta = "Will be updated after completing one epoch"
self.remaining_epochs_in_experiment = float("inf")
self.average_time_per_epoch = float("inf")
self.automl_context = automl_context
self.on_new_automl_job = lambda jc: on_new_automl_job(self.automl_context, jc)
def start(self):
"""Starts the automl controller"""
report_healthy(self.root + "/controller.log", "Starting", clear=False)
self._execute_loop()
update_job_status(self.automl_context.handler_id, self.automl_context.id, status="Done")
on_delete_automl_job(self.automl_context.handler_id, self.automl_context.id)
def save_state(self):
"""Save the self.recommendations into a controller.json"""
recs_dict = [ele.__dict__ for ele in self.recommendations]
file_path = self.root + "/controller.json"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(recs_dict, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
@staticmethod
def load_state(root, network, brain, automl_context, max_recommendations, delete_intermediate_ckpt, metric, automl_algorithm):
"""Loads a Controller object from pre-existing root"""
ctrl = Controller(root, network, brain, automl_context, max_recommendations, delete_intermediate_ckpt, metric, automl_algorithm)
ctrl.recommendations = []
# Restore the recommendations
file_path = root + "/controller.json"
with open(file_path, 'r', encoding='utf-8') as f:
recs_dict = json.loads(f.read())
for rec_dict in recs_dict:
rec = Recommendation(rec_dict["id"], rec_dict["specs"])
rec.update_result(rec_dict["result"])
rec.update_status(rec_dict["status"])
rec.assign_job_id(rec_dict["job_id"])
ctrl.recommendations.append(rec)
# Handle temp_rec
# temp_rec is a recommendation that started, but never ended
# Usually, if the controller is stopped before a recommendation is done, it might have to be started / resumed again
file_path = root + "/current_rec.json"
with open(file_path, 'r', encoding='utf-8') as f:
temp_rec = json.loads(f.read())
ctrl.recommendations[temp_rec].update_status(JobStates.success)
ctrl.save_state()
# ctrl.on_new_automl_job(ctrl.recommendations[temp_rec])
return ctrl
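# Illustrative controller.json entry persisted by save_state() and consumed by load_state()
# (values are hypothetical):
#   {"id": 0, "specs": {...}, "result": 0.85, "status": "success", "job_id": "<uuid>"}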
def _execute_loop(self):
"""A loop that does the 3 things in order
1.See if any new recommendation is up to execute
2.Reads results of newly done experiments
3.Writes AutoML status into a file which can be shown to the end user
"""
update_job_status(self.automl_context.handler_id, self.automl_context.id, status="Running")
while True:
automl_status_file = self.root + "/controller.json"
if os.path.exists(automl_status_file):
with open(automl_status_file, encoding='utf-8') as f:
automl_status = json.load(f)
self.completed_recommendations = len(automl_status)
if (self.completed_recommendations == self.max_recommendations and automl_status[self.max_recommendations - 1]['status'] in ('success', 'failure') and self.automl_algorithm in ("bayesian", "b")) or (self.automl_algorithm in ("hyperband", "h") and self.brain.done()):
report_healthy(self.root + "/controller.log", "Stopping", clear=False)
# Find best model based on mAP
self.find_best_model()
if self.best_model_copied:
# Delete final extra checkpoints after finish training
for rec in self.recommendations:
expt_root = os.path.join(self.root, "experiment_" + str(rec.id))
self.get_best_checkpoint_path(expt_root, rec)
self.delete_not_best_model_checkpoints(expt_root, rec, True)
self.eta = 0.0
self.remaining_epochs_in_experiment = 0.0
self.write_results()
return
self.run_experiments()
self.read_results()
self.write_results()
time.sleep(4)
def run_experiments(self):
"""Generate recommendation from brain
if a new job is requested, add it to self.recommendations and execute it (add it to workflow)
if a resume is requested, add the relevant recommendation to the workflow
"""
if self.automl_algorithm in ("bayesian", "b") and len(self.recommendations) == self.max_recommendations:
return
history = deepcopy(self.recommendations)
recommended_specs = self.brain.generate_recommendations(history)
assert len(recommended_specs) in [0, 1], "At most one recommendation"
for spec in recommended_specs:
print("Recommendation gotten", file=sys.stderr)
self.best_epoch_number = 0
if type(spec) == dict:
# Save brain state and update current recommendation
self.hyperband_cancel_condition_seen = False
self.brain.save_state()
# update temp_rec
new_id = len(self.recommendations)
file_path = self.root + "/current_rec.json"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(new_id, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
# Run new recommendation
rec = Recommendation(new_id, spec)
job_id = str(uuid.uuid4()) # Assign job_id for this recommendation
rec.assign_job_id(job_id)
self.recommendations.append(rec)
self.save_state()
self.on_new_automl_job(rec)
report_healthy(self.root + "/controller.log", "Job started", clear=False)
elif type(spec) == ResumeRecommendation:
self.hyperband_cancel_condition_seen = False
rec_id = spec.id
# Save brain state and update current recommendation
self.brain.save_state()
# update temp_rec
file_path = self.root + "/current_rec.json"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(rec_id, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
assert self.recommendations[rec_id].id == rec_id # Make sure the self.recommendations[rec_id] indeed has 'id' field = rec_id
self.recommendations[rec_id].specs = spec.specs.copy()
self.recommendations[rec_id].update_status(JobStates.pending)
# Remove previous files (except checkpoints) from experiment folder.
expt_name = "experiment_" + str(rec_id)
expt_path = os.path.join(self.root, expt_name)
if os.path.exists(expt_path):
expt_file_name = glob.glob(expt_path + "/**/*.txt", recursive=True) + glob.glob(expt_path + "/**/*.json", recursive=True) + glob.glob(expt_path + "/**/*event*", recursive=True) + glob.glob(expt_path + "/**/*lightning_logs*", recursive=True)
for file_name in expt_file_name:
if os.path.isfile(file_name):
os.remove(file_name)
self.save_state()
self.on_new_automl_job(self.recommendations[rec_id])
report_healthy(self.root + "/controller.log", "Job started", clear=False)
def read_results(self):
"""Update results for each recommendation"""
flag = False
for rec in self.recommendations:
old_status = rec.status
job_name = rec.job_id
if not job_name:
continue
expt_name = "experiment_" + str(rec.id)
expt_root = os.path.join(self.root, expt_name)
# If rec already changed to Success, no need to check
if rec.status in [JobStates.success, JobStates.failure]:
if self.delete_intermediate_ckpt:
self.delete_checkpoint_files(expt_root)
# Remove the checkpoints from not best model
brain_file_path = self.root + "/brain.json"
if os.path.exists(brain_file_path):
with open(brain_file_path, 'r', encoding='utf-8') as u:
brain_dict = json.loads(u.read())
if self.automl_algorithm in ("bayesian", "b") or self.old_bracket != brain_dict.get("bracket", 0):
flag = self.delete_not_best_model_checkpoints(expt_root, rec, flag)
continue
status_file = os.path.join(self.root, expt_name, "status.json")
status_parser = StatusParser(status_file, self.network, expt_root)
new_results = status_parser.update_results()
self.calculate_eta(new_results)
validation_map_processed = False
# Force termination of the case for hyperband training
if self.automl_algorithm in ("hyperband", "h"):
brain_file_path = self.root + "/brain.json"
if os.path.exists(brain_file_path):
with open(brain_file_path, 'r', encoding='utf-8') as u:
brain_dict = json.loads(u.read())
# if the experiment is in the last set of bracket, do not cancel job.
for result_key in new_results.keys():
if self.hyperband_cancel_condition_seen or result_key in ("epoch", "cur_iter"):
if not isinstance(new_results.get(result_key, None), type(None)):
self.brain_epoch_number = float(brain_dict.get("epoch_number", float('inf')))
if len(brain_dict.get("ni", [float('-inf')])[str(brain_dict.get("bracket", 0))]) != (brain_dict.get("sh_iter", float('inf')) + 1):
if self.hyperband_cancel_condition_seen or new_results.get(result_key) > self.brain_epoch_number:
self.hyperband_cancel_condition_seen = True
# Cancel the current running job and change the job state to success
validation_map = self.read_metric(results=new_results)
if validation_map != 0.0:
rec.update_status(JobStates.success)
validation_map_processed = True
self.hyperband_cancel_condition_seen = False
on_cancel_automl_job(rec.job_id)
self.get_best_checkpoint_path(expt_root, rec)
self.delete_checkpoint_files(expt_root)
# Status is read from the status.json and not from K8s
# status.json needs to be reliable
status = ""
if rec.status == JobStates.success:
status = JobStates.success
elif new_results.get("detailed_status"):
status = new_results["detailed_status"].get("status", JobStates.pending).lower()
if not status:
status = JobStates.pending
if status in [JobStates.success, JobStates.failure]:
if not validation_map_processed:
validation_map = self.read_metric(results=new_results)
if status == JobStates.failure:
if self.brain.reverse_sort:
validation_map = 1e-7
else:
validation_map = float('inf')
if validation_map != 0.0:
rec.update_result(validation_map)
self.save_state()
if old_status != status:
rec.update_status(status)
self.save_state()
if status == JobStates.success:
container_log_file = f"{self.root}/experiment_{rec.id}/log.txt"
if os.path.exists(container_log_file):
with open(container_log_file, "a", encoding='utf-8') as f:
f.write("\nEOF\n")
if rec.status in [JobStates.success, JobStates.failure] and self.delete_intermediate_ckpt:
# Retain the latest checkpoint and remove others in experiment folder
self.get_best_checkpoint_path(expt_root, rec)
self.delete_checkpoint_files(expt_root)
if self.automl_algorithm in ("hyperband", "h"):
if os.path.exists(brain_file_path):
self.old_bracket = brain_dict.get("bracket", 0)
def calculate_eta(self, new_results):
"""Calculate estimated time remaining for automl job"""
global time_per_epoch
global time_per_epoch_counter
for result_key in new_results.keys():
if result_key in ("epoch", "cur_iter") and new_results.get(result_key):
current_epoch = new_results.get(result_key)
if result_key == "cur_iter":
time_per_key = "time_per_iter"
else:
time_per_key = "time_per_epoch"
time_per_epoch_string = new_results.get(time_per_key, "0:0:0.0")
if time_per_epoch_string:
format_time_per_epoch = time.strptime(time_per_epoch_string.split(".")[0], '%H:%M:%S')
time_per_epoch += (format_time_per_epoch.tm_hour * 60 * 60 + format_time_per_epoch.tm_min * 60 + format_time_per_epoch.tm_sec)
else:
time_per_epoch = 0
time_per_epoch_counter += 1
self.average_time_per_epoch = time_per_epoch / time_per_epoch_counter
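                # ETA is the number of epochs still to be run (current experiment plus experiments
                # not yet launched) times the running average time per epoch, converted to minutes below.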
if self.automl_algorithm in ("bayesian", "b"):
remaining_epochs = self.brain.num_epochs_per_experiment - current_epoch
self.remaining_epochs_in_experiment = remaining_epochs + (self.max_recommendations - self.completed_recommendations) * (self.brain.num_epochs_per_experiment)
self.eta = self.remaining_epochs_in_experiment * self.average_time_per_epoch
self.eta /= 60
self.eta = round(self.eta, 2)
elif self.automl_algorithm in ("hyperband", "h"):
current_sh_allowed_epochs = self.brain.ri[self.brain.bracket][self.brain.sh_iter] * self.brain.epoch_multiplier
current_sh_remaining_epochs = (self.brain.ni[self.brain.bracket][self.brain.sh_iter] - self.brain.expt_iter) * current_sh_allowed_epochs
if current_epoch < current_sh_allowed_epochs:
current_sh_remaining_epochs += ((current_sh_allowed_epochs - current_epoch))
future_sh_epochs = 0.0
for bracket in range(self.brain.bracket, len(self.brain.ni)):
for remaining_sh in range(self.brain.sh_iter + 1, len(self.brain.ri[bracket])):
current_sh_epochs = self.brain.ri[bracket][remaining_sh]
if remaining_sh != 0:
current_sh_epochs -= self.brain.ri[bracket][remaining_sh - 1]
future_sh_epochs += self.brain.ni[bracket][remaining_sh] * current_sh_epochs * self.brain.epoch_multiplier
self.remaining_epochs_in_experiment = current_sh_remaining_epochs + future_sh_epochs
self.eta = self.remaining_epochs_in_experiment * self.average_time_per_epoch
self.eta /= 60
self.eta = round(self.eta, 2)
def write_results(self):
"""Update stats value and write to automl_metadata.json"""
controller_json = os.path.join(self.root, "automl_metadata.json")
        # Best metric value seen so far
result_dict = {}
try:
if self.recommendations[-1].result == 0.0:
result_dict[f"best_{self.metric_key}"] = self.min_max(self.recommendations[:-1], key=lambda rec: rec.result).result
else:
result_dict[f"best_{self.metric_key}"] = self.min_max(self.recommendations, key=lambda rec: rec.result).result
except:
result_dict[f"best_{self.metric_key}"] = 0.0
eta_msg_suffix = ""
if type(self.eta) == float:
eta_msg_suffix = " minutes remaining approximately"
result_dict["Estimated time for automl completion"] = str(self.eta) + eta_msg_suffix
result_dict["Current experiment number"] = len(self.recommendations)
if self.network in _ITER_MODELS:
result_dict["Number of iters yet to start"] = self.remaining_epochs_in_experiment
result_dict["Time per iter in seconds"] = round(self.average_time_per_epoch, 2)
else:
result_dict["Number of epochs yet to start"] = self.remaining_epochs_in_experiment
result_dict["Time per epoch in seconds"] = round(self.average_time_per_epoch, 2)
# Num failed jobs is one KPI
# Num successful jobs is one KPI
with open(controller_json, "w+", encoding='utf-8') as f:
f.write(json.dumps(result_dict, indent=4))
def find_best_model(self):
"""Find best model based on metric value chosen and copy those artifacts to best_model folder"""
print("Finding best recommendation config", file=sys.stderr)
try:
best_mAP = self.min_max(self.recommendations, key=lambda rec: rec.result).result
except:
best_mAP = 0.0
return
for rec in self.recommendations:
job_name = rec.job_id
if not job_name:
continue
expt_folder = os.path.join(self.root, "experiment_" + str(rec.id))
if os.path.exists(expt_folder) and (rec.status == JobStates.success and rec.result == best_mAP) and (glob.glob(expt_folder + "/**/*.tlt", recursive=True) + glob.glob(expt_folder + "/**/*.hdf5", recursive=True) + glob.glob(expt_folder + "/**/*.pth", recursive=True)):
self.best_model_copied = True
api_params = read_network_config(self.network)["api_params"]
spec_path = os.path.join(self.root, f"recommendation_{rec.id}.{api_params['spec_backend']}")
best_model_folder = os.path.join(self.root, "best_model")
shutil.copytree(expt_folder, best_model_folder)
shutil.copy(spec_path, os.path.join(self.root, "best_model"))
shutil.copy(os.path.join(self.root, "controller.json"), best_model_folder)
break
def get_best_checkpoint_path(self, path, recommendation):
"""Assign the checkpoint with the best metric value for supported models; for others call the 'find latest checkpoint method'"""
self.ckpt_path[path] = {}
if self.network in ("bpnet", "classification_pyt", "detectnet_v2", "fpenet", "pointpillars", "efficientdet_tf1", "faster_rcnn", "mask_rcnn", "segformer", "unet"):
format_epoch_number = str(self.best_epoch_number)
else:
format_epoch_number = f"{self.best_epoch_number:03}"
recommendation.best_epoch_number = format_epoch_number
find_trained_tlt = glob.glob(f"{path}/*{format_epoch_number}.tlt") + glob.glob(f"{path}/train/*{format_epoch_number}.tlt") + glob.glob(f"{path}/weights/*{format_epoch_number}.tlt")
find_trained_hdf5 = glob.glob(f"{path}/*{format_epoch_number}.hdf5") + glob.glob(f"{path}/train/*{format_epoch_number}.hdf5") + glob.glob(f"{path}/weights/*{format_epoch_number}.hdf5")
find_trained_pth = glob.glob(f"{path}/*{format_epoch_number}.pth") + glob.glob(f"{path}/train/*{format_epoch_number}.pth") + glob.glob(f"{path}/weights/*{format_epoch_number}.pth")
find_trained_ckzip = glob.glob(f"{path}/*{format_epoch_number}.ckzip") + glob.glob(f"{path}/train/*{format_epoch_number}.ckzip") + glob.glob(f"{path}/weights/*{format_epoch_number}.ckzip")
if find_trained_tlt:
self.ckpt_path[path]["tlt"] = find_trained_tlt[0]
if find_trained_hdf5:
self.ckpt_path[path]["hdf5"] = find_trained_hdf5[0]
if find_trained_pth:
self.ckpt_path[path]["pth"] = find_trained_pth[0]
if find_trained_ckzip:
self.ckpt_path[path]["ckzip"] = find_trained_ckzip[0]
def delete_checkpoint_files(self, path):
"""Remove the extra checkpoints generated after the on_cancel_automl_job"""
trained_files = glob.glob(path + "/**/*.tlt", recursive=True) + glob.glob(path + "/**/*.hdf5", recursive=True) + glob.glob(path + "/**/*.pth", recursive=True) + glob.glob(path + "/**/*.ckzip", recursive=True) + glob.glob(path + "/**/*.resume", recursive=True) + glob.glob(path + "/**/*lightning_logs*", recursive=True)
for files in trained_files:
if files not in self.ckpt_path[path].values():
if os.path.isfile(files):
os.remove(files)
def delete_not_best_model_checkpoints(self, path, rec, flag):
"""Remove the checkpoints which don't correspond to the best result"""
try:
if self.recommendations[-1].result == 0.0:
best_mAP = self.min_max(self.recommendations[:-1], key=lambda rec: rec.result).result
else:
best_mAP = self.min_max(self.recommendations, key=lambda rec: rec.result).result
except:
best_mAP = 0.0
if rec.result != best_mAP or bool(flag):
trained_files = glob.glob(path + "/**/*.tlt", recursive=True) + glob.glob(path + "/**/*.hdf5", recursive=True) + glob.glob(path + "/**/*.pth", recursive=True) + glob.glob(path + "/**/*.ckzip", recursive=True) + glob.glob(path + "/**/*.resume", recursive=True) + glob.glob(path + "/**/*event*", recursive=True) + glob.glob(path + "/**/*lightning_logs*", recursive=True)
for files in trained_files:
if os.path.isfile(files):
os.remove(files)
else:
flag = True
return flag
def trim_list(self, metric_list):
"""Retains only the tuples whose epoch numbers are <= required epochs"""
trimmed_list = []
for tuple_var in metric_list:
if tuple_var[0] >= 0:
if self.automl_algorithm in ("bayesian", "b") or (self.network == "pointpillars" and tuple_var[0] < self.brain_epoch_number) or (self.network != "pointpillars" and tuple_var[0] <= self.brain_epoch_number):
trimmed_list.append(tuple_var)
return trimmed_list
def read_metric(self, results):
"""
Parses the status parser object and returns the metric of interest
result: value from status_parser.update_results()
returns: the metric requested in normalized float
"""
metric_value = 0.0
if self.metric == "loss":
result_type = "graphical"
else:
result_type = "kpi"
try:
if result_type not in results.keys() or not results[result_type]:
return metric_value
for log in results[result_type]:
if self.metric == "loss":
criterion = "loss"
elif self.metric == "kpi":
criterion = self.network_metric_mapping[self.network]
else:
criterion = self.metric
if log["metric"] == criterion:
if log["values"]:
values_to_search = self.trim_list(log["values"].items())
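                        # values_to_search holds (epoch, metric value) pairs from status.json;
                        # trim_list drops epochs beyond the budget allotted to hyperband experiments.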
if self.automl_algorithm in ("hyperband", "h"):
with open(self.root + "/brain.json", 'r', encoding='utf-8') as u:
brain_dict = json.loads(u.read())
if (len(brain_dict.get("ni", [float('-inf')])[str(brain_dict.get("bracket", 0))]) != (brain_dict.get("sh_iter", float('inf')) + 1)):
self.best_epoch_number, metric_value = values_to_search[-1]
else:
self.best_epoch_number, metric_value = sorted(values_to_search, key=lambda x: x[1], reverse=self.brain.reverse_sort)[0]
else:
self.best_epoch_number, metric_value = sorted(sorted(values_to_search, key=lambda x: x[0], reverse=True), key=lambda x: x[1], reverse=self.brain.reverse_sort)[0]
metric_value = float(metric_value)
except Exception as e:
print("Requested metric not found, defaulting to 0.0", file=sys.stderr)
print(str(e), file=sys.stderr)
if self.brain.reverse_sort:
metric_value = float('inf')
else:
metric_value = 0.0
        if self.network in ("pointpillars", "fpenet"):  # status.json epoch number is 1 less than the epoch number in the checkpoint file name
            self.best_epoch_number += 1
        elif self.network in _PURPOSE_BUILT_MODELS or self.network in ("deformable_detr", "dino"):  # checkpoint epoch numbers start from 0, or validation logs are generated before training logs
            self.best_epoch_number -= 1
print(f"Metric returned is {metric_value} at epoch/iter {self.best_epoch_number}", file=sys.stderr)
return metric_value + 1e-07
| tao_front_end_services-main | api/automl/controller.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hyperband AutoML algorithm modules"""
import numpy as np
import json
import os
import math
from automl.utils import ResumeRecommendation, JobStates, fix_input_dimension, get_valid_range, clamp_value, report_healthy
from handlers.utilities import load_json_spec
np.random.seed(95051)
class HyperBand:
"""Hyperband AutoML algorithm class"""
def __init__(self, root, parameters, R, nu, network, epoch_multiplier):
"""Initialize the Hyperband algorithm class
Args:
root: handler root
parameters: automl sweepable parameters
R: the maximum amount of resource that can be allocated to a single configuration
nu: an input that controls the proportion of configurations discarded in each round of SuccessiveHalving
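        network: network architecture of the model being tuned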
epoch_multiplier: multiplying factor for epochs
"""
self.root = root
self.parameters = parameters
self.parent_params = {}
report_healthy(self.root + "/controller.log", "Hyperband init", clear=True)
self.epoch_multiplier = int(epoch_multiplier)
self.ni = {}
self.ri = {}
self.brackets_and_sh_sequence(R, nu)
self.epoch_number = 0
self.resume_epoch_number = 0
# State variables
self.bracket = 0 # Bracket
self.override_num_epochs(self.ri[self.bracket][-1] * self.epoch_multiplier)
self.sh_iter = 0 # SH iteration
self.experiments_considered = []
self.expt_iter = 0 # Recommendations within the SH
self.complete = False
self.reverse_sort = True
def brackets_and_sh_sequence(self, R, nu):
"""Generate ni,ri arrays based on R and nu values"""
smax = int(np.log(R) / np.log(nu))
        # This would be range(smax, -1, -1) to follow the paper, but the last bracket
        # just runs n random recommendations with no pruning, so it is skipped here
        # unless a Bayesian HP recommendation is used for it.
        for itr, s in enumerate(range(smax, 0, -1)):
self.ni[itr] = []
self.ri[itr] = []
n = int(math.ceil(int((smax + 1) / (s + 1)) * (nu**s)))
r = int(R / (nu**s))
for s_idx in range(s + 1):
ni = int(n * (nu**(-s_idx)))
ri = int(r * (nu**s_idx))
self.ni[itr].append(ni)
self.ri[itr].append(ri)
def override_num_epochs(self, num_epochs):
"""Override num epochs parameter in train spec file"""
spec = load_json_spec(self.root + "/../specs/train.json")
for key1 in spec:
if key1 in ("training_config", "train_config", "train"):
for key2 in spec[key1]:
if key2 in ("num_epochs", "epochs", "n_epochs", "max_iters"):
spec[key1][key2] = num_epochs
elif key2 in ("train_config"):
for key3 in spec[key1][key2]:
if key3 == "runner":
for key4 in spec[key1][key2][key3]:
if key4 == "max_epochs":
spec[key1][key2][key3][key4] = num_epochs
elif key1 in ("num_epochs"):
spec[key1] = num_epochs
with open(self.root + "/../specs/train.json", 'w', encoding='utf-8') as f:
json.dump(spec, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
# Generate a random sample for a parameter from params.py
def sample_parameter(self, parameter_config):
"""Generate a random value for the parameter passed"""
tp = parameter_config.get("value_type")
default_value = parameter_config.get("default_value", None)
math_cond = parameter_config.get("math_cond", None)
parent_param = parameter_config.get("parent_param", None)
if tp in ("int", "integer"):
if parameter_config["parameter"] == "augmentation_config.preprocessing.output_image_height":
if "model_config.input_image_config.size_height_width.height" in self.parent_params.keys():
return self.parent_params["model_config.input_image_config.size_height_width.height"]
if parameter_config["parameter"] == "augmentation_config.preprocessing.output_image_width":
if "model_config.input_image_config.size_height_width.width" in self.parent_params.keys():
return self.parent_params["model_config.input_image_config.size_height_width.width"]
v_min = parameter_config.get("valid_min", "")
v_max = parameter_config.get("valid_max", "")
if v_min == "" or v_max == "":
return int(default_value)
if (type(v_min) != str and math.isnan(v_min)) or (type(v_max) != str and math.isnan(v_max)):
return int(default_value)
v_min = int(v_min)
if (type(v_max) != str and math.isinf(v_max)) or v_max == "inf":
v_max = int(default_value)
else:
v_max = int(v_max)
random_int = np.random.randint(v_min, v_max + 1)
if type(math_cond) == str:
factor = int(math_cond.split(" ")[1])
random_int = fix_input_dimension(random_int, factor)
if not (type(parent_param) == float and math.isnan(parent_param)):
if (type(parent_param) == str and parent_param != "nan" and parent_param == "TRUE") or (type(parent_param) == bool and parent_param):
self.parent_params[parameter_config.get("parameter")] = random_int
return random_int
if tp == "float":
v_min = parameter_config.get("valid_min", "")
v_max = parameter_config.get("valid_max", "")
if v_min == "" or v_max == "":
return float(default_value)
if (type(v_min) != str and math.isnan(v_min)) or (type(v_max) != str and math.isnan(v_max)):
return float(default_value)
v_min, v_max = get_valid_range(parameter_config, self.parent_params)
random_float = np.random.uniform(low=v_min, high=v_max)
random_float = clamp_value(random_float, v_min, v_max)
if not (type(parent_param) == float and math.isnan(parent_param)):
if (type(parent_param) == str and parent_param != "nan" and parent_param == "TRUE") or (type(parent_param) == bool and parent_param):
self.parent_params[parameter_config.get("parameter")] = random_float
return random_float
if tp == "bool":
return np.random.randint(0, 2) == 1
if tp == "ordered_int":
if parameter_config.get("valid_options", "") == "":
return default_value
valid_values = parameter_config.get("valid_options")
sample = int(np.random.choice(valid_values.split(",")))
return sample
if tp in ("categorical", "ordered"):
if parameter_config.get("valid_options", "") == "":
return default_value
valid_values = parameter_config.get("valid_options")
sample = np.random.choice(valid_values.split(","))
return sample
return default_value
def save_state(self):
"""Save the Hyperband algorithm related variables to brain.json"""
state_dict = {}
state_dict["bracket"] = self.bracket
state_dict["sh_iter"] = self.sh_iter
state_dict["expt_iter"] = self.expt_iter
state_dict["complete"] = self.complete
state_dict["epoch_number"] = self.epoch_number
state_dict["resume_epoch_number"] = self.resume_epoch_number
state_dict["epoch_multiplier"] = self.epoch_multiplier
state_dict["ni"] = self.ni
state_dict["ri"] = self.ri
file_path = self.root + "/brain.json"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(state_dict, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
@staticmethod
def load_state(root, parameters, R, nu, network, epoch_multiplier):
"""Load the Hyperband algorithm related variables to brain.json"""
file_path = root + "/brain.json"
if not os.path.exists(file_path):
return HyperBand(root, parameters, R, nu, network, epoch_multiplier)
with open(file_path, 'r', encoding='utf-8') as f:
json_loaded = json.loads(f.read())
brain = HyperBand(root, parameters, R, nu, network, epoch_multiplier)
# Load state (Remember everything)
brain.bracket = json_loaded["bracket"] # Bracket
brain.sh_iter = json_loaded["sh_iter"] # SH iteration
brain.expt_iter = json_loaded["expt_iter"] # Recommendations within the SH
brain.complete = json_loaded["complete"]
brain.epoch_number = json_loaded["epoch_number"]
brain.resume_epoch_number = json_loaded["resume_epoch_number"]
return brain
def _generate_one_recommendation(self, history):
"""Updates the counter variables and performs successive halving"""
if self.complete:
return None
num = self.ni[self.bracket][self.sh_iter]
if self.expt_iter == num:
self.expt_iter = 0
self.sh_iter += 1
if self.sh_iter == len(self.ni[self.bracket]):
self.sh_iter = 0
self.bracket += 1
if self.bracket in self.ri.keys():
self.override_num_epochs(self.ri[self.bracket][-1] * self.epoch_multiplier)
if self.bracket > max(list(self.ni.keys())):
self.complete = True
return None
if self.sh_iter == 0:
specs = self._generate_random_parameters()
self.epoch_number = self.ri[self.bracket][self.sh_iter] * self.epoch_multiplier
to_return = specs
else:
# Do successive halving on the last bracket
# Here, we are sloppy in defining the window, but we assume runs that run for more epochs will be better
# We take history[-bracket_size:] and prune this at every SH step
lower = -1 * self.ni.get(self.bracket, [0])[0]
self.resume_epoch_number = int(self.ri[self.bracket][self.sh_iter - 1] * self.epoch_multiplier)
if self.expt_iter == 0:
if self.sh_iter == 1:
self.experiments_considered = sorted(history[lower:], key=lambda rec: rec.result, reverse=self.reverse_sort)[0:self.ni[self.bracket][self.sh_iter]]
else:
for experiment in self.experiments_considered:
experiment.result = history[experiment.id].result
self.experiments_considered = sorted(self.experiments_considered, key=lambda rec: rec.result, reverse=self.reverse_sort)[0:self.ni[self.bracket][self.sh_iter]]
self.epoch_number = self.ri[self.bracket][self.sh_iter] * self.epoch_multiplier
resumerec = ResumeRecommendation(self.experiments_considered[self.expt_iter].id, self.experiments_considered[self.expt_iter].specs)
to_return = resumerec
self.expt_iter += 1
return to_return
def done(self):
"""Return if Hyperband algorithm is complete or not"""
return self.complete
def _generate_random_parameters(self):
"""Generates random parameter values for a recommendation"""
hyperparam_dict = {}
for param in self.parameters:
name = param["parameter"]
rec = self.sample_parameter(param)
hyperparam_dict[name] = rec
return hyperparam_dict
def generate_recommendations(self, history):
"""Generates recommendations for the controller to run"""
if history == []:
rec1 = self._generate_one_recommendation(history)
assert type(rec1) == dict
self.track_id = 0
return [rec1]
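        # track_id points at the experiment whose completion gates the next
        # recommendation: a fresh random config tracks the newest history entry,
        # while a resumed config tracks the experiment being resumed.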
if history[self.track_id].status not in [JobStates.success, JobStates.failure]:
return []
rec = self._generate_one_recommendation(history)
if type(rec) == dict:
self.track_id = len(history)
return [rec]
if type(rec) == ResumeRecommendation:
self.track_id = rec.id
return [rec]
return []
| tao_front_end_services-main | api/automl/hyperband.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoML moduke"""
| tao_front_end_services-main | api/automl/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for AutoML"""
import os
import math
import glob
import datetime
from kubernetes import client
import time
def fix_input_dimension(dimension_value, factor=32):
"""Return dimension as a multiple of factor"""
if int(dimension_value) % factor == 0:
return dimension_value
return (int(dimension_value / factor) + 1) * factor
def clamp_value(value, v_min, v_max):
"""Clamps value within the given range"""
if value >= v_max:
epsilon = v_max / 10
if epsilon == 0.0:
epsilon = 0.0000001
value = v_max - epsilon
if value <= v_min:
epsilon = v_min / 10
if epsilon == 0.0:
epsilon = 0.0000001
value = v_min + epsilon
return value
def get_valid_range(parameter_config, parent_params):
"""Compute the clamp range for the given parameter"""
v_min = float(parameter_config.get("valid_min"))
v_max = float(parameter_config.get("valid_max"))
default_value = float(parameter_config.get("default_value"))
if math.isinf(v_min):
v_min = default_value
if math.isinf(v_max):
v_max = default_value
dependent_on_param = parameter_config.get("depends_on", None)
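    # "depends_on" strings look like "> other.param.name": an operator followed
    # by the name of the parameter this range is constrained against.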
if type(dependent_on_param) == str:
dependent_on_param_op = dependent_on_param.split(" ")[0]
dependent_on_param_name = dependent_on_param.split(" ")[1]
if dependent_on_param_name in parent_params.keys():
limit_value = parent_params[dependent_on_param_name]
else:
limit_value = default_value
epsilon = 0.000001
if limit_value == epsilon:
epsilon /= 10
if dependent_on_param_op == ">":
v_min = limit_value + epsilon
elif dependent_on_param_op == ">=":
v_min = limit_value
elif dependent_on_param_op == "<":
v_max = limit_value - epsilon
elif dependent_on_param_op == "<=":
v_max = limit_value
return v_min, v_max
def report_healthy(path, message, clear=False):
"""Write health message to the provided file"""
mode = "w" if clear else "a"
with open(path, mode, encoding='utf-8') as f:
f.write(f"Healthy at {datetime.datetime.now().isoformat()}\n")
if message:
f.write(str(message) + "\n")
def wait_for_job_completion(job_id):
"""Check if the provided job_id is actively running and wait until completion"""
while True:
ret = client.BatchV1Api().list_job_for_all_namespaces()
active_jobs = [job.metadata.name for job in ret.items]
active_jobs = list(set(active_jobs))
if job_id not in active_jobs:
break
time.sleep(5)
def delete_lingering_checkpoints(epoch_number, path):
"""Delete checkpoints which are present even after job deletion"""
trained_files = glob.glob(path + "/**/*.tlt", recursive=True) + glob.glob(path + "/**/*.hdf5", recursive=True) + glob.glob(path + "/**/*.pth", recursive=True) + glob.glob(path + "/**/*.ckzip", recursive=True) + glob.glob(path + "/**/*lightning_logs*", recursive=True)
for file_name in trained_files:
if os.path.isfile(file_name):
if not (f"{epoch_number}.tlt" in file_name or f"{epoch_number}.hdf5" in file_name or f"{epoch_number}.pth" in file_name or f"{epoch_number}.ckzip" in file_name):
os.remove(file_name)
class Recommendation:
"""Recommendation class for AutoML recommendations"""
def __init__(self, identifier, specs):
"""Initialize the Recommendation class
Args:
            identifier: the id of the recommendation
specs: the specs/config of the recommendation
"""
assert type(identifier) == int
self.id = identifier
assert type(specs) == dict
self.specs = specs
self.job_id = None
self.status = JobStates.pending
self.result = 0.0
self.best_epoch_number = ""
def items(self):
"""Returns specs.items"""
return self.specs.items()
def get(self, key):
"""Returns value of requested key in the spec"""
return self.specs.get(key, None)
def assign_job_id(self, job_id):
"""Associates provided job id to the class objects job id"""
assert type(job_id) == str
self.job_id = job_id
def update_result(self, result):
"""Update the result value"""
result = float(result)
assert type(result) == float
self.result = result
def update_status(self, status):
"""Update the status value"""
assert type(status) == str
self.status = status
def __repr__(self):
"""Constructs a dictionary with the class members and returns them"""
return f"id: {self.id}\njob_id: {self.job_id}\nresult: {self.result}\nstatus: {self.status}"
class ResumeRecommendation:
"""Recommendation class for Hyperband resume experiments"""
def __init__(self, identity, specs):
"""Initialize the ResumeRecommendation class
Args:
identity: the id of the recommendation
specs: the specs/config of the recommendation
"""
self.id = identity
self.specs = specs
class JobStates():
"""Various states of an automl job"""
pending = "pending"
running = "running"
success = "success"
failure = "failure"
error = "error" # alias for failure
done = "done" # alias for success
| tao_front_end_services-main | api/automl/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bayesian AutoML algorithm modules"""
import numpy as np
import os
import json
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, Matern
from scipy.stats import norm
from scipy.optimize import minimize
from automl.utils import JobStates, fix_input_dimension, get_valid_range, clamp_value, report_healthy
from handlers.utilities import load_json_spec
np.random.seed(95051)
class Bayesian:
"""Bayesian AutoML algorithm class"""
def __init__(self, root, parameters):
"""Initialize the Bayesian algorithm class
Args:
root: handler root
parameters: automl sweepable parameters
"""
self.root = root
self.parameters = parameters
self.parent_params = {}
length_scale = [1.0] * len(self.parameters)
m52 = ConstantKernel(1.0) * Matern(length_scale=length_scale, nu=2.5)
# m52 = ConstantKernel(1.0) * Matern(length_scale=1.0, nu=2.5) # is another option
self.gp = GaussianProcessRegressor(
kernel=m52,
alpha=1e-10,
optimizer="fmin_l_bfgs_b",
n_restarts_optimizer=10,
random_state=95051
)
# The following 2 need to be stored
self.Xs = []
self.ys = []
self.xi = 0.01
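        # xi is the exploitation-exploration trade-off used by the expected
        # improvement acquisition; larger values encourage more exploration.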
self.num_restarts = 5
self.num_epochs_per_experiment = self.get_total_epochs()
report_healthy(self.root + "/controller.log", "Bayesian init", clear=True)
def convert_parameter(self, parameter_config, suggestion):
"""Convert 0 to 1 GP prediction into a possible value"""
tp = parameter_config.get("value_type")
default_value = parameter_config.get("default_value", None)
math_cond = parameter_config.get("math_cond", None)
parent_param = parameter_config.get("parent_param", None)
if tp in ("int", "integer"):
if parameter_config["parameter"] == "augmentation_config.preprocessing.output_image_height":
if "model_config.input_image_config.size_height_width.height" in self.parent_params.keys():
return self.parent_params["model_config.input_image_config.size_height_width.height"]
if parameter_config["parameter"] == "augmentation_config.preprocessing.output_image_width":
if "model_config.input_image_config.size_height_width.width" in self.parent_params.keys():
return self.parent_params["model_config.input_image_config.size_height_width.width"]
v_min = parameter_config.get("valid_min", "")
v_max = parameter_config.get("valid_max", "")
if v_min == "" or v_max == "":
return int(default_value)
if (type(v_min) != str and math.isnan(v_min)) or (type(v_max) != str and math.isnan(v_max)):
return int(default_value)
v_min = int(v_min)
if (type(v_max) != str and math.isinf(v_max)) or v_max == "inf":
v_max = int(default_value)
else:
v_max = int(v_max)
random_int = np.random.randint(v_min, v_max + 1)
if type(math_cond) == str:
factor = int(math_cond.split(" ")[1])
random_int = fix_input_dimension(random_int, factor)
if not (type(parent_param) == float and math.isnan(parent_param)):
if (type(parent_param) == str and parent_param != "nan" and parent_param == "TRUE") or (type(parent_param) == bool and parent_param):
self.parent_params[parameter_config.get("parameter")] = random_int
return random_int
if tp == "float":
v_min = parameter_config.get("valid_min", "")
v_max = parameter_config.get("valid_max", "")
if v_min == "" or v_max == "":
return float(default_value)
if (type(v_min) != str and math.isnan(v_min)) or (type(v_max) != str and math.isnan(v_max)):
return float(default_value)
v_min, v_max = get_valid_range(parameter_config, self.parent_params)
normalized = suggestion * (v_max - v_min) + v_min
quantized = clamp_value(normalized, v_min, v_max)
if not (type(parent_param) == float and math.isnan(parent_param)):
if (type(parent_param) == str and parent_param != "nan" and parent_param == "TRUE") or (type(parent_param) == bool and parent_param):
self.parent_params[parameter_config.get("parameter")] = quantized
return quantized
if tp == "bool":
return np.random.randint(0, 2) == 1
if tp == "ordered_int":
if parameter_config.get("valid_options", "") == "":
return default_value
valid_values = parameter_config.get("valid_options")
sample = int(np.random.choice(valid_values.split(",")))
return sample
if tp in ("categorical", "ordered"):
if parameter_config.get("valid_options", "") == "":
return default_value
valid_values = parameter_config.get("valid_options")
sample = np.random.choice(valid_values.split(","))
return sample
return default_value
def save_state(self):
"""Save the Bayesian algorithm related variables to brain.json"""
state_dict = {}
state_dict["Xs"] = np.array(self.Xs).tolist() # List of np arrays
state_dict["ys"] = np.array(self.ys).tolist() # List
file_path = self.root + "/brain.json"
with open(file_path, 'w', encoding='utf-8') as f:
json.dump(state_dict, f,
separators=(',', ':'),
sort_keys=True,
indent=4)
@staticmethod
def load_state(root, parameters):
"""Load the Bayesian algorithm related variables to brain.json"""
file_path = root + "/brain.json"
if not os.path.exists(file_path):
            return Bayesian(root, parameters)
with open(file_path, 'r', encoding='utf-8') as f:
json_loaded = json.loads(f.read())
Xs = []
for x in json_loaded["Xs"]:
Xs.append(np.array(x))
ys = json_loaded["ys"]
bayesian = Bayesian(root, parameters)
# Load state (Remember everything)
bayesian.Xs = Xs
bayesian.ys = ys
len_y = len(ys)
bayesian.gp.fit(np.array(Xs[:len_y]), np.array(ys))
return bayesian
def generate_recommendations(self, history):
"""Generates parameter values and appends to recommendations"""
if history == []:
# default recommendation => random points
# TODO: In production, this must be default values for a baseline
suggestions = np.random.rand(len(self.parameters))
self.Xs.append(suggestions)
recommendations = []
for param_dict, suggestion in zip(self.parameters, suggestions):
recommendations.append(self.convert_parameter(param_dict, suggestion))
return [dict(zip([param["parameter"] for param in self.parameters], recommendations))]
# This function will be called every 5 seconds or so.
        # If there is no change in history, don't give a recommendation,
        # i.e. wait for the previous recommendation to finish
if history[-1].status not in [JobStates.success, JobStates.failure]:
return []
# Update the GP based on results
self.ys.append(history[-1].result)
self.update_gp()
# Generate one recommendation
# Generate "suggestions" which are in [0.0, 1.0] by optimizing EI
suggestions = self.optimize_ei() # length = len(self.parameters), np.array type
self.Xs.append(suggestions)
# Convert the suggestions to recommendations based on parameter type
# Assume one:one mapping between self.parameters and suggestions
recommendations = []
assert len(self.parameters) == len(suggestions)
for param_dict, suggestion in zip(self.parameters, suggestions):
recommendations.append(self.convert_parameter(param_dict, suggestion))
return [dict(zip([param["parameter"] for param in self.parameters], recommendations))]
def update_gp(self):
"""Update gausian regressor parameters"""
Xs_npy = np.array(self.Xs)
ys_npy = np.array(self.ys)
self.gp.fit(Xs_npy, ys_npy)
def optimize_ei(self):
"""Optmize expected improvement functions"""
best_ei = 1.0
best_x = None
dim = len(self.Xs[0])
bounds = [(0, 1)] * len(self.parameters)
for _ in range(self.num_restarts):
x0 = np.random.rand(dim)
res = minimize(self._expected_improvement, x0=x0, bounds=bounds, method='L-BFGS-B')
if res.fun < best_ei:
best_ei = res.fun
best_x = res.x
return best_x.reshape(-1)
"""
Used from:
http://krasserm.github.io/2018/03/21/bayesian-optimization/
"""
def _expected_improvement(self, X):
"""
Computes the EI at points X based on existing samples X_sample
and Y_sample using a Gaussian process surrogate model.
Args:
X: Points at which EI shall be computed (m x d).
X_sample: Sample locations (n x d).
Y_sample: Sample values (n x 1).
gpr: A GaussianProcessRegressor fitted to samples.
xi: Exploitation-exploration trade-off parameter.
Returns:
Expected improvements at points X.
"""
X = X.reshape(1, -1)
mu, sigma = self.gp.predict(X, return_std=True)
mu_sample = self.gp.predict(np.array(self.Xs))
sigma = sigma.reshape(-1, 1)
# Needed for noise-based model,
# otherwise use np.max(Y_sample).
# See also section 2.4 in [1]
mu_sample_opt = np.max(mu_sample)
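        # EI(x) = (mu(x) - mu_best - xi) * Phi(Z) + sigma(x) * phi(Z) with
        # Z = (mu(x) - mu_best - xi) / sigma(x), where Phi/phi are the standard
        # normal CDF/PDF; the value is negated below so it can be minimized.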
with np.errstate(divide='warn'):
imp = mu - mu_sample_opt - self.xi
Z = imp / sigma
ei = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
ei[sigma == 0.0] = 0.0
return -1 * ei[0, 0]
def get_total_epochs(self):
"""Get the epoch/iter number from train.json"""
spec = load_json_spec(self.root + "/../specs/train.json")
max_epoch = 100.0
for key1 in spec:
if key1 in ("training_config", "train_config", "train"):
for key2 in spec[key1]:
if key2 in ("num_epochs", "epochs", "n_epochs", "max_iters"):
max_epoch = float(spec[key1][key2])
elif key2 in ("train_config"):
for key3 in spec[key1][key2]:
if key3 == "runner":
for key4 in spec[key1][key2][key3]:
if key4 == "max_epochs":
max_epoch = float(spec[key1][key2][key3][key4])
elif key1 in ("num_epochs"):
max_epoch = float(spec[key1])
return max_epoch
| tao_front_end_services-main | api/automl/bayesian.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API health utils module"""
| tao_front_end_services-main | api/health_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Health check endpoints
- Liveness
- Readiness
"""
import tempfile
import os
from kubernetes import client, config
def check_logging():
"""Checks if we are able to create and write into files"""
try:
file, path = tempfile.mkstemp()
with os.fdopen(file, 'w') as tmp:
tmp.write('Logging online!')
os.remove(path)
return True
except:
return False
def check_k8s():
"""Checks if we are able to initialize kubernetes client"""
try:
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r', encoding='utf-8') as f:
current_name_space = f.read()
os.getenv('NAMESPACE', default=current_name_space)
config.load_incluster_config()
client.BatchV1Api()
return True
except:
return False
| tao_front_end_services-main | api/health_utils/health_check.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for AutoML jobs"""
import sys
import threading
from handlers.stateless_handlers import get_public_models, get_handler_metadata
from handlers.utilities import download_ptm
from job_utils.workflow import Workflow, Job, Dependency
from job_utils import executor as jobDriver
def get_ptm_id_from_recommendation(specs, network_arch):
"""Dynamicaly obtain ptm id based on the backbone and num_layers chosen"""
backbone_arch = specs.get("backbone", "resnet")
num_layers = specs.get("num_layers", 34)
match_string = f":{backbone_arch}{num_layers}"
ptm_id = None
for model_id in get_public_models():
metadata = get_handler_metadata(model_id)
ngc_path_exists = metadata.get("ngc_path", None) is not None
correct_arch = metadata.get("network_arch", "") == network_arch
ptm_string_match = match_string in metadata.get("ngc_path", "")
if ngc_path_exists and correct_arch and ptm_string_match:
ptm_id = metadata.get("id", None)
return ptm_id
def on_new_automl_job(automl_context, recommendation):
"""Assigns dependencies for the automl recommendation job;
Creates job_context dictionary and enqueues the job to workflow
"""
# Controller interacts with this
# Download NGC pretrained model as a background process
ptm_id = get_ptm_id_from_recommendation(recommendation.specs, automl_context.network)
# Background process to download this PTM
if ptm_id:
job_run_thread = threading.Thread(target=download_ptm, args=(ptm_id,))
job_run_thread.start()
# automl_context is same as JobContext that was created for AutoML job
recommendation_id = recommendation.id
deps = []
deps.append(Dependency(type="automl", name=str(recommendation_id)))
if ptm_id:
deps.append(Dependency(type="automl_ptm", name=str(ptm_id)))
deps.append(Dependency(type="dataset"))
deps.append(Dependency(type="gpu"))
deps.append(Dependency(type="automl_specs"))
job = {
'id': automl_context.id,
'parent_id': None,
'priority': 2,
'action': "train",
'network': automl_context.network,
'handler_id': automl_context.handler_id,
'created_on': automl_context.created_on,
'last_modified': automl_context.last_modified,
'dependencies': deps
}
j = Job(**job)
Workflow.enqueue(j)
print("Recommendation submitted to workflow", file=sys.stderr)
def on_delete_automl_job(handler_id, job_id):
"""Dequeue the automl job"""
# AutoML handler stop would handle this
# automl_context is same as JobContext that was created for AutoML job
Workflow.dequeue(handler_id, job_id)
def on_cancel_automl_job(job_id):
"""Delete the job from k8's jobs"""
jobDriver.delete(job_id)
| tao_front_end_services-main | api/job_utils/automl_job_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API job utils module"""
| tao_front_end_services-main | api/job_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Workflow manager for normal model actions"""
import os
from job_utils.workflow import Workflow, Job, Dependency
from handlers.stateless_handlers import get_handler_spec_root
from handlers.utilities import load_json_spec
def on_new_job(job_contexts):
"""Assigns dependencies for a new job;
Creates job_context dictionary and enqueues the job to workflow
"""
for job_context in job_contexts:
deps = []
deps.append(Dependency(type="parent"))
deps.append(Dependency(type="specs"))
deps.append(Dependency(type="model"))
deps.append(Dependency(type="dataset"))
if job_context.action not in ["convert", "dataset_convert", "kmeans"]:
deps.append(Dependency(type="gpu"))
elif job_context.action in ("convert", "gen_trt_engine"):
            spec_json_path = os.path.join(get_handler_spec_root(job_context.handler_id), job_context.action + ".json")
config = load_json_spec(spec_json_path)
if config.get("platform"):
deps.append(Dependency(type="gpu", name=config["platform"]))
job = {
'id': job_context.id,
'parent_id': job_context.parent_id,
'priority': 1,
'action': job_context.action,
'network': job_context.network,
'handler_id': job_context.handler_id,
'created_on': job_context.created_on,
'last_modified': job_context.last_modified,
'dependencies': deps
}
j = Job(**job)
Workflow.enqueue(j)
def on_delete_job(handler_id, job_id):
"""Dequeue a job"""
Workflow.dequeue(handler_id, job_id)
| tao_front_end_services-main | api/job_utils/workflow_driver.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Job Workflow modules"""
import os
import yaml
import threading
import functools
import datetime
import time
import uuid
import glob
from pathlib import Path
from queue import PriorityQueue
from dataclasses import dataclass, field, asdict
from handlers.utilities import read_network_config
from handlers.actions import ACTIONS_TO_FUNCTIONS, AutoMLPipeline
from handlers.stateless_handlers import get_root, get_handler_root
from job_utils.dependencies import dependency_type_map, dependency_check_default
def synchronized(wrapped):
"""Decorator function for synchronizing threaded functions"""
lock = threading.Lock()
@functools.wraps(wrapped)
def _wrap(*args, **kwargs):
with lock:
return wrapped(*args, **kwargs)
return _wrap
@dataclass
class IdedItem:
"""Base class for representing id's in uuid"""
id: uuid.UUID = field(default=uuid.uuid4())
@dataclass(order=True)
class PrioritizedItem:
"""Base class for prioritizing items"""
priority: int = field(default=1)
created_on: str = field(default=datetime.datetime.now().isoformat())
@dataclass
class Dependency:
"""Base class for representing dependecies"""
type: str = field(default=None)
name: str = field(default=None)
@dataclass
class Job(PrioritizedItem, IdedItem):
"""Class for representing jobs"""
last_modified: str = field(compare=False, default=datetime.datetime.now().isoformat())
action: str = field(compare=False, default=None)
dependencies: list = field(compare=False, default=None)
# More parameters for Job from JobContext
parent_id: uuid.UUID = field(compare=False, default=uuid.uuid4())
network: str = field(compare=False, default=None)
handler_id: uuid.UUID = field(compare=False, default=uuid.uuid4())
def dependency_check(job_context, dependency):
"""Checks if depencies for the job are met"""
dependency_check_fn = dependency_type_map.get(dependency.type, dependency_check_default)
dependency_met = dependency_check_fn(job_context, dependency)
return dependency_met
def execute_job(job_context):
"""Starts a thread on pipelines present in actions.py"""
isautoml = False
for dep in job_context.dependencies:
if dep.type == "automl":
isautoml = True
break
if not isautoml:
# Get action, network
action = job_context.action
network = job_context.network
# Get the correct ActionPipeline - build specs, build run command, launch K8s job, monitor status, run post-job steps
network_config = read_network_config(network)
action_pipeline_name = network_config["api_params"]["actions_pipe"].get(action, "")
action_pipeline = ACTIONS_TO_FUNCTIONS[action_pipeline_name]
_Actionpipeline = action_pipeline(job_context)
# Thread this!
job_run_thread = threading.Thread(target=_Actionpipeline.run, args=())
job_run_thread.start()
else:
# AUTOML Job
# TODO: At test time, sequentially run it and not as a thread to catch errors
_AutoMLPipeline = AutoMLPipeline(job_context)
job_run_thread = threading.Thread(target=_AutoMLPipeline.run, args=())
job_run_thread.start()
# AutoMLPipeline(job_context)
return True
@synchronized
def still_exists(job_to_check):
"""Checks if the the job is yet to be executed/queued or not"""
filename = os.path.join(get_handler_root(job_to_check.handler_id), "jobs.yaml")
jobs = read_jobs(filename)
for _, job in enumerate(jobs):
if job.id == job_to_check.id:
return True
return False
@synchronized
def report_healthy(message, clear=False):
"""Writes healthy message with timestamp"""
path = "/shared/health.txt"
Path(path).touch()
mode = "w" if clear else "a"
with open(path, mode, encoding='utf-8') as f:
f.write(f"Healthy at {datetime.datetime.now().isoformat()}\n")
if message:
f.write(str(message) + "\n")
@synchronized
def read_jobs(yaml_file):
"""Reads a job yaml file and convert to job contexts"""
jobs = []
if not os.path.exists(yaml_file):
return []
with open(yaml_file, "r", encoding='utf-8') as file:
jobs_raw = yaml.safe_load(file)
if not jobs_raw:
return []
# convert yaml to Jobs and put it in the priority queue
for job in jobs_raw:
j = Job(**job)
j.dependencies = []
for d in job.get('dependencies'):
j.dependencies.append(Dependency(**d))
jobs.append(j)
return jobs
@synchronized
def write_jobs(yaml_file, jobs):
"""Writes list of Job objects into the yaml_file"""
with open(yaml_file, 'w', encoding='utf-8') as file:
yaml.dump([asdict(i) for i in jobs], file, sort_keys=False)
@synchronized
def scan_for_jobs():
"""Scans for new jobs and queues them if dependencies are met"""
while True:
report_healthy("Workflow has waken up", clear=True)
# Create global queue
queue = PriorityQueue()
# Read all jobs.yaml files into one queue
pattern = get_root() + "**/**/**/jobs.yaml"
job_files = glob.glob(pattern)
for job_file in job_files:
for j in read_jobs(job_file):
queue.put(j)
len_q = len(queue.queue)
report_healthy(f"Found {len_q} pending jobs")
# Parse to dequeue
jobs_to_dequeue = []
list.sort(queue.queue)
for i in range(len(queue.queue)):
# check dependencies
job = queue.queue[i]
report_healthy(f"{job.id} with action {job.action}: Checking dependencies")
report_healthy(f"Total dependencies: {len(job.dependencies)}")
all_met = True
for dep in job.dependencies:
if not dependency_check(job, dep):
report_healthy(f"Unmet dependency: {dep.type}")
all_met = False
# if all dependencies are met
if all_met and still_exists(job):
# execute job
                # check if the job is still present in jobs.yaml
report_healthy(f"{job.id} with action {job.action}: All dependencies met")
if execute_job(job):
# dequeue job
jobs_to_dequeue.append(job)
for job in jobs_to_dequeue:
Workflow.dequeue(job.handler_id, job.id)
report_healthy("Workflow going to sleep")
time.sleep(15)
class Workflow:
"""
Workflow is an abstraction that can run on multiple threads. Its use is to be
able to perform dependency checks and spawn off K8s jobs
Currently, jobs are packaged inside the ActionPipeline that runs as a thread
"""
@staticmethod
def start():
"""Method used to initialize the workflow. Starts a thread if thread is not there from before"""
# Make sure there is no other Workflow thread
for thread in threading.enumerate():
if thread.name == "WorkflowThreadTAO":
return False
t = threading.Thread(target=scan_for_jobs)
t.name = 'WorkflowThreadTAO'
t.daemon = True
t.start()
return True
@staticmethod
def enqueue(job):
"""Method used from outside to put a job into the workflow"""
# Simply prints the job inside the filename
# Called only by on_new_job()
filename = os.path.join(get_handler_root(job.handler_id), "jobs.yaml")
jobs = read_jobs(filename)
jobs.append(job)
write_jobs(filename, jobs)
@staticmethod
def dequeue(handler_id, job_id):
"""Method used from outside to remove a job from the workflow"""
# Simply remove the job from the filename
# Read all jobs
filename = os.path.join(get_handler_root(handler_id), "jobs.yaml")
jobs = read_jobs(filename)
# Delete job_id's job from the list
for idx, job in enumerate(jobs):
if job.id == job_id:
del jobs[idx]
# Write it back as is
write_jobs(filename, jobs)
@staticmethod
def healthy():
"""Method used to see if the workflow thread is running"""
try:
path = "/shared/health.txt"
            # the health file must have been modified within the last 100 seconds
return (time.time() - os.path.getmtime(path)) <= 100
except:
return False
| tao_front_end_services-main | api/job_utils/workflow.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dependency check modules
1. Dataset - train (tfrecords, labels, images), evaluate, inference, calibration datasets depending on task
2. Model - ptm, resume, .tlt from parent, .engine from parent, class map for some tasks, cal cache from parent for convert
3. Platflorm - GPU
4. Specs validation - Use Steve's code hardening
5. Parent job done? - Poll status from metadata
"""
import os
import json
from handlers.utilities import get_handler_root, load_json_spec, search_for_ptm, NO_PTM_MODELS
from handlers.stateless_handlers import get_handler_spec_root, get_handler_job_metadata, get_handler_metadata
from job_utils import executor
def dependency_check_parent(job_context, dependency):
"""Check if parent job is valid and in Done status"""
parent_job_id = job_context.parent_id
# If no parent job, this is always True
if parent_job_id is None:
return True
handler_id = job_context.handler_id
parent_status = get_handler_job_metadata(handler_id, parent_job_id).get("status", "Error")
parent_root = os.path.join(get_handler_root(handler_id), parent_job_id)
# Parent Job must be done
# Parent job output folder must exist
return bool(parent_status == "Done" and os.path.isdir(parent_root))
def dependency_check_specs(job_context, dependency):
"""Check if valid spec exists for the requested action"""
network = job_context.network
action = job_context.action
handler_id = job_context.handler_id
DIR_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CSV_PATH = os.path.join(DIR_PATH, "specs_utils", "specs", network, f"{network} - {action}.csv")
if not os.path.exists(CSV_PATH):
metadata = get_handler_metadata(handler_id)
# Try secondary format for CSV_PATH => "<network> - <action>__<dataset-format>.csv"
fmt = metadata.get("format", "_")
CSV_PATH = os.path.join(DIR_PATH, "specs_utils", "specs", network, f"{network} - {action}__{fmt}.csv")
handler_spec_root = get_handler_spec_root(handler_id)
spec_json_path = os.path.join(handler_spec_root, action + ".json")
load_json_spec(spec_json_path)
return bool(os.path.exists(spec_json_path))
def dependency_check_dataset(job_context, dependency):
"""Returns always true for dataset dependency check"""
return True
def dependency_check_model(job_context, dependency):
"""Checks if valid ptm model exists"""
network = job_context.network
handler_id = job_context.handler_id
handler_metadata = get_handler_metadata(handler_id)
# If it is a dataset, no model dependency
if "train_datasets" not in handler_metadata.keys():
return True
if network in NO_PTM_MODELS:
return True
ptm_ids = handler_metadata.get("ptm", None)
for ptm_id in ptm_ids:
if not ptm_id:
return False
if not search_for_ptm(get_handler_root(ptm_id), network=network):
return False
return True
def dependency_check_automl_specs(job_context, dependency):
"""Checks if train.json is present for automl job"""
spec_json_path = os.path.join(get_handler_spec_root(job_context.handler_id), "train.json")
return bool(os.path.exists(spec_json_path))
def dependency_check_gpu(job_context, dependency):
"""Check if GPU dependency is met"""
return executor.dependency_check(accelerator=dependency.name)
def dependency_check_default(job_context, dependency):
"""Returns a default value of False when dependency type is not present in dependency_type_map"""
return False
def dependency_check_automl(job_context, dependency):
"""Makes sure the controller.json has the rec_number requested at the time of creation"""
rec_number = int(dependency.name)
root = get_handler_root(job_context.handler_id)
# Check if recommendation number is there and can be loaded
file_path = root + f"/{job_context.id}/controller.json"
if not os.path.exists(file_path):
return False
with open(file_path, 'r', encoding='utf-8') as f:
recs_dict = json.loads(f.read())
try:
recs_dict[rec_number]
return True
except:
return False
def dependency_check_automl_ptm(job_context, dependency):
"""Checks if valid ptm model exists for automl job"""
network = job_context.network
ptm_id = dependency.name
if ptm_id:
return bool(search_for_ptm(get_handler_root(ptm_id), network=network))
return True
dependency_type_map = {
'parent': dependency_check_parent,
'specs': dependency_check_specs,
'dataset': dependency_check_dataset,
'model': dependency_check_model,
'gpu': dependency_check_gpu,
"automl": dependency_check_automl,
"automl_ptm": dependency_check_automl_ptm,
"automl_specs": dependency_check_automl_specs
}
| tao_front_end_services-main | api/job_utils/dependencies.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Kubernetes job manager modules"""
from kubernetes import client, config
import os
import sys
def create(job_name, image, command, num_gpu=-1, accelerator=None):
"""Creates a kubernetes job"""
command = 'umask 0 && ' + command
if num_gpu == -1:
num_gpu = int(os.getenv('NUM_GPU_PER_NODE', default='1'))
telemetry_opt_out = os.getenv('TELEMETRY_OPT_OUT', default='no')
wandb_api_key = os.getenv('WANDB_API_KEY', default='')
clearml_web_host = os.getenv('CLEARML_WEB_HOST', default='https://app.clear.ml')
clearml_api_host = os.getenv('CLEARML_API_HOST', default='https://api.clear.ml')
clearml_files_host = os.getenv('CLEARML_FILES_HOST', default='https://files.clear.ml')
clearml_api_access_key = os.getenv('CLEARML_API_ACCESS_KEY', default='')
clearml_api_secret_key = os.getenv('CLEARML_API_SECRET_KEY', default='')
node_selector = {'accelerator': str(accelerator)}
if not accelerator:
node_selector = None
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r', encoding='utf-8') as f:
current_name_space = f.read()
name_space = os.getenv('NAMESPACE', default=current_name_space)
claim_name = os.getenv('CLAIMNAME', 'tao-toolkit-api-pvc')
image_pull_secret = os.getenv('IMAGEPULLSECRET', default='imagepullsecret')
config.load_incluster_config()
api_instance = client.BatchV1Api()
shared_volume_mount = client.V1VolumeMount(
name="shared-data",
mount_path="/shared")
dshm_volume_mount = client.V1VolumeMount(
name="dshm",
mount_path="/dev/shm")
resources = client.V1ResourceRequirements(
limits={
'nvidia.com/gpu': str(num_gpu)
})
capabilities = client.V1Capabilities(
add=['SYS_PTRACE']
)
security_context = client.V1SecurityContext(
capabilities=capabilities
)
num_gpu_env = client.V1EnvVar(
name="NUM_GPU_PER_NODE",
value=str(num_gpu))
telemetry_opt_out_env = client.V1EnvVar(
name="TELEMETRY_OPT_OUT",
value=telemetry_opt_out)
wandb_api_key_env = client.V1EnvVar(
name="WANDB_API_KEY",
value=wandb_api_key)
clearml_web_host_env = client.V1EnvVar(
name="CLEARML_WEB_HOST",
value=clearml_web_host)
clearml_api_host_env = client.V1EnvVar(
name="CLEARML_API_HOST",
value=clearml_api_host)
clearml_files_host_env = client.V1EnvVar(
name="CLEARML_FILES_HOST",
value=clearml_files_host)
clearml_api_access_key_env = client.V1EnvVar(
name="CLEARML_API_ACCESS_KEY",
value=clearml_api_access_key)
clearml_api_secret_key_env = client.V1EnvVar(
name="CLEARML_API_SECRET_KEY",
value=clearml_api_secret_key)
container = client.V1Container(
name="container",
image=image,
env=[
num_gpu_env,
telemetry_opt_out_env,
wandb_api_key_env,
clearml_web_host_env, clearml_api_host_env, clearml_files_host_env, clearml_api_access_key_env, clearml_api_secret_key_env
],
command=["/bin/bash", "-c"],
args=[command],
resources=resources,
volume_mounts=[shared_volume_mount, dshm_volume_mount],
security_context=security_context)
shared_volume = client.V1Volume(
name="shared-data",
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(claim_name=claim_name))
dshm_volume = client.V1Volume(
name="dshm",
empty_dir=client.V1EmptyDirVolumeSource(medium='Memory'))
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={"purpose": "tao-toolkit-job"}
),
spec=client.V1PodSpec(
image_pull_secrets=[client.V1LocalObjectReference(name=image_pull_secret)],
containers=[container],
volumes=[shared_volume, dshm_volume],
node_selector=node_selector,
restart_policy="Never"))
spec = client.V1JobSpec(
ttl_seconds_after_finished=100,
template=template,
backoff_limit=0)
job = client.V1Job(
api_version="batch/v1",
kind="Job",
metadata=client.V1ObjectMeta(name=job_name),
spec=spec)
if os.getenv('BACKEND') == "moebius-cloud":
# Create an instance of the API class
# example https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CustomObjectsApi.md#create_namespaced_custom_object
api_instance = client.CustomObjectsApi(None)
group = 'moebius-job-manager.nvidia.io' # str | The custom resource's group name
version = 'v1alpha1' # str | The custom resource's version
namespace = name_space
plural = 'cloudjobs' # str | The custom resource's plural name. For TPRs this would be lowercase plural kind.
cloud_job_body = {
"apiVersion": "moebius-job-manager.nvidia.io/v1alpha1",
"kind": "CloudJob",
"metadata": {
"name": job_name + "-moebius",
"labels": {"job-name": job_name,
"purpose": "tao-toolkit-job",
"gputype": str(accelerator)
}
},
"spec": {"job": job,
"jobName": job_name,
"jobAction": "validate",
"jobGpu": str(accelerator),
"jobType": "train_model"
}
}
try:
api_instance.create_namespaced_custom_object(group, version, namespace, plural, body=cloud_job_body)
return
except Exception as e:
print("CloudJob creation failuers")
print(e)
return
else:
try:
api_instance.create_namespaced_job(
body=job,
namespace=name_space)
return
except:
return
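# Illustrative usage sketch (not part of the original module): the job name, image,
# and command below are placeholder values; in the service, `create` is normally
# invoked by the job workflow once all dependencies are satisfied.
def _example_create_job():
    create(
        job_name="tao-job-example",  # placeholder job name
        image="nvcr.io/nvidia/tao/tao-toolkit:latest",  # placeholder image
        command="detectnet_v2 train -e /shared/specs/train.json",  # placeholder command
        num_gpu=1,
    )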
def status(job_name):
"""Returns status of kubernetes job"""
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r', encoding='utf-8') as f:
current_name_space = f.read()
name_space = os.getenv('NAMESPACE', default=current_name_space)
config.load_incluster_config()
cloudjob_api_response = None
if os.getenv('BACKEND') == "moebius-cloud":
api_instance = client.CustomObjectsApi(None)
group = 'moebius-job-manager.nvidia.io' # str | The custom resource's group name
version = 'v1alpha1' # str | The custom resource's version
plural = 'cloudjobs' # str | The custom resource's plural name. For TPRs this would be lowercase plural kind.
name = job_name + "-moebius" # str | the custom object's name
try:
cloudjob_api_response = api_instance.get_namespaced_custom_object(group, version, name_space, plural, name)
except Exception as e:
print(e)
return "Error"
api_instance = client.BatchV1Api()
try:
api_response = api_instance.read_namespaced_job_status(
name=job_name,
namespace=name_space)
# print("Job status='%s'" % str(api_response.status), file=sys.stderr)
# active_pods = 0 if api_response.status.active is None else api_response.status.active #TODO: NOTE: Currently assuming one pod
if api_response.status.succeeded is not None:
return "Done"
if api_response.status.failed is not None:
return "Error"
return "Running"
except:
# moebius-cloud is in process of creating batchjob.
if os.getenv('BACKEND') == "moebius-cloud":
if cloudjob_api_response is not None:
return "Creating"
return "Error"
def delete(job_name):
"""Deletes a kubernetes job"""
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r', encoding='utf-8') as f:
current_name_space = f.read()
name_space = os.getenv('NAMESPACE', default=current_name_space)
config.load_incluster_config()
if os.getenv('BACKEND') == "moebius-cloud":
api_instance = client.CustomObjectsApi(None)
group = 'moebius-job-manager.nvidia.io' # str | The custom resource's group name
version = 'v1alpha1' # str | The custom resource's version
namespace = name_space
plural = 'cloudjobs' # str | The custom resource's plural name. For TPRs this would be lowercase plural kind.
name = job_name + "-moebius" # str | the custom object's name
grace_period_seconds = 10 # int | The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. (optional)
orphan_dependents = True # bool | Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. (optional)
body = client.V1DeleteOptions() # V1DeleteOptions | (optional)
try:
api_response = api_instance.delete_namespaced_custom_object(group, version, namespace, plural, name, grace_period_seconds=grace_period_seconds, orphan_dependents=orphan_dependents, body=body, propagation_policy='Foreground')
return
except Exception as e:
print("CloudJob failed to delete.")
print(e)
return
else:
api_instance = client.BatchV1Api()
try:
api_response = api_instance.delete_namespaced_job(
name=job_name,
namespace=name_space,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
print(f"Job deleted. status='{str(api_response.status)}'", file=sys.stderr)
return
except:
print("Job failed to delete.", file=sys.stderr)
return
def list_namespace_jobs():
"""List kubernetes job in a namespace"""
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r', encoding='utf-8') as f:
current_name_space = f.read()
name_space = os.getenv('NAMESPACE', default=current_name_space)
config.load_incluster_config()
api_instance = client.BatchV1Api()
api_response = None
try:
api_response = api_instance.list_namespaced_job(namespace=name_space, label_selector="purpose=tao-toolkit-job", watch=False, limit=1000)
except:
pass
return api_response
def dependency_check(num_gpu=-1, accelerator=None):
"""Checks for GPU dependency"""
if num_gpu == -1:
num_gpu = int(os.getenv('NUM_GPU_PER_NODE', default='1'))
label_selector = 'accelerator=' + str(accelerator)
if not accelerator:
label_selector = None
config.load_incluster_config()
v1 = client.CoreV1Api()
nodes = {}
# how many GPUs allocatable per node
ret = v1.list_node(label_selector=label_selector)
if ret.items:
for i in ret.items:
if i.status and i.status.allocatable:
for k, v in i.status.allocatable.items():
if k == 'nvidia.com/gpu':
nodes[i.metadata.name] = int(v)
break
# how many GPUs requested for each node
ret = v1.list_pod_for_all_namespaces()
if ret.items:
for i in ret.items:
if i.spec.node_name is not None:
if i.spec and i.spec.containers:
for c in i.spec.containers:
if c.resources and c.resources.requests:
for k, v in c.resources.requests.items():
if k == 'nvidia.com/gpu':
current = nodes.get(i.spec.node_name, 0)
nodes[i.spec.node_name] = max(0, current - int(v))
# do I have enough GPUs on one of the nodes
for k, v in nodes.items():
if v >= num_gpu:
return True
return False
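# Illustrative sketch (not part of the original module): a caller would typically
# call dependency_check(...) before submitting a job with create(...) and then
# poll status(...) until the job reaches a terminal state. The helper name and
# poll interval below are placeholders.
def _example_wait_for_job(job_name, poll_seconds=30):
    """Poll a job until it reaches a terminal state ("Done" or "Error")"""
    import time
    while True:
        state = status(job_name)
        if state not in ("Running", "Creating"):
            return state
        time.sleep(poll_seconds)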
| tao_front_end_services-main | api/job_utils/executor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API response filtering modules"""
def apply(args, data):
"""Filter results based on the arguments provided"""
filter_sort = args.get('sort')
filter_name = args.get('name')
filter_type = args.get('type')
filter_arch = args.get('network_arch')
filter_read_only = args.get('read_only')
if filter_name is not None:
data = list(filter(lambda d: d.get('name') == filter_name, data))
if filter_type is not None:
data = list(filter(lambda d: d.get('type') == filter_type, data))
if filter_arch is not None:
data = list(filter(lambda d: d.get('network_arch') == filter_arch, data))
if filter_read_only is not None:
filter_read_only_as_boolean = filter_read_only == 'true'
data = list(filter(lambda d: d.get('read_only') == filter_read_only_as_boolean, data))
if filter_sort == 'name-ascending':
data = sorted(data, key=lambda d: '' + d.get('name') + ':' + d.get('version'), reverse=False)
elif filter_sort == 'name-descending':
data = sorted(data, key=lambda d: '' + d.get('name') + ':' + d.get('version'), reverse=True)
elif filter_sort == 'date-ascending':
data = sorted(data, key=lambda d: d.get('last_modified'), reverse=False)
else:  # date-descending (also the default when no sort is specified)
data = sorted(data, key=lambda d: d.get('last_modified'), reverse=True)
return data
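# Illustrative usage sketch (not part of the original module): `args` mirrors the
# request query parameters and `data` is a list of metadata dictionaries; the
# records below are made-up examples.
def _example_filtering():
    data = [
        {"name": "model_a", "version": "1.0", "network_arch": "detectnet_v2",
         "type": "vision", "read_only": False, "last_modified": "2023-01-02"},
        {"name": "model_b", "version": "1.0", "network_arch": "classification",
         "type": "vision", "read_only": True, "last_modified": "2023-01-01"},
    ]
    args = {"network_arch": "detectnet_v2", "sort": "name-ascending"}
    # Keeps only the detectnet_v2 record, sorted by "name:version"
    return apply(args, data)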
| tao_front_end_services-main | api/filter_utils/filtering.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API filtering responses module"""
| tao_front_end_services-main | api/filter_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API response pagination modules"""
def apply(args, data):
"""Apply pagination to reduce the number of results that are returned"""
pagination_skip = args.get('skip')
pagination_size = args.get('size')
if pagination_skip is not None:
try:
data = data[int(pagination_skip):]
except:
pass
if pagination_size is not None:
try:
data = data[:int(pagination_size)]
except:
pass
return data
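# Illustrative usage sketch (not part of the original module): `skip` and `size`
# arrive as query-string values, so they are passed as strings here on purpose.
def _example_pagination():
    data = list(range(10))
    args = {"skip": "2", "size": "3"}
    return apply(args, data)  # -> [2, 3, 4]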
| tao_front_end_services-main | api/filter_utils/pagination.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset upload modules"""
import tarfile
import os
import glob
import sys
from handlers.utilities import Code
from handlers.utilities import run_system_command, get_handler_root
# Simple helper class for ease of code migration
class SimpleHandler:
"""Helper class holding dataset information"""
def __init__(self, handler_metadata):
"""Initialize the Handler helper class"""
self.root = get_handler_root(handler_metadata.get("id"))
self.type = handler_metadata.get("type")
self.format = handler_metadata.get("format")
def _extract_images(tar_path, dest):
"""Function to extract images, other directories on same level as images to root of dataset"""
# Infer how many components to strip to get images,labels to top of dataset directory
# Assumes: images, other necessary directories are in the same level
with tarfile.open(tar_path) as tar:
strip_components = 0
names = [tinfo.name for tinfo in tar.getmembers()]
for name in names:
if "/images/" in name:
strip_components = name.split("/").index("images")
break
# Build shell command for untarring
untar_command = f"tar -xf {tar_path} --strip-components={strip_components} -C {dest}/"
# Run shell command
print("Untarring data started", file=sys.stderr)
run_system_command(untar_command)
print("Untarring data complete", file=sys.stderr)
# Remove .tar.gz file
print("Removing data tar file", file=sys.stderr)
os.remove(tar_path)
print("Deleted data tar file", file=sys.stderr)
def write_dir_contents(directory, file):
"""Write contents of a directory to a file"""
with open(file, "w", encoding='utf-8') as f:
for dir_files in sorted(glob.glob(directory + "/*")):
f.write(dir_files + "\n")
def _untar_and_delete(tar_path, dest):
"""Run untar command and delete the tar file"""
# Build shell command for untarring
untar_command = f"tar -xf {tar_path} -C {dest}/"
# Run shell command
run_system_command(untar_command)
# Remove .tar.gz file
os.remove(tar_path)
def object_detection(tar_path, handler_metadata):
"""
OD Dataset structure
Upload - uploads and untars
- /images
- /labels
Convert
- /tfrecords (generated by dataset convert)
- /tfrecords/classes.json - A json file with a list
Augment
- Creates temp output folders and moves them to /images and /labels
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "images"))
if handler.format == "kitti":
assert os.path.exists(os.path.join(handler.root, "labels"))
elif handler.format == "coco":
assert os.path.exists(os.path.join(handler.root, "annotations.json"))
elif handler.format == "coco_raw":
assert os.path.exists(os.path.join(handler.root, "label_map.txt"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
instance_segmentation = object_detection
def semantic_segmentation(tar_path, handler_metadata):
"""
Upload - uploads and creates .txt files
- /images/
- /masks/
No Actions
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and masks paths exist
assert os.path.exists(os.path.join(handler.root, "images"))
write_dir_contents(os.path.join(handler.root, "images"), os.path.join(handler.root, "images.txt"))
if handler.format == "unet":
assert os.path.exists(os.path.join(handler.root, "masks"))
write_dir_contents(os.path.join(handler.root, "masks"), os.path.join(handler.root, "masks.txt"))
elif handler.format == "coco":
assert os.path.exists(os.path.join(handler.root, "annotations.json"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def character_recognition(tar_path, handler_metadata):
"""
LPRNET Dataset structure
Upload - uploads and untars
- /images
- /labels
- /characters.txt
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "image"))
if handler.format != "raw":
assert os.path.exists(os.path.join(handler.root, "label"))
assert os.path.exists(os.path.join(handler.root, "characters.txt"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def ocrnet(tar_path, handler_metadata):
"""
OCRNET Dataset structure
Upload - uploads and untars
- /character_list: character set file
- /train/gt_new.txt: train dataset
- /test/gt_new.txt: val dataset
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "character_list"))
assert os.path.exists(os.path.join(handler.root, "train")) or os.path.exists(os.path.join(handler.root, "test"))
if os.path.exists(os.path.join(handler.root, "train")):
os.system(f"chmod -R 777 {os.path.join(handler.root, 'train')}")
if os.path.exists(os.path.join(handler.root, "test")):
os.system(f"chmod -R 777 {os.path.join(handler.root, 'test')}")
assert os.path.exists(os.path.join(handler.root, "train/gt_new.txt")) or os.path.exists(os.path.join(handler.root, "test/gt_new.txt"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def ocdnet(tar_path, handler_metadata):
"""
OCDNET Dataset structure
Upload - uploads and untars
- /train/img: train images
- /train/gt: train ground_truth
- /test/img: val images
- /test/gt: val ground_truth
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "train/img")) or os.path.exists(os.path.join(handler.root, "test/img"))
assert os.path.exists(os.path.join(handler.root, "train/gt")) or os.path.exists(os.path.join(handler.root, "test/gt"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def optical_inspection(tar_path, handler_metadata):
"""
Optical Inspection Dataset structure
Upload - uploads and untars
- images: images
- dataset.csv: ground_truth
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "images")) and os.path.exists(os.path.join(handler.root, "dataset.csv"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def ml_recog(tar_path, handler_metadata):
"""
Metric Learning Recognition Dataset structure
Upload - uploads and untars
- metric_learning_recognition/
- retail-product-checkout-dataset/
- retail-product-checkout-dataset_classification_demo/
- known_classes
- unknown_classes
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "metric_learning_recognition"))
assert os.path.exists(os.path.join(handler.root, "metric_learning_recognition", "retail-product-checkout-dataset_classification_demo"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def image_classification(tar_path, handler_metadata):
"""
Raw:
images/
Default:
images/<class1>
images/<class2>
...
Custom:
images/
train.csv
val.csv
"""
handler = SimpleHandler(handler_metadata)
try:
print("Extracting images from data tarball file", file=sys.stderr)
_extract_images(tar_path, handler.root)
print("Extraction complete", file=sys.stderr)
# Validate images and labels paths exist
assert len(glob.glob(os.path.join(handler.root, "images*"))) == 1
if handler.format == "custom":
assert os.path.exists(os.path.join(handler.root, "val.csv"))
if handler.format == "classification_pyt":
assert os.path.exists(os.path.join(handler.root, "classes.txt"))
msg = "Upload successful"
print("Returning sucess code to the api call", file=sys.stderr)
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def bpnet(tar_path, handler_metadata):
"""
BpNet Dataset structure
Upload - uploads and untars
- /annotations/person_keypoints_train2017.json
- /annotations/person_keypoints_val2017.json
- /train2017
- /val2017
- /coco_spec.json
- /bpnet_18joints.json
- /infer_spec.yaml
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "annotations"))
assert os.path.exists(os.path.join(handler.root, "annotations", "person_keypoints_train2017.json"))
assert os.path.exists(os.path.join(handler.root, "annotations", "person_keypoints_val2017.json"))
assert os.path.exists(os.path.join(handler.root, "train2017"))
assert os.path.exists(os.path.join(handler.root, "val2017"))
assert os.path.exists(os.path.join(handler.root, "coco_spec.json"))
os.system(f"chmod -R 777 {os.path.join(handler.root, 'coco_spec.json')}")
assert os.path.exists(os.path.join(handler.root, "bpnet_18joints.json"))
assert os.path.exists(os.path.join(handler.root, "infer_spec.yaml"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def fpenet(tar_path, handler_metadata):
"""
Default:
data/afw
data/afw/afw.json or data/afw_10/afw_10.json
data.json
...
"""
handler = SimpleHandler(handler_metadata)
try:
print("Extracting images from data tarball file", file=sys.stderr)
_extract_images(tar_path, handler.root)
print("Extraction complete", file=sys.stderr)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "data", "afw"))
assert (os.path.exists(os.path.join(handler.root, "data", "afw", "afw.json")) or os.path.exists(os.path.join(handler.root, "data", "afw_10", "afw_10.json")))
assert os.path.exists(os.path.join(handler.root, "data.json"))
os.system(f"chmod -R 777 {os.path.join(handler.root, 'data')}")
os.system(f"chmod -R 777 {os.path.join(handler.root, 'data.json')}")
msg = "Upload successful"
print("Returning sucess code to the api call", file=sys.stderr)
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def action_recognition(tar_path, handler_metadata):
"""
Default:
train
test
...
"""
handler = SimpleHandler(handler_metadata)
try:
print("Extracting images from data tarball file", file=sys.stderr)
_extract_images(tar_path, handler.root)
print("Extraction complete", file=sys.stderr)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "train"))
assert os.path.exists(os.path.join(handler.root, "test"))
msg = "Upload successful"
print("Returning sucess code to the api call", file=sys.stderr)
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def pointpillars(tar_path, handler_metadata):
"""
PointPillars Dataset structure
Upload - uploads and untars
- /train/label
- /train/lidar
- /val/label
- /val/lidar
"""
handler = SimpleHandler(handler_metadata)
try:
_extract_images(tar_path, handler.root)
# Validate label and lidar directories exist for the train and val splits
assert os.path.exists(os.path.join(handler.root, "train", "label"))
assert os.path.exists(os.path.join(handler.root, "train", "lidar"))
assert os.path.exists(os.path.join(handler.root, "val", "label"))
assert os.path.exists(os.path.join(handler.root, "val", "lidar"))
msg = "Upload successful"
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def pose_classification(tar_path, handler_metadata):
"""
Default:
kinetics/nvidia : root_folder_path
files:
train_data.npy
train_label.npy
val_data.pkl
val_label.pkl
...
"""
handler = SimpleHandler(handler_metadata)
try:
print("Extracting images from data tarball file", file=sys.stderr)
_extract_images(tar_path, handler.root)
print("Extraction complete", file=sys.stderr)
# Validate the skeleton data and label files exist
assert os.path.exists(os.path.join(handler.root, "kinetics")) or os.path.exists(os.path.join(handler.root, "nvidia"))
model_type = ""
if os.path.exists(os.path.join(handler.root, "kinetics")):
model_type = "kinetics"
elif os.path.exists(os.path.join(handler.root, "nvidia")):
model_type = "nvidia"
assert os.path.exists(os.path.join(handler.root, model_type, "train_data.npy")) and os.path.exists(os.path.join(handler.root, model_type, "train_label.pkl")) and os.path.exists(os.path.join(handler.root, model_type, "val_data.npy")) and os.path.exists(os.path.join(handler.root, model_type, "val_label.pkl"))
msg = "Upload successful"
print("Returning sucess code to the api call", file=sys.stderr)
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
def re_identification(tar_path, handler_metadata):
"""
Default:
sample_train
sample_test
sample_query
...
"""
handler = SimpleHandler(handler_metadata)
try:
print("Extracting images from data tarball file", file=sys.stderr)
_extract_images(tar_path, handler.root)
print("Extraction complete", file=sys.stderr)
# Validate images and labels paths exist
assert os.path.exists(os.path.join(handler.root, "sample_train")) and os.path.exists(os.path.join(handler.root, "sample_test")) and os.path.exists(os.path.join(handler.root, "sample_query"))
msg = "Upload successful"
print("Returning sucess code to the api call", file=sys.stderr)
return Code(201, {}, msg)
except:
msg = "Invalid tar file / tar file with no images and/or labels directory"
return Code(400, {}, msg)
DS_UPLOAD_TO_FUNCTIONS = {"object_detection": object_detection,
"semantic_segmentation": semantic_segmentation,
"character_recognition": character_recognition,
"image_classification": image_classification,
"instance_segmentation": instance_segmentation,
"bpnet": bpnet,
"fpenet": fpenet,
"action_recognition": action_recognition,
"ml_recog": ml_recog,
"ocdnet": ocdnet,
"ocrnet": ocrnet,
"optical_inspection": optical_inspection,
"pointpillars": pointpillars,
"pose_classification": pose_classification,
"re_identification": re_identification}
| tao_front_end_services-main | api/handlers/ds_upload.py |