python_code | repo_name | file_path
---|---|---|
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
def tryImport():
"""Import `openfermion` and `openfermionpyscf`."""
try:
import openfermion, openfermionpyscf
except ImportError as Error:
raise ImportError("This feature requires openfermionpyscf. ") from Error
return openfermion, openfermionpyscf
def create_molecular_hamiltonian(geometry: list,
basis='sto-3g',
multiplicity=1,
charge=0,
n_active_electrons=None,
n_active_orbitals=None):
'''
Create the molecular Hamiltonian corresponding to the provided
geometry, basis set, multiplicity, and charge. One can also specify the
number of active electrons and orbitals, thereby approximating the
molecular Hamiltonian and freezing core orbitals. This function delegates
to the `OpenFermion-PySCF` package and will throw an error if that module is
not available.
Arguments:
geometry: The geometry should be provided as a list of tuples,
where each tuple element contains the atom name and a tuple
of atom coordinates, e.g. [('H', (0.,0.,0.)), ('H', (0.,0.,.7474))].
basis: The basis set as a string.
multiplicity: The spin multiplicity as an int.
charge: The total charge of the molecular system as an int.
n_active_electrons: The number of electrons in the active space as an int.
n_active_orbitals: The number of spatial orbitals in the active space.
Returns:
A tuple containing the `cudaq.SpinOperator` representation for the molecular
Hamiltonian and the raw molecular data.
'''
of, ofpyscf = tryImport()
molecule = ofpyscf.run_pyscf(of.MolecularData(geometry, basis, multiplicity,
charge),
run_fci=True)
if n_active_electrons is None:
n_core_orbitals = 0
occupied_indices = None
else:
n_core_orbitals = (molecule.n_electrons - n_active_electrons) // 2
occupied_indices = list(range(n_core_orbitals))
if n_active_orbitals is None:
active_indices = None
else:
active_indices = list(
range(n_core_orbitals, n_core_orbitals + n_active_orbitals))
hamiltonian = molecule.get_molecular_hamiltonian(
occupied_indices=occupied_indices, active_indices=active_indices)
spin_op = of.jordan_wigner(hamiltonian)
from cudaq import SpinOperator
return SpinOperator(spin_op), molecule
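# Example usage (a minimal sketch; assumes `openfermionpyscf` is installed and
# reuses the H2 geometry quoted in the docstring above):
#
#   geometry = [('H', (0., 0., 0.)), ('H', (0., 0., .7474))]
#   hamiltonian, data = create_molecular_hamiltonian(geometry, 'sto-3g', 1, 0)
#   print(hamiltonian)  # a `cudaq.SpinOperator`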
def __internal_cpp_create_molecular_hamiltonian(geometry: list,
basis='sto-3g',
multiplicity=1,
charge=0,
n_active_electrons=None,
n_active_orbitals=None):
'''
Internal function meant for integration with CUDA Quantum C++.
(Does not require `import cudaq`)
Create the molecular Hamiltonian corresponding to the provided
geometry, basis set, multiplicity, and charge. One can also specify the
number of active electrons and orbitals, thereby approximating the
molecular Hamiltonian and freezing core orbitals. This function delegates
to the `OpenFermion-PySCF` package and will throw an error if that module is
not available.
Arguments:
geometry: The geometry should be provided as a list of tuples,
where each tuple element contains the atom name and a tuple
of atom coordinates, e.g. [('H', (0.,0.,0.)), ('H', (0.,0.,.7474))].
basis: The basis set as a string.
multiplicity: The spin multiplicity as an int.
charge: The total charge of the molecular system as an int.
n_active_electrons: The number of electrons in the active space as an int.
n_active_orbitals: The number of spatial orbitals in the active space.
Returns:
A tuple containing the Hamiltonian representation for the molecular
Hamiltonian and the raw molecular data.
'''
of, ofpyscf = tryImport()
molecule = ofpyscf.run_pyscf(of.MolecularData(geometry, basis, multiplicity,
charge),
run_fci=True)
if n_active_electrons is None:
n_core_orbitals = 0
occupied_indices = None
else:
n_core_orbitals = (molecule.n_electrons - n_active_electrons) // 2
occupied_indices = list(range(n_core_orbitals))
if n_active_orbitals is None:
active_indices = None
else:
active_indices = list(
range(n_core_orbitals, n_core_orbitals + n_active_orbitals))
hamiltonian = molecule.get_molecular_hamiltonian(
occupied_indices=occupied_indices, active_indices=active_indices)
spin_op = of.jordan_wigner(hamiltonian)
return spin_op, molecule
| cuda-quantum-main | python/cudaq/domains/chemistry/__init__.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
from .quantinuum import *
from .ionq import *
from .iqm import *
| cuda-quantum-main | utils/mock_qpu/__init__.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq
from fastapi import FastAPI, HTTPException, Header
from typing import Union
import uvicorn, uuid, base64, ctypes
from pydantic import BaseModel
from llvmlite import binding as llvm
# Define the REST Server App
app = FastAPI()
class Input(BaseModel):
format: str
data: str
# Jobs look like the following type
class Job(BaseModel):
target: str
qubits: str
shots: int
input: Input
# Keep track of Job Ids to their Names
createdJobs = {}
# Count how many times the client has requested the Job
countJobGetRequests = 0
# Save how many qubits were needed for each test (emulates real backend)
numQubitsRequired = 0
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
target = llvm.Target.from_default_triple()
targetMachine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, targetMachine)
def getKernelFunction(module):
for f in module.functions:
if not f.is_declaration:
return f
return None
def getNumRequiredQubits(function):
    """Parse the number of required qubits from the kernel function's
    attributes, whose string form is expected to contain a fragment like
    "requiredQubits"="N". Returns None if no such attribute is present."""
    for a in function.attributes:
        if "requiredQubits" in str(a):
            return int(
                str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace(
                    "\"", ""))
# Here we test that the login endpoint works
@app.post("/login")
async def login(token: Union[str, None] = Header(alias="Authorization",
default=None)):
    if token is None:
        raise HTTPException(status_code=401, detail="Credentials not provided")
return {"id-token": "hello", "refresh-token": "refreshToken"}
# Here we expose a way to post jobs.
# Must have an Access Token, and the Job Program must be Adaptive Profile
# with an entry_point tag.
@app.post("/v0.3/jobs")
async def postJob(job: Job,
token: Union[str, None] = Header(alias="Authorization",
default=None)):
global createdJobs, shots, numQubitsRequired
    if token is None:
        raise HTTPException(status_code=401, detail="Credentials not provided")
print('Posting job with shots = ', job.shots)
newId = str(uuid.uuid4())
shots = job.shots
program = job.input.data
decoded = base64.b64decode(program)
m = llvm.module.parse_bitcode(decoded)
mstr = str(m)
assert ('entry_point' in mstr)
# Get the function, number of qubits, and kernel name
function = getKernelFunction(m)
if function == None:
raise Exception("Could not find kernel function")
numQubitsRequired = getNumRequiredQubits(function)
kernelFunctionName = function.name
print("Kernel name = ", kernelFunctionName)
print("Requires {} qubits".format(numQubitsRequired))
# JIT Compile and get Function Pointer
engine.add_module(m)
engine.finalize_object()
engine.run_static_constructors()
funcPtr = engine.get_function_address(kernelFunctionName)
kernel = ctypes.CFUNCTYPE(None)(funcPtr)
# Invoke the Kernel
cudaq.testing.toggleBaseProfile()
qubits, context = cudaq.testing.initialize(numQubitsRequired, job.shots)
kernel()
results = cudaq.testing.finalize(qubits, context)
results.dump()
createdJobs[newId] = results
engine.remove_module(m)
# Job "created", return the id
return {"id": newId, "jobs": {"status": "running"}}
# Retrieve the job, simulate having to wait by counting to 3
# until we return the job results
@app.get("/v0.3/jobs")
async def getJob(id: str):
global countJobGetRequests, createdJobs, numQubitsRequired
# Simulate asynchronous execution
if countJobGetRequests < 3:
countJobGetRequests += 1
return {"jobs": [{"status": "running"}]}
countJobGetRequests = 0
res = {
"jobs": [{
"status": "completed",
"qubits": numQubitsRequired,
"results_url": "/v0.3/jobs/{}/results".format(id)
}]
}
return res
@app.get("/v0.3/jobs/{jobId}/results")
async def getResults(jobId: str):
global countJobGetRequests, createdJobs
counts = createdJobs[jobId]
counts.dump()
retData = {}
N = 0
for bits, count in counts.items():
N += count
for bits, count in counts.items():
retData[bits] = float(count / N)
res = retData
return res
def startServer(port):
uvicorn.run(app, port=port, host='0.0.0.0', log_level="info")
if __name__ == '__main__':
startServer(62455)
| cuda-quantum-main | utils/mock_qpu/ionq/__init__.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import asyncio
import uuid
from typing import Optional
import math
from cmath import exp
# Use IQM Client Tools to verify data structures
import iqm_client
import uvicorn
from fastapi import FastAPI, HTTPException, Request
from pydantic import BaseModel
import numpy as np
# Testing constants
good_access_token = "Bearer good_access_token"
server_qpu_architecture = "Apollo"
operations = [] # TBA
qubits = [] # TBA
qubit_connectivity = [] # TBA
# Define the REST Server App
app = FastAPI()
class Counts(BaseModel):
"""State histogram"""
measurement_keys: list[str]
counts: dict[str, int]
# Keep job artifacts
class Job(BaseModel):
"""Job information"""
id: str
status: iqm_client.Status
result: Optional[iqm_client.RunResult] = None
counts_batch: Optional[list[Counts]] = None
metadata: iqm_client.Metadata
# New job created response
class PostJobsResponse(BaseModel):
"""POST /jobs response"""
id: str
# Jobs storage
createdJobs: dict[str, Job] = {}
def _contract_einsum(A: np.ndarray, U: np.ndarray, indices: list[int],
a_dims: list[int], arity):
"""Unitary operator A acting on the given subsystems of the register,
multiplied by the full-register propagator U."""
A = A.reshape(2 * a_dims)
u_inds = np.arange(2 * arity)
# some u indexes are contracted and replaced with new indices
new_inds = np.arange(len(a_dims)) + len(u_inds)
a_inds = list(new_inds) + indices
# output indexes are same as input indexes, but with the contracted ones replaced with the new ones
out_inds = u_inds.copy()
out_inds[indices] = new_inds
return np.einsum(A, a_inds, U, u_inds, out_inds)
def _generate_measurement_strings(n, bs=""):
if n - 1:
yield from _generate_measurement_strings(n - 1, bs + "0")
yield from _generate_measurement_strings(n - 1, bs + "1")
else:
yield bs + "0"
yield bs + "1"
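# For example, `_generate_measurement_strings(2)` yields the bitstrings
# "00", "01", "10", "11", in that order.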
def _make_phased_rx_unitary_matrix(theta: float, phi: float) -> np.ndarray:
"""Return the unitary matrix for a phased RX gate."""
cos = math.cos(theta / 2)
sin = math.sin(theta / 2)
exp_m = exp(-1j * phi)
exp_p = exp(1j * phi)
r_gate = np.array([[cos, -1j * exp_m * sin], [-1j * exp_p * sin, cos]])
return r_gate
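# Sanity check: at theta = pi, phi = 0 the matrix above reduces to
#   [[0, -1j],
#    [-1j, 0]]
# i.e. a pi rotation about X up to a global phase of -i.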
def _make_cz_unitary_matrix() -> np.ndarray:
"""Return the unitary matrix for a CZ gate."""
CZ = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
return CZ
def _extract_qubit_position_from_qubit_name(qubit_name: str) -> int:
    """Extract the zero-based qubit position from a qubit name such as "QB3"."""
    return int(qubit_name[2:]) - 1
def _partial_trace(N, rho, keep):
"""Calculate the partial trace of a density matrix"""
trace_out = sorted(set(range(N)) - set(keep), reverse=True)
if len(trace_out) == 0:
return rho.reshape(
2**N, 2**N) # No tracing needed, return the reshaped matrix
# Reshape into tensor with shape (2,2,...,2,2,...,2), 2N times
rho = rho.reshape([2] * 2 * N)
# Trace over the unwanted qubits
for q in trace_out:
rho = np.trace(rho, axis1=q, axis2=q + N)
N -= 1 # Adjust N as one qubit is traced out
return rho
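# Worked example: for the two-qubit Bell state (|00> + |11>)/sqrt(2) with
# density matrix `rho`, calling `_partial_trace(2, rho, keep=[0])` traces out
# the second qubit and returns the maximally mixed single-qubit state I/2.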
def _validate_measurements(job: Job, circuit: iqm_client.Circuit) -> bool:
"""Check that the circuit contains measurements"""
measurements = [
instruction for instruction in circuit.instructions
if instruction.name == "measurement"
]
if len(measurements) == 0:
job.status = iqm_client.Status.FAILED
job.result = iqm_client.RunResult(
status=job.status,
metadata=job.metadata,
message="Circuit contains no measurements",
)
createdJobs[job.id] = job
return False
return True
def _validate_connectivity(job: Job, circuit: iqm_client.Circuit) -> bool:
    """Check that the circuit connectivity partially matches the Apollo architecture"""
qubit_pairs = [
instruction.qubits
for instruction in circuit.instructions
if len(instruction.qubits) == 2
]
if ("QB2", "QB3") in qubit_pairs or ("QB3", "QB2") in qubit_pairs:
job.status = iqm_client.Status.FAILED
job.result = iqm_client.RunResult(
status=job.status,
metadata=job.metadata,
message=
"Some circuits in the batch have gates between uncoupled qubits:",
)
createdJobs[job.id] = job
return False
return True
def _gather_circuit_information(
instructions: list[iqm_client.Instruction],) -> tuple[set[int], int]:
"""Gather qubits from the circuit"""
measurement_qubits: set[int] = set()
all_qubits: set[int] = set()
for instruction in instructions:
all_qubits.update(
_extract_qubit_position_from_qubit_name(qb)
for qb in list(instruction.qubits))
if instruction.name == "measurement":
measurement_qubits.update(
_extract_qubit_position_from_qubit_name(qb)
for qb in list(instruction.qubits))
return measurement_qubits, len(all_qubits)
def _simulate_circuit(instructions: list[iqm_client.Instruction],
shots: int) -> dict[str, int]:
"""Simulate the circuit"""
# extract qubits information from measurements
measurement_qubits_positions, number_of_qubits = _gather_circuit_information(
instructions)
# calculate circuit operator and measure qubits
dims = [2] * number_of_qubits
D = np.prod(dims)
operator: np.ndarray = np.eye(int(D), dtype=complex)
operator = operator.reshape(2 * dims)
for instruction in instructions:
if instruction.name == "phased_rx":
qubit_position = _extract_qubit_position_from_qubit_name(
instruction.qubits[0])
r_gate = _make_phased_rx_unitary_matrix(
float(instruction.args["angle_t"]) * (2.0 * np.pi),
float(instruction.args["phase_t"]) * (2.0 * np.pi),
)
# arity here is `number_of_qubits` because `operator` is an operation over all the qubits
operator = _contract_einsum(r_gate, operator, [qubit_position],
[2] * 1, number_of_qubits)
elif instruction.name == "cz":
control_qubit_position = _extract_qubit_position_from_qubit_name(
instruction.qubits[0])
target_qubit_position = _extract_qubit_position_from_qubit_name(
instruction.qubits[1])
cz_gate = _make_cz_unitary_matrix()
# arity here is `number_of_qubits` because `operator` is an operation over all the qubits
operator = _contract_einsum(
cz_gate,
operator,
[control_qubit_position, target_qubit_position],
[2] * 2,
number_of_qubits,
)
else:
continue
operator = operator.reshape((D, D))
# apply the constructed operator to the initial state
initial_state = np.array([0] * 2**number_of_qubits, dtype=complex)
initial_state[0] = 1
final_state = np.matmul(operator, initial_state)
# density matrix
density_matrix = np.outer(final_state, np.conj(final_state))
# make partial density matrix for the measured subset of qubits
partial_trace = _partial_trace(number_of_qubits, density_matrix,
measurement_qubits_positions)
probabilities = np.diag(partial_trace)
return {
ms: int(prob * shots) for ms, prob in zip(
_generate_measurement_strings(len(measurement_qubits_positions)),
probabilities,
)
}
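# Note: the mock derives counts by scaling exact probabilities through
# `int(prob * shots)`, so the histogram is deterministic (no shot noise) and,
# because of truncation, may sum to slightly fewer than `shots`.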
async def compile_and_submit_job(job: Job):
"""Analyze measurements and construct corresponding counts"""
request = job.metadata.request
circuits = request.circuits
job.counts_batch = []
for circuit in circuits:
if not _validate_measurements(job, circuit):
return
if not _validate_connectivity(job, circuit):
return
# Simulate the circuit
counts = _simulate_circuit(circuit.instructions, request.shots)
job.counts_batch.append(
Counts(counts=counts, measurement_keys=[circuit.name]))
job.status = iqm_client.Status.READY
job.result = iqm_client.RunResult(status=job.status, metadata=job.metadata)
createdJobs[job.id] = job
@app.get("/quantum-architecture")
async def get_quantum_architecture(
request: Request) -> iqm_client.QuantumArchitecture:
"""Get the quantum architecture"""
access_token = request.headers.get("Authorization")
if access_token != good_access_token:
raise HTTPException(401)
return iqm_client.QuantumArchitecture(
quantum_architecture=iqm_client.QuantumArchitectureSpecification(
name=server_qpu_architecture,
operations=operations,
qubits=qubits,
qubit_connectivity=qubit_connectivity,
))
@app.post("/jobs")
async def post_jobs(job_request: iqm_client.RunRequest,
request: Request) -> PostJobsResponse:
"""Register a new job and start execution"""
access_token = request.headers.get("Authorization")
if access_token != good_access_token:
raise HTTPException(401)
metadata = iqm_client.Metadata(request=job_request)
new_job_id = str(uuid.uuid4())
new_job = Job(
id=new_job_id,
status=iqm_client.Status.PENDING_COMPILATION,
request=job_request,
metadata=metadata,
)
createdJobs[new_job_id] = new_job
# start compilation and execution
asyncio.create_task(compile_and_submit_job(new_job))
await asyncio.sleep(0.0)
return PostJobsResponse(id=new_job_id)
@app.get("/jobs/{job_id}/status")
async def get_jobs_status(job_id: str, request: Request) -> iqm_client.Status:
"""Get the status of a job"""
access_token = request.headers.get("Authorization")
if access_token != good_access_token:
raise HTTPException(401)
if job_id not in createdJobs:
raise HTTPException(404)
return createdJobs[job_id].status
@app.get("/jobs/{job_id}/counts")
async def get_jobs(job_id: str, request: Request):
"""Get the result of a job"""
access_token = request.headers.get("Authorization")
if access_token != good_access_token:
raise HTTPException(401)
if job_id not in createdJobs:
raise HTTPException(404)
job = createdJobs[job_id]
# TODO: return the actual counts, check the requested measurements
results = {
"status":
job.status,
"message":
job.result.message if job.result and job.result.message else None,
"counts_batch":
job.counts_batch,
}
return results
def startServer(port):
uvicorn.run(app, port=port, host="0.0.0.0", log_level="debug")
if __name__ == "__main__":
startServer(9100)
| cuda-quantum-main | utils/mock_qpu/iqm/mock_iqm_server.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import json
import sys
from datetime import datetime
def write_a_mock_tokens_file(tokens_file_path):
tokens_data = {
"access_token": "good_access_token",
}
json_str = json.dumps(tokens_data)
with open(tokens_file_path, "w") as f:
f.write(json_str)
if __name__ == "__main__":
tokens_file_path = sys.argv[1]
write_a_mock_tokens_file(tokens_file_path)
| cuda-quantum-main | utils/mock_qpu/iqm/mock_iqm_cortex_cli.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
import cudaq
from fastapi import FastAPI, HTTPException, Header
from typing import Union
import uvicorn, uuid, base64, ctypes
from pydantic import BaseModel
from llvmlite import binding as llvm
# Define the REST Server App
app = FastAPI()
# Jobs look like the following type
class Job(BaseModel):
name: str
program: str
count: int
# Keep track of Job Ids to their Names
createdJobs = {}
# Count how many times the client has requested the Job
countJobGetRequests = 0
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
target = llvm.Target.from_default_triple()
targetMachine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, targetMachine)
def getKernelFunction(module):
for f in module.functions:
if not f.is_declaration:
return f
return None
def getNumRequiredQubits(function):
for a in function.attributes:
if "requiredQubits" in str(a):
return int(
str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace(
"\"", ""))
# Here we test that the login endpoint works
@app.post("/login")
async def login(token: Union[str, None] = Header(alias="Authorization",
default=None)):
    if token is None:
        raise HTTPException(status_code=401, detail="Credentials not provided")
return {"id-token": "hello", "refresh-token": "refreshToken"}
# Here we expose a way to post jobs.
# Must have an Access Token, and the Job Program must be Adaptive Profile
# with an entry_point tag.
@app.post("/job")
async def postJob(job: Job,
token: Union[str, None] = Header(alias="Authorization",
default=None)):
global createdJobs, shots
    if token is None:
        raise HTTPException(status_code=401, detail="Credentials not provided")
print('Posting job with name = ', job.name, job.count)
name = job.name
newId = str(uuid.uuid4())
program = job.program
decoded = base64.b64decode(program)
m = llvm.module.parse_bitcode(decoded)
mstr = str(m)
assert ('entry_point' in mstr)
# Get the function, number of qubits, and kernel name
function = getKernelFunction(m)
if function == None:
raise Exception("Could not find kernel function")
numQubitsRequired = getNumRequiredQubits(function)
kernelFunctionName = function.name
print("Kernel name = ", kernelFunctionName)
print("Requires {} qubits".format(numQubitsRequired))
# JIT Compile and get Function Pointer
engine.add_module(m)
engine.finalize_object()
engine.run_static_constructors()
funcPtr = engine.get_function_address(kernelFunctionName)
kernel = ctypes.CFUNCTYPE(None)(funcPtr)
# Invoke the Kernel
cudaq.testing.toggleBaseProfile()
qubits, context = cudaq.testing.initialize(numQubitsRequired, job.count)
kernel()
results = cudaq.testing.finalize(qubits, context)
results.dump()
createdJobs[newId] = (name, results)
engine.remove_module(m)
# Job "created", return the id
return {"job": newId}
# Retrieve the job, simulate having to wait by counting to 3
# until we return the job results
@app.get("/job/{jobId}")
async def getJob(jobId: str):
global countJobGetRequests, createdJobs, shots
# Simulate asynchronous execution
if countJobGetRequests < 3:
countJobGetRequests += 1
return {"status": "running"}
countJobGetRequests = 0
name, counts = createdJobs[jobId]
retData = []
for bits, count in counts.items():
retData += [bits] * count
res = {"status": "completed", "results": {"mz0": retData}}
return res
def startServer(port):
uvicorn.run(app, port=port, host='0.0.0.0', log_level="info")
if __name__ == '__main__':
startServer(62454)
| cuda-quantum-main | utils/mock_qpu/quantinuum/__init__.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
from typing import Union
import base64
import ctypes
import cudaq
import uuid
import uvicorn
from fastapi import FastAPI, HTTPException, Header
from llvmlite import binding as llvm
from pydantic import BaseModel
# Define the REST Server App
app = FastAPI()
# Jobs look like the following type
class Task(BaseModel):
task_id: str
program: str
config: str
class TaskBody(BaseModel):
tasks: list[Task]
class AuthModel(BaseModel):
email: str
password: str
# Keep track of Job Ids to their Names
createdJobs = {}
# Count how many times the client has requested the Job
countJobGetRequests = 0
llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
target = llvm.Target.from_default_triple()
targetMachine = target.create_target_machine()
backing_mod = llvm.parse_assembly("")
engine = llvm.create_mcjit_compiler(backing_mod, targetMachine)
def getKernelFunction(module):
for f in module.functions:
if not f.is_declaration:
return f
return None
def getNumRequiredQubits(function):
for a in function.attributes:
if "requiredQubits" in str(a):
return int(
str(a).split("requiredQubits\"=")[-1].split(" ")[0].replace(
"\"", ""))
# Here we test that the login endpoint works
@app.post("/auth")
async def login(auth_info: AuthModel):
return {"access_token": "auth_token"}
# Here we expose a way to post jobs.
# Must have an Access Token, and the Job Program must be Adaptive Profile
# with an entry_point tag.
@app.post("/tasks/submit")
async def postJob(
tasks: Union[TaskBody, Task],
# access_token: Union[str, None] = Header(alias="Authorization",default=None)
):
global createdJobs, shots
# if access_token == None:
# raise HTTPException(status_code(401), detail="Credentials not provided")
if isinstance(tasks, Task):
tasks = TaskBody(tasks=[
tasks,
])
for task in tasks.tasks:
newId = task.task_id
program = task.program
decoded = base64.b64decode(program)
m = llvm.module.parse_bitcode(decoded)
mstr = str(m)
assert ('entry_point' in mstr)
# Get the function, number of qubits, and kernel name
function = getKernelFunction(m)
if function == None:
raise Exception("Could not find kernel function")
numQubitsRequired = getNumRequiredQubits(function)
kernelFunctionName = function.name
print("Kernel name = ", kernelFunctionName)
print("Requires {} qubits".format(numQubitsRequired))
# JIT Compile and get Function Pointer
engine.add_module(m)
engine.finalize_object()
engine.run_static_constructors()
funcPtr = engine.get_function_address(kernelFunctionName)
kernel = ctypes.CFUNCTYPE(None)(funcPtr)
# Invoke the Kernel
cudaq.testing.toggleBaseProfile()
qubits, context = cudaq.testing.initialize(numQubitsRequired, 1000)
kernel()
results = cudaq.testing.finalize(qubits, context)
results.dump()
createdJobs[newId] = (task.task_id, results)
engine.remove_module(m)
# Job "created", return the id
return {"job": newId}
# Retrieve the job, simulate having to wait by counting to 3
# until we return the job results
@app.get("/tasks/{jobId}/results")
async def getJob(jobId: str):
global countJobGetRequests, createdJobs, shots
countJobGetRequests = 0
name, counts = createdJobs[jobId]
retData = {}
for bits, count in counts.items():
retData[str(bits)] = count
return {"results": retData}
@app.post("/tasks")
async def createTasks(n=1):
return [uuid.uuid4() for _ in range(n)]
def startServer(port):
uvicorn.run(app, port=port, host='0.0.0.0', log_level="info")
if __name__ == '__main__':
startServer(62454)
| cuda-quantum-main | utils/mock_qpu/oqc/__init__.py |
# ============================================================================ #
# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. #
# All rights reserved. #
# #
# This source code and the accompanying materials are made available under #
# the terms of the Apache License 2.0 which accompanies this distribution. #
# ============================================================================ #
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'NVIDIA CUDA Quantum'
copyright = '2023, NVIDIA Corporation & Affiliates'
author = 'NVIDIA Corporation & Affiliates'
# The version info for the project you're documenting, acts as replacement for
# |version| used in various places throughout the docs.
# The short X.Y version.
version = os.getenv("CUDA_QUANTUM_VERSION", "latest")
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autodoc', # to get documentation from python doc comments
'sphinx.ext.autosummary',
'sphinx.ext.autosectionlabel',
'sphinx.ext.doctest', # test example codes in docs
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
#'sphinx.ext.mathjax',
'sphinx.ext.napoleon', # support google/numpy style docstrings
#'sphinx.ext.linkcode',
'sphinx_reredirects',
'breathe',
'enum_tools.autoenum', # for pretty-print Python enums
'myst_parser', # for including markdown files
]
imgmath_latex_preamble = r'\usepackage{braket}'
imgmath_image_format = 'svg'
imgmath_font_size = 14
#imgmath_dvipng_args = ['-gamma', '1.5', '-D', '110', '-bg', 'Transparent']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = {
'.rst': 'restructuredtext',
'.md': 'markdown',
}
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['**/_*', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'code' # NOTE: the following may be a better choice to err on the side of flagging anything that is referenced but not declared
#default_role = 'cpp:any' # see https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# autosummary is buggy: this must be py instead of cpp so that the domain setting
# can be propagated to the autogen'd rst files.
# primary_domain = 'py'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation" : False,
"sticky_navigation" : False,
}
html_css_files = ['_static/cudaq_override.css']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'cudaqDoc'
def setup(app):
app.add_css_file('cudaq_override.css')
# -- Options for BREATHE -------------------------------------------------
breathe_projects = { "cudaq": "_doxygen/xml" }
breathe_default_project = "cudaq"
breathe_show_enumvalue_initializer = True
# -- Other options -------------------------------------------------
autosummary_generate = True
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
}
redirects = {
"versions": "../latest/releases.html"
}
nitpick_ignore = [
('cpp:identifier', 'GlobalRegisterName'),
('cpp:identifier', 'CountsDictionary::iterator'),
('cpp:identifier', 'CountsDictionary::const_iterator'),
('cpp:identifier', 'State'),
('cpp:identifier', 'pauli'),
('cpp:identifier', 'Job'),
('cpp:identifier', 'mlir'),
('cpp:identifier', 'mlir::Value'),
('cpp:identifier', 'mlir::Type'),
('cpp:identifier', 'mlir::MLIRContext'),
('cpp:identifier', 'mlir::ImplicitLocOpBuilder'),
('cpp:identifier', 'BinarySymplecticForm'),
('cpp:identifier', 'CountsDictionary'),
('py:class', 'function'),
('py:class', 'type'),
('py:class', 'cudaq::spin_op'),
]
napoleon_google_docstring = True
napoleon_numpy_docstring = False
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
| cuda-quantum-main | docs/sphinx/conf.py |
import cudaq
import numpy as np
# Set the target to our density matrix simulator.
cudaq.set_target('density-matrix-cpu')
# CUDA Quantum supports custom noise models through the definition of
# `KrausChannel`'s. In this case, we will define a set of `KrausOperator`'s
# that apply the same noise as the `AmplitudeDampingChannel`. This
# channel will model the energy dissipation within our system via
# environmental interactions. With a variable probability, it will
# return the qubit to the |0> state.
# We will begin by defining an empty noise model that we will add
# our Kraus Channel to.
noise = cudaq.NoiseModel()
# We will define our Kraus operators within a function, so as to
# allow for easy control over the noise probability.
def kraus_operators(probability):
"""See Nielsen, Chuang Chapter 8.3.5 for definition source."""
kraus_0 = np.array([[1, 0], [0, np.sqrt(1 - probability)]],
dtype=np.complex128)
    kraus_1 = np.array([[0, np.sqrt(probability)], [0, 0]], dtype=np.complex128)
return [kraus_0, kraus_1]
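# Quick check: these operators satisfy the Kraus completeness relation
# K0^dag K0 + K1^dag K1 = I required of any valid channel, e.g.
#
#   k0, k1 = kraus_operators(0.3)
#   assert np.allclose(k0.conj().T @ k0 + k1.conj().T @ k1, np.eye(2))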
# Manually defined amplitude damping channel with `1.0` probability
# of the qubit decaying to the ground state.
amplitude_damping = cudaq.KrausChannel(kraus_operators(1.0))
# We will apply this channel to any Hadamard gate on the qubit.
# Meaning, after each Hadamard on the qubit, there will be a
# probability of `1.0` that the qubit decays back to ground.
noise.add_channel('h', [0], amplitude_damping)
# Now we may define our simple kernel function and allocate a qubit.
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# Then we apply a Hadamard gate to the qubit.
# This will bring it to `1/sqrt(2) (|0> + |1>)`, where it will remain
# with a probability of `1 - p = 0.0`.
kernel.h(qubit)
# Measure.
kernel.mz(qubit)
# Now we're ready to run the noisy simulation of our kernel.
# Note: We must pass the noise model to sample via key-word.
noisy_result = cudaq.sample(kernel, noise_model=noise)
noisy_result.dump()
# Our results should show all measurements in the |0> state, indicating
# that the noise has successfully impacted the system.
# To confirm this, we can run the simulation again without noise.
# The qubit will now have a 50/50 mix of measurements between
# |0> and |1>.
noiseless_result = cudaq.sample(kernel)
noiseless_result.dump()
| cuda-quantum-main | docs/sphinx/examples/python/noise_kraus_operator.py |
import cudaq
import random
def random_bitstring(length: int):
bitstring = ""
for bit in range(length):
bitstring += str(random.randint(0, 1))
return bitstring
def oracle(kernel: cudaq.Kernel, register: cudaq.QuakeValue,
auxillary_qubit: cudaq.QuakeValue, hidden_bitstring: str):
"""
The inner-product oracle for Bernstein Vazirani.
"""
for index, bit in enumerate(hidden_bitstring):
if bit == "0":
# Apply identity operation to the qubit if it's
# to be in the 0-state.
# In this case, we do nothing.
pass
else:
# Otherwise, apply a `cx` gate with the current qubit as
# the control and the auxillary qubit as the target.
kernel.cx(control=register[index], target=auxillary_qubit)
def bernstein_vazirani(qubit_count: int):
"""
Returns a kernel implementing the Bernstein Vazirani algorithm
for a random, hidden bitstring.
"""
kernel = cudaq.make_kernel()
# Allocate the specified number of qubits - this
# corresponds to the length of the hidden bitstring.
qubits = kernel.qalloc(qubit_count)
# Allocate an extra auxillary qubit.
auxillary_qubit = kernel.qalloc()
# Prepare the auxillary qubit.
kernel.h(auxillary_qubit)
kernel.z(auxillary_qubit)
# Place the rest of the register in a superposition state.
kernel.h(qubits)
# Generate a random, hidden bitstring for the oracle
# to encode. Note: we define the bitstring here so
# as to be able to return it for verification.
hidden_bitstring = random_bitstring(qubit_count)
# Query the oracle.
oracle(kernel, qubits, auxillary_qubit, hidden_bitstring)
# Apply another set of Hadamards to the register.
kernel.h(qubits)
# Apply measurement gates to just the `qubits`
# (excludes the auxillary qubit).
kernel.mz(qubits)
return kernel, hidden_bitstring
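# Why this works: the oracle imprints a phase of (-1)^(s.x) onto each basis
# state |x> via phase kickback on the auxillary qubit, and the final layer of
# Hadamards maps that phase pattern back to the computational-basis state |s>,
# so a single execution reveals the hidden bitstring.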
# If you have a NVIDIA GPU you can use this example to see
# that the GPU-accelerated backends can easily handle a
# larger number of qubits compared to the CPU-only backend.
# Depending on the available memory on your GPU, you can
# set the number of qubits to around 30 qubits, and un-comment
# the `cudaq.set_target(nvidia)` line.
# Note: Without setting the target to the `nvidia` backend,
# a 30 qubit simulation simply seems to hang; that is
# because it takes a long time for the CPU-only backend
# to handle this number of qubits!
qubit_count = 5 # set to around 30 qubits for `nvidia` target
# ```
# cudaq.set_target("nvidia")
# ```
kernel, hidden_bitstring = bernstein_vazirani(qubit_count)
result = cudaq.sample(kernel)
print(f"encoded bitstring = {hidden_bitstring}")
print(f"measured state = {result.most_probable()}")
print(f"Were we successful? {hidden_bitstring == result.most_probable()}")
| cuda-quantum-main | docs/sphinx/examples/python/bernstein_vazirani.py |
import cudaq
# Set the target to our density matrix simulator.
cudaq.set_target('density-matrix-cpu')
# CUDA Quantum supports several different models of noise. In this
# case, we will examine the modeling of decoherence of the qubit phase.
# This will occur from "phase flip" errors, wherein the qubit has a
# user-specified probability of undergoing a Z-180 rotation.
# We will begin by defining an empty noise model that we will add
# our phase flip channel to.
noise = cudaq.NoiseModel()
# Phase flip channel with `1.0` probability of the qubit
# undergoing a phase rotation of 180 degrees (π).
phase_flip = cudaq.PhaseFlipChannel(1.0)
# We will apply this channel to any Z gate on the qubit.
# Meaning, after each Z gate on qubit 0, there will be a
# probability of `1.0` that the qubit undergoes an extra
# Z rotation.
noise.add_channel('z', [0], phase_flip)
kernel = cudaq.make_kernel()
# Single qubit initialized to the |0> state.
qubit = kernel.qalloc()
# Place qubit in superposition state.
kernel.h(qubit)
# Rotate the phase around Z by 180 degrees (π).
kernel.z(qubit)
# Apply another Hadamard and measure.
kernel.h(qubit)
kernel.mz(qubit)
# Without noise, we'd expect the qubit to end in the |1>
# state due to the phase rotation between the two Hadamard
# gates.
counts = cudaq.sample(kernel)
counts.dump()
# With noise, our Z-gate will effectively cancel out due
# to the presence of a phase flip error on the gate with a
# probability of `1.0`. This will put us back in the |0> state.
noisy_counts = cudaq.sample(kernel, noise_model=noise)
noisy_counts.dump()
| cuda-quantum-main | docs/sphinx/examples/python/noise_phase_flip.py |
import cudaq
# Set the target to our density matrix simulator.
cudaq.set_target('density-matrix-cpu')
# CUDA Quantum supports several different models of noise. In this case,
# we will examine the modeling of decoherence of the qubit state. This
# will occur from "bit flip" errors, wherein the qubit has a user-specified
# probability of undergoing an X-180 rotation.
# We will begin by defining an empty noise model that we will add
# these decoherence channels to.
noise = cudaq.NoiseModel()
# Bit flip channel with `1.0` probability of the qubit flipping 180 degrees.
bit_flip = cudaq.BitFlipChannel(1.0)
# We will apply this channel to any X gate on the qubit, giving each X-gate
# a probability of `1.0` of undergoing an extra X-gate.
noise.add_channel('x', [0], bit_flip)
# Now we may define our simple kernel function and allocate a register
# of qubits to it.
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# Apply an X-gate to the qubit.
# It will remain in the |1> state with a probability of `1 - p = 0.0`.
kernel.x(qubit)
# Measure.
kernel.mz(qubit)
# Now we're ready to run the noisy simulation of our kernel.
# Note: We must pass the noise model to sample via key-word.
noisy_result = cudaq.sample(kernel, noise_model=noise)
noisy_result.dump()
# Our results should show all measurements in the |0> state, indicating
# that the noise has successfully impacted the system.
# To confirm this, we can run the simulation again without noise.
# We should now see the qubit in the |1> state.
noiseless_result = cudaq.sample(kernel)
noiseless_result.dump()
| cuda-quantum-main | docs/sphinx/examples/python/noise_bit_flip.py |
import cudaq
# Set the target to our density matrix simulator.
cudaq.set_target('density-matrix-cpu')
# CUDA Quantum supports several different models of noise. In this
# case, we will examine the modeling of depolarization noise. This
# depolarization will result in the qubit state decaying into a mix
# of the basis states, |0> and |1>, with a user provided probability.
# We will begin by defining an empty noise model that we will add
# our depolarization channel to.
noise = cudaq.NoiseModel()
# Depolarization channel with `1.0` probability of the qubit state
# being scrambled.
depolarization = cudaq.DepolarizationChannel(1.0)
# We will apply the channel to any Y-gate on qubit 0. Meaning,
# for each Y-gate on our qubit, the qubit will have a `1.0`
# probability of decaying into a mixed state.
noise.add_channel('y', [0], depolarization)
# Now we may define our simple kernel function and allocate
# a qubit to it.
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# First we apply a Y-gate to qubit 0.
# This will bring the qubit to the |1> state, where it will remain
# with a probability of `1 - p = 0.0`.
kernel.y(qubit)
kernel.mz(qubit)
# Without noise, the qubit should still be in the |1> state.
counts = cudaq.sample(kernel)
counts.dump()
# With noise, the measurements should be a roughly 50/50
# mix between the |0> and |1> states.
noisy_counts = cudaq.sample(kernel, noise_model=noise)
noisy_counts.dump()
| cuda-quantum-main | docs/sphinx/examples/python/noise_depolarization.py |
import cudaq
from cudaq import spin
import numpy as np
# Here we build up a kernel for QAOA with `p` layers, with each layer
# containing the alternating set of unitaries corresponding to the problem
# and the mixer Hamiltonians. The algorithm leverages the VQE algorithm
# to compute the Max-Cut of a rectangular graph illustrated below.
# v0 0---------------------0 v1
# | |
# | |
# | |
# | |
# v3 0---------------------0 v2
# The Max-Cut for this problem is 0101 or 1010.
# The problem Hamiltonian
hamiltonian = 0.5 * spin.z(0) * spin.z(1) + 0.5 * spin.z(1) * spin.z(2) \
+ 0.5 * spin.z(0) * spin.z(3) + 0.5 * spin.z(2) * spin.z(3)
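# For instance, the optimal cut 0101 anti-aligns the qubits on every edge, so
# each of the four 0.5*Z*Z terms contributes -0.5 and <H> attains its minimum
# value of -2.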
# Problem parameters.
qubit_count: int = 4
layer_count: int = 2
parameter_count: int = 2 * layer_count
def kernel_qaoa() -> cudaq.Kernel:
"""QAOA ansatz for Max-Cut"""
kernel, thetas = cudaq.make_kernel(list)
qreg = kernel.qalloc(qubit_count)
# Create superposition
kernel.h(qreg)
# Loop over the layers
for i in range(layer_count):
# Loop over the qubits
# Problem unitary
for j in range(qubit_count):
kernel.cx(qreg[j], qreg[(j + 1) % qubit_count])
kernel.rz(2.0 * thetas[i], qreg[(j + 1) % qubit_count])
kernel.cx(qreg[j], qreg[(j + 1) % qubit_count])
# Mixer unitary
for j in range(qubit_count):
kernel.rx(2.0 * thetas[i + layer_count], qreg[j])
return kernel
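# Implementation note: each CX / RZ(2*theta) / CX sequence above realizes
# exp(-i*theta*Z Z) on the edge's qubit pair, i.e. one problem-Hamiltonian
# term, while the RX rotations implement the transverse-field mixer.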
# Specify the optimizer and its initial parameters. Make it repeatable.
cudaq.set_random_seed(13)
optimizer = cudaq.optimizers.COBYLA()
np.random.seed(13)
optimizer.initial_parameters = np.random.uniform(-np.pi / 8.0, np.pi / 8.0,
parameter_count)
print("Initial parameters = ", optimizer.initial_parameters)
# Pass the kernel, spin operator, and optimizer to `cudaq.vqe`.
optimal_expectation, optimal_parameters = cudaq.vqe(
kernel=kernel_qaoa(),
spin_operator=hamiltonian,
optimizer=optimizer,
parameter_count=parameter_count)
# Print the optimized value and its parameters
print("Optimal value = ", optimal_expectation)
print("Optimal parameters = ", optimal_parameters)
# Sample the circuit using the optimized parameters
counts = cudaq.sample(kernel_qaoa(), optimal_parameters)
counts.dump()
| cuda-quantum-main | docs/sphinx/examples/python/qaoa_maxcut.py |
import cudaq
# Set the target to our density matrix simulator.
cudaq.set_target('density-matrix-cpu')
# CUDA Quantum supports several different models of noise. In this case,
# we will examine the modeling of energy dissipation within our system
# via environmental interactions. The result of this "amplitude damping"
# is to return the qubit to the |0> state with a user-specified probability.
# We will begin by defining an empty noise model that we will add
# our damping channel to.
noise = cudaq.NoiseModel()
# Amplitude damping channel with `1.0` probability of the qubit
# decaying to the ground state.
amplitude_damping = cudaq.AmplitudeDampingChannel(1.0)
# We will apply this channel to any Hadamard gate on the qubit.
# Meaning, after each Hadamard on the qubit, there will be a
# probability of `1.0` that the qubit decays back to ground.
noise.add_channel('h', [0], amplitude_damping)
# Now we may define our simple kernel function and allocate a qubit.
kernel = cudaq.make_kernel()
qubit = kernel.qalloc()
# Then we apply a Hadamard gate to the qubit.
# This will bring it to `1/sqrt(2) (|0> + |1>)`, where it will remain
# with a probability of `1 - p = 0.0`.
kernel.h(qubit)
# Measure.
kernel.mz(qubit)
# Now we're ready to run the noisy simulation of our kernel.
# Note: We must pass the noise model to sample via key-word.
noisy_result = cudaq.sample(kernel, noise_model=noise)
noisy_result.dump()
# Our results should show all measurements in the |0> state, indicating
# that the noise has successfully impacted the system.
# To confirm this, we can run the simulation again without noise.
# The qubit will now have a 50/50 mix of measurements between
# |0> and |1>.
noiseless_result = cudaq.sample(kernel)
noiseless_result.dump()
| cuda-quantum-main | docs/sphinx/examples/python/noise_amplitude_damping.py |
import cudaq
from cudaq import spin
from typing import List, Tuple
# We will be optimizing over a custom objective function that takes a vector
# of parameters as input and returns either the cost as a single float,
# or in a tuple of (cost, gradient_vector) depending on the optimizer used.
# In this case, we will use the spin Hamiltonian and ansatz from `simple_vqe.py`
# and find the `thetas` that minimize the expectation value of the system.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
kernel, thetas = cudaq.make_kernel(list)
qubits = kernel.qalloc(2)
kernel.x(qubits[0])
kernel.ry(thetas[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
# Define the optimizer that we'd like to use.
optimizer = cudaq.optimizers.Adam()
# Since we'll be using a gradient-based optimizer, we can leverage
# CUDA Quantum's gradient helper class to automatically compute the gradient
# vector for us. The use of this class for gradient calculations is
# purely optional and can be replaced with your own custom gradient
# routine.
gradient = cudaq.gradients.CentralDifference()
def objective_function(parameter_vector: List[float],
hamiltonian=hamiltonian,
gradient_strategy=gradient,
kernel=kernel) -> Tuple[float, List[float]]:
"""
Note: the objective function may also take extra arguments, provided they
are passed into the function as default arguments in python.
"""
# Call `cudaq.observe` on the spin operator and ansatz at the
# optimizer provided parameters. This will allow us to easily
# extract the expectation value of the entire system in the
# z-basis.
# We define the call to `cudaq.observe` here as a lambda to
# allow it to be passed into the gradient strategy as a
# function. If you were using a gradient-free optimizer,
# you could purely define `cost = cudaq.observe().expectation_z()`.
get_result = lambda parameter_vector: cudaq.observe(
kernel, hamiltonian, parameter_vector, shots_count=100).expectation_z()
# `cudaq.observe` returns a `cudaq.ObserveResult` that holds the
# counts dictionary and the `expectation_z`.
cost = get_result(parameter_vector)
print(f"<H> = {cost}")
# Compute the gradient vector using `cudaq.gradients.STRATEGY.compute()`.
gradient_vector = gradient_strategy.compute(parameter_vector, get_result,
cost)
# Return the (cost, gradient_vector) tuple.
return cost, gradient_vector
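# For reference, a central-difference gradient approximates each component as
#   dE/d(theta_i) ~= (E(theta_i + h) - E(theta_i - h)) / (2h)
# for a small step h; the exact step size used by
# `cudaq.gradients.CentralDifference` is an implementation detail of the library.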
cudaq.set_random_seed(13) # make repeatable
energy, parameter = optimizer.optimize(dimensions=1,
function=objective_function)
print(f"\nminimized <H> = {round(energy,16)}")
print(f"optimal theta = {round(parameter[0],16)}")
| cuda-quantum-main | docs/sphinx/examples/python/advanced_vqe.py |
import cudaq
from cudaq import spin
# We begin by defining the spin Hamiltonian for the system that we are working
# with. This is achieved through the use of `cudaq.SpinOperator`'s, which allow
# for the convenient creation of complex Hamiltonians out of Pauli spin operators.
hamiltonian = 5.907 - 2.1433 * spin.x(0) * spin.x(1) - 2.1433 * spin.y(
0) * spin.y(1) + .21829 * spin.z(0) - 6.125 * spin.z(1)
# Next, using the `cudaq.Kernel`, we define the variational quantum circuit
# that we'd like to use as an ansatz.
# Create a kernel that takes a list of floats as a function argument.
kernel, thetas = cudaq.make_kernel(list)
# Allocate 2 qubits.
qubits = kernel.qalloc(2)
kernel.x(qubits[0])
# Apply an `ry` gate that is parameterized by the first
# `QuakeValue` entry of our list, `thetas`.
kernel.ry(thetas[0], qubits[1])
kernel.cx(qubits[1], qubits[0])
# Note: the kernel must not contain measurement instructions.
# The last thing we need is to pick an optimizer from the suite of `cudaq.optimizers`.
# We can optionally tune this optimizer through its initial parameters, iterations,
# optimization bounds, etc. before passing it to `cudaq.vqe`.
optimizer = cudaq.optimizers.COBYLA()
# optimizer.max_iterations = ...
# optimizer...
# Finally, we can pass all of that into `cudaq.vqe` and it will automatically run our
# optimization loop and return a tuple of the minimized eigenvalue of our `spin_operator`
# and the list of optimal variational parameters.
energy, parameter = cudaq.vqe(
kernel=kernel,
spin_operator=hamiltonian,
optimizer=optimizer,
# list of parameters has length of 1:
parameter_count=1)
print(f"\nminimized <H> = {round(energy,16)}")
print(f"optimal theta = {round(parameter[0],16)}")
| cuda-quantum-main | docs/sphinx/examples/python/simple_vqe.py |
import cudaq
# We begin by defining the `Kernel` that we will construct our
# program with.
kernel = cudaq.make_kernel()
# Next, we can allocate qubits to the kernel via `qalloc(qubit_count)`.
# An empty call to `qalloc` will return a single qubit.
qubit = kernel.qalloc()
# Now we can begin adding instructions to apply to this qubit!
# Here we'll just add every non-parameterized
# single qubit gate that is supported by CUDA Quantum.
kernel.h(qubit)
kernel.x(qubit)
kernel.y(qubit)
kernel.z(qubit)
kernel.t(qubit)
kernel.s(qubit)
# Next, we add a measurement to the kernel so that we can sample
# the measurement results on our simulator!
kernel.mz(qubit)
# Finally, we can execute this kernel on the state vector simulator
# by calling `cudaq.sample`. This will execute the provided kernel
# `shots_count` number of times and return the sampled distribution
# as a `cudaq.SampleResult` dictionary.
result = cudaq.sample(kernel)
# Now let's take a look at the `SampleResult` we've gotten back!
print(result) # or result.dump()
| cuda-quantum-main | docs/sphinx/examples/python/intro.py |
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# To use different targets in the same file, you must update
# it via another call to `cudaq.set_target()`
cudaq.set_target("iqm",
url="http://localhost/cocos",
**{"qpu-architecture": "Adonis"})
# Adonis QPU architecture:
# QB1
# |
# QB2 - QB3 - QB4
# |
# QB5
# Create the kernel we'd like to execute on IQM.
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(5)
kernel.h(qubits[2]) # QB3
kernel.cx(qubits[2], qubits[0])
kernel.mz(qubits)
# Execute on IQM Server and print out the results.
# Option A:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by IQM Server. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
# Option B:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from IQM Server.
counts = cudaq.sample(kernel)
print(counts)
| cuda-quantum-main | docs/sphinx/examples/python/providers/iqm.py |
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# By default, we will submit to the Quantinuum syntax checker.
cudaq.set_target("quantinuum")
# Create the kernel we'd like to execute on Quantinuum.
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
kernel.mz(qubits[0])
kernel.mz(qubits[1])
# Submit to Quantinuum's endpoint and confirm the program is valid.
# Option A:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been executed by the Quantinuum service.
# We will use the synchronous call to submit to the syntax
# checker to confirm the validity of the program.
syntax_check = cudaq.sample(kernel)
if (syntax_check):
print("Syntax check passed! Kernel is ready for submission.")
# Now we can update the target to the Quantinuum emulator and
# execute our program.
cudaq.set_target("quantinuum", machine="H1-2E")
# Option B:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by Quantinuum. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
| cuda-quantum-main | docs/sphinx/examples/python/providers/quantinuum.py |
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# To use different targets in the same file, you must update
# it via another call to `cudaq.set_target()`
cudaq.set_target("ionq")
# Create the kernel we'd like to execute on IonQ.
kernel = cudaq.make_kernel()
qubits = kernel.qalloc(2)
kernel.h(qubits[0])
kernel.cx(qubits[0], qubits[1])
# Note: All qubits will be measured at the end upon performing
# the sampling. You may encounter a pre-flight error on IonQ
# backends if you include explicit measurements.
# Execute on IonQ and print out the results.
# Option A:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by IonQ. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
# Option B:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from IonQ.
counts = cudaq.sample(kernel)
print(counts)
| cuda-quantum-main | docs/sphinx/examples/python/providers/ionq.py |
#!/usr/bin/env python
#################################################################################################
# Copyright (c) 2010, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by Todd Gamblin, [email protected].
# LLNL-CODE-417602
# All rights reserved.
#
# This file is part of Libra. For details, see http://github.com/tgamblin/libra.
# Please also read the LICENSE file for further information.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of
# conditions and the disclaimer below.
# * Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the disclaimer (as noted below) in the documentation and/or other materials
# provided with the distribution.
# * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#################################################################################################
usage_string = \
'''Usage: wrap.py [-fgd] [-i pmpi_init] [-c mpicc_name] [-o file] wrapper.w [...]
Python script for creating PMPI wrappers. Roughly follows the syntax of
the Argonne PMPI wrapper generator, with some enhancements.
Options:"
-d Just dump function declarations parsed out of mpi.h
-f Generate fortran wrappers in addition to C wrappers.
-g Generate reentry guards around wrapper functions.
-s Skip writing #includes, #defines, and other front-matter (for non-C output).
-c exe Provide name of MPI compiler (for parsing mpi.h). Default is \'mpicc\'.
-I dir Provide an extra include directory to use when parsing mpi.h.
-i pmpi_init Specify proper binding for the fortran pmpi_init function.
Default is \'pmpi_init_\'. Wrappers compiled for PIC will guess the
right binding automatically (use -DPIC when you compile dynamic libs).
-o file Send output to a file instead of stdout.
by Todd Gamblin, [email protected]
'''
import tempfile, getopt, subprocess, sys, os, re, StringIO, types, itertools
# Default values for command-line parameters
mpicc = 'mpicc' # Default name for the MPI compiler
includes = [] # Default set of directories to include when parsing mpi.h
pmpi_init_binding = "pmpi_init_" # Default binding for pmpi_init
output_fortran_wrappers = False # Don't print fortran wrappers by default
output_guards = False # Don't print reentry guards by default
skip_headers = False # Skip header information and defines (for non-C output)
dump_prototypes = False # Just dump MPI prototypes and exit if true.
# Possible legal bindings for the fortran version of PMPI_Init()
pmpi_init_bindings = ["PMPI_INIT", "pmpi_init", "pmpi_init_", "pmpi_init__"]
# Possible function return types to consider, used for declaration parser.
# In general, all MPI calls we care about return int. We include double
# to grab MPI_Wtick and MPI_Wtime, but we'll ignore the f2c and c2f calls
# that return MPI_Datatypes and other such things.
rtypes = ['int', 'double' ]
# If we find these strings in a declaration, exclude it from consideration.
exclude_strings = [ "c2f", "f2c", "typedef" ]
# Regular expressions for start and end of declarations in mpi.h. These are
# used to get the declaration strings out for parsing with formal_re below.
begin_decl_re = re.compile("(" + "|".join(rtypes) + ")\s+(MPI_\w+)\s*\(")
exclude_re = re.compile("|".join(exclude_strings))
end_decl_re = re.compile("\).*\;")
# Regular Expression for splitting up args. Matching against this
# returns four groups: type info, pointer info, arg name, and array info
formal_re = re.compile(
"\s*(" + # Start type
"(?:const)?\s*" + # Initial const
"\w+" # Type name (note: doesn't handle 'long long', etc. right now)
")\s*(" + # End type, begin pointers
"(?:\s*\*(?:\s*const)?)*" + # Look for 0 or more pointers with optional 'const'
")\s*" # End pointers
"(?:(\w+)\s*)?" + # Argument name. Optional.
"(\[.*\])?\s*$" # Array type. Also optional. Works for multidimensions b/c it's greedy.
)
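# For example, an argument string such as "const int *recvcounts" is expected
# to split roughly into ("const int", "*", "recvcounts", None), while a plain
# handle argument like "MPI_Comm comm" yields ("MPI_Comm", "", "comm", None).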
# Fortran wrapper suffix
f_wrap_suffix = "_fortran_wrapper"
# Initial includes and defines for wrapper files.
wrapper_includes = '''
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _EXTERN_C_
#ifdef __cplusplus
#define _EXTERN_C_ extern "C"
#else /* __cplusplus */
#define _EXTERN_C_
#endif /* __cplusplus */
#endif /* _EXTERN_C_ */
#ifdef MPICH_HAS_C2F
_EXTERN_C_ void *MPIR_ToPointer(int);
#endif // MPICH_HAS_C2F
#ifdef PIC
/* For shared libraries, declare these weak and figure out which one was linked
based on which init wrapper was called. See mpi_init wrappers. */
#pragma weak pmpi_init
#pragma weak PMPI_INIT
#pragma weak pmpi_init_
#pragma weak pmpi_init__
#endif /* PIC */
_EXTERN_C_ void pmpi_init(MPI_Fint *ierr);
_EXTERN_C_ void PMPI_INIT(MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init_(MPI_Fint *ierr);
_EXTERN_C_ void pmpi_init__(MPI_Fint *ierr);
'''
# Default modifiers for generated bindings
default_modifiers = ["_EXTERN_C_"] # _EXTERN_C_ is #defined (or not) in wrapper_includes. See above.
# Set of MPI Handle types
mpi_handle_types = set(["MPI_Comm", "MPI_Errhandler", "MPI_File", "MPI_Group", "MPI_Info",
"MPI_Op", "MPI_Request", "MPI_Status", "MPI_Datatype", "MPI_Win" ])
# MPI Calls that have array parameters, and mappings from the array parameter positions to the position
# of the 'count' paramters that determine their size
mpi_array_calls = {
"MPI_Startall" : { 1:0 },
"MPI_Testall" : { 1:0, 3:0 },
"MPI_Testany" : { 1:0 },
"MPI_Testsome" : { 1:0, 4:0 },
"MPI_Type_create_struct" : { 3:0 },
"MPI_Type_get_contents" : { 6:1 },
"MPI_Type_struct" : { 3:0 },
"MPI_Waitall" : { 1:0, 2:0 },
"MPI_Waitany" : { 1:0 },
"MPI_Waitsome" : { 1:0, 4:0 }
}
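# For example, the MPI_Waitall entry { 1:0, 2:0 } says that the request and
# status arrays at argument positions 1 and 2 are both sized by the 'count'
# argument at position 0.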
def find_matching_paren(string, index, lparen='(', rparen=')'):
"""Find the closing paren corresponding to the open paren at <index>
in <string>. Optionally, can provide other characters to match on.
If found, returns the index of the matching parenthesis. If not found,
returns -1.
"""
if not string[index] == lparen:
raise ValueError("Character at index %d is '%s'. Expected '%s'"
% (index, string[index], lparen))
index += 1
count = 1
while index < len(string) and count > 0:
while index < len(string) and string[index] not in (lparen, rparen):
index += 1
if string[index] == lparen:
count += 1
elif string[index] == rparen:
count -= 1
if count == 0:
return index
else:
return -1
def isindex(str):
"""True if a string is something we can index an array with."""
try:
int(str)
return True
except ValueError:
return False
def once(function):
if not hasattr(function, "did_once"):
function()
function.did_once = True
# Returns MPI_Blah_[f2c,c2f] prefix for a handle type. MPI_Datatype is a special case.
def conversion_prefix(handle_type):
if handle_type == "MPI_Datatype":
return "MPI_Type"
else:
return handle_type
# Special join function for joining lines together. Puts "\n" at the end too.
def joinlines(list, sep="\n"):
if list:
return sep.join(list) + sep
else:
return ""
# Possible types of Tokens in input.
LBRACE, RBRACE, TEXT, IDENTIFIER = range(4)
class Token:
"""Represents tokens; generated from input by lexer and fed to parse()."""
def __init__(self, type, value, line=0):
self.type = type # Type of token
self.value = value # Text value
self.line = line
def __str__(self):
return "'%s'" % re.sub(r'\n', "\\\\n", self.value)
def isa(self, type):
return self.type == type
class LineTrackingLexer(object):
"""Base class for Lexers that keep track of line numbers."""
def __init__(self, lexicon):
self.line_no = -1
self.scanner = re.Scanner(lexicon)
def make_token(self, type, value):
token = Token(type, value, self.line_no)
self.line_no += value.count("\n")
return token
def lex(self, text):
self.line_no = 0
tokens, remainder = self.scanner.scan(text)
if remainder:
sys.stderr.write("Unlexable input:\n%s\n" % remainder)
sys.exit(1)
self.line_no = -1
return tokens
class OuterRegionLexer(LineTrackingLexer):
def __init__(self):
super(OuterRegionLexer, self).__init__([
(r'{{', self.lbrace),
(r'}}', self.rbrace),
(r'({(?!{)|}(?!})|[^{}])*', self.text)])
def lbrace(self, scanner, token): return self.make_token(LBRACE, token)
def rbrace(self, scanner, token): return self.make_token(RBRACE, token)
def text(self, scanner, token): return self.make_token(TEXT, token)
class OuterCommentLexer(OuterRegionLexer):
def __init__(self):
super(OuterRegionLexer, self).__init__([
(r'/\*(.|[\r\n])*?\*/', self.text), # multiline comment
(r'//(.|[\r\n])*?(?=[\r\n])', self.text), # single line comment
(r'{{', self.lbrace),
(r'}}', self.rbrace),
(r'({(?!{)|}(?!})|/(?![/*])|[^{}/])*', self.text)])
class InnerLexer(OuterRegionLexer):
def __init__(self):
super(OuterRegionLexer, self).__init__([
(r'{{', self.lbrace),
(r'}}', self.rbrace),
(r'(["\'])?((?:(?!\1)[^\\]|\\.)*)\1', self.quoted_id),
(r'([^\s]+)', self.identifier),
(r'\s+', None)])
def identifier(self, scanner, token): return self.make_token(IDENTIFIER, token)
def quoted_id(self, scanner, token):
# remove quotes from quoted ids. Note that ids and quoted ids are pretty much the same thing;
# the quotes are just optional. You only need them if you need spaces in your expression.
return self.make_token(IDENTIFIER, re.sub(r'^["\'](.*)["\']$', '\\1', token))
# Global current filename and function name for error msgs
cur_filename = ""
cur_function = None
class WrapSyntaxError:
"""Simple Class for syntax errors raised by the wrapper generator (rather than python)"""
pass
def syntax_error(msg):
# TODO: make line numbers actually work.
sys.stderr.write("%s:%d: %s\n" % (cur_filename, 0, msg))
if cur_function:
sys.stderr.write(" While handling %s.\n" % cur_function)
raise WrapSyntaxError
################################################################################
# MPI Semantics:
# Classes in this section describe MPI declarations and types. These are used
# to parse the mpi.h header and to generate wrapper code.
################################################################################
class Scope:
""" This is the very basic class for scopes in the wrapper generator. Scopes
are hierarchical and support nesting. They contain string keys mapped
to either string values or to macro functions.
Scopes also keep track of the particular macro they correspond to (macro_name).
"""
def __init__(self, enclosing_scope=None):
self.map = {}
self.enclosing_scope = enclosing_scope
self.macro_name = None # For better debugging error messages
def __getitem__(self, key):
if key in self.map: return self.map[key]
elif self.enclosing_scope: return self.enclosing_scope[key]
else: raise KeyError(key + " is not in scope.")
def __contains__(self, key):
if key in self.map: return True
elif self.enclosing_scope: return key in self.enclosing_scope
else: return False
def __setitem__(self, key, value):
self.map[key] = value
def include(self, map):
"""Add entire contents of the map (or scope) to this scope."""
self.map.update(map)
################################################################################
# MPI Semantics:
# Classes in this section describe MPI declarations and types. These are used
# to parse the mpi.h header and to generate wrapper code.
################################################################################
# Map from function name to declaration created from mpi.h.
mpi_functions = {}
class Param:
"""Descriptor for formal parameters of MPI functions.
Doesn't represent a full parse, only the initial type information,
name, and array info of the argument split up into strings.
"""
def __init__(self, type, pointers, name, array, pos):
self.type = type # Name of arg's type (might include things like 'const')
self.pointers = pointers # Pointers
self.name = name # Formal parameter name (from header or autogenerated)
self.array = array # Any array type information after the name
self.pos = pos # Position of arg in declaration
self.decl = None # This gets set later by Declaration
def setDeclaration(self, decl):
"""Needs to be called by Declaration to finish initing the arg."""
self.decl = decl
def isHandleArray(self):
"""True if this Param represents an array of MPI handle values."""
return (self.decl.name in mpi_array_calls
and self.pos in mpi_array_calls[self.decl.name])
def countParam(self):
"""If this Param is a handle array, returns the Param that represents the count of its elements"""
return self.decl.args[mpi_array_calls[self.decl.name][self.pos]]
def isHandle(self):
"""True if this Param is one of the MPI builtin handle types."""
return self.type in mpi_handle_types
def isStatus(self):
"""True if this Param is an MPI_Status. MPI_Status is handled differently
in c2f/f2c calls from the other handle types.
"""
return self.type == "MPI_Status"
def fortranFormal(self):
"""Prints out a formal parameter for a fortran wrapper."""
# There are only a few possible fortran arg types in our wrappers, since
# everything is a pointer.
if self.type == "MPI_Aint" or self.type.endswith("_function"):
ftype = self.type
else:
ftype = "MPI_Fint"
# Arrays don't come in as pointers (they're passed as arrays)
# Everything else is a pointer.
if self.pointers:
pointers = self.pointers
elif self.array:
pointers = ""
else:
pointers = "*"
# Put it all together and return the fortran wrapper type here.
arr = self.array or ''
return "%s %s%s%s" % (ftype, pointers, self.name, arr)
def cType(self):
if not self.type:
return ''
else:
arr = self.array or ''
pointers = self.pointers or ''
return "%s%s%s" % (self.type, pointers, arr)
def cFormal(self):
"""Prints out a formal parameter for a C wrapper."""
if not self.type:
return self.name # special case for '...'
else:
arr = self.array or ''
pointers = self.pointers or ''
return "%s %s%s%s" % (self.type, pointers, self.name, arr)
def castType(self):
arr = self.array or ''
pointers = self.pointers or ''
if '[]' in arr:
if arr.count('[') > 1:
pointers += '(*)' # need extra parens for, e.g., int[][3] -> int(*)[3]
else:
pointers += '*' # just a single array; can pass a pointer.
arr = arr.replace('[]', '')
return "%s%s%s" % (self.type, pointers, arr)
def __str__(self):
return self.cFormal()
class Declaration:
""" Descriptor for simple MPI function declarations.
Contains return type, name of function, and a list of args.
"""
def __init__(self, rtype, name):
self.rtype = rtype
self.name = name
self.args = []
def addArgument(self, arg):
arg.setDeclaration(self)
self.args.append(arg)
def __iter__(self):
for arg in self.args: yield arg
def __str__(self):
return self.prototype()
def retType(self):
return self.rtype
def formals(self):
return [arg.cFormal() for arg in self.args]
def types(self):
return [arg.cType() for arg in self.args]
def argsNoEllipsis(self):
return filter(lambda arg: arg.name != "...", self.args)
def returnsErrorCode(self):
"""This is a special case for MPI_Wtime and MPI_Wtick.
These functions actually return a double value instead of an int error code.
"""
return self.rtype == "int"
def argNames(self):
return [arg.name for arg in self.argsNoEllipsis()]
def getArgName(self, index):
return self.argsNoEllipsis()[index].name
def fortranFormals(self):
formals = map(Param.fortranFormal, self.argsNoEllipsis())
if self.name == "MPI_Init": formals = [] # Special case for init: no args in fortran
ierr = []
if self.returnsErrorCode(): ierr = ["MPI_Fint *ierr"]
return formals + ierr
def fortranArgNames(self):
names = self.argNames()
if self.name == "MPI_Init": names = []
ierr = []
if self.returnsErrorCode(): ierr = ["ierr"]
return names + ierr
def prototype(self, modifiers=""):
if modifiers: modifiers = joinlines(modifiers, " ")
return "%s%s %s(%s)" % (modifiers, self.retType(), self.name, ", ".join(self.formals()))
def pmpi_prototype(self, modifiers=""):
if modifiers: modifiers = joinlines(modifiers, " ")
return "%s%s P%s(%s)" % (modifiers, self.retType(), self.name, ", ".join(self.formals()))
def fortranPrototype(self, name=None, modifiers=""):
if not name: name = self.name
if modifiers: modifiers = joinlines(modifiers, " ")
if self.returnsErrorCode():
rtype = "void" # Fortran calls use ierr parameter instead
else:
rtype = self.rtype
return "%s%s %s(%s)" % (modifiers, rtype, name, ", ".join(self.fortranFormals()))
types = set()
all_pointers = set()
def enumerate_mpi_declarations(mpicc, includes):
""" Invokes mpicc's C preprocessor on a C file that includes mpi.h.
Parses the output for declarations, and yields each declaration to
the caller.
"""
# Create an input file that just includes <mpi.h>
tmpfile = tempfile.NamedTemporaryFile('w+b', -1, '.c')
tmpname = "%s" % tmpfile.name
tmpfile.write('#include <mpi.h>')
tmpfile.write("\n")
tmpfile.flush()
# Run the mpicc -E on the temp file and pipe the output
# back to this process for parsing.
string_includes = ["-I"+dir for dir in includes]
mpicc_cmd = "%s -E %s" % (mpicc, " ".join(string_includes))
try:
popen = subprocess.Popen("%s %s" % (mpicc_cmd, tmpname), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except IOError:
sys.stderr.write("IOError: couldn't run '" + mpicc_cmd + "' for parsing mpi.h\n")
sys.exit(1)
# Parse out the declarations from the MPI file
mpi_h = popen.stdout
for line in mpi_h:
line = line.strip()
begin = begin_decl_re.search(line)
if begin and not exclude_re.search(line):
# Grab return type and fn name from initial parse
return_type, fn_name = begin.groups()
# Accumulate rest of declaration (possibly multi-line)
while not end_decl_re.search(line):
line += " " + mpi_h.next().strip()
# Split args up by commas so we can parse them independently
fn_and_paren = r'(%s\s*\()' % fn_name
match = re.search(fn_and_paren, line)
lparen = match.start(1) + len(match.group(1)) - 1
rparen = find_matching_paren(line, lparen)
if rparen < 0:
raise ValueError("Malformed declaration in header: '%s'" % line)
arg_string = line[lparen+1:rparen]
arg_list = map(lambda s: s.strip(), arg_string.split(","))
# Handle functions that take no args specially
if arg_list == ['void']:
arg_list = []
# Parse formal parameter descriptors out of args
decl = Declaration(return_type, fn_name)
arg_num = 0
for arg in arg_list:
if arg == '...': # Special case for Pcontrol.
decl.addArgument(Param(None, None, '...', None, arg_num))
else:
match = formal_re.match(arg)
if not match:
sys.stderr.write("MATCH FAILED FOR: '%s' in %s\n" % (arg, fn_name))
sys.exit(1)
type, pointers, name, array = match.groups()
types.add(type)
all_pointers.add(pointers)
# If there's no name, make one up.
if not name: name = "arg_" + str(arg_num)
decl.addArgument(Param(type.strip(), pointers, name, array, arg_num))
arg_num += 1
yield decl
mpi_h.close()
return_code = popen.wait()
if return_code != 0:
sys.stderr.write("Error: Couldn't run '%s' for parsing mpi.h.\n" % mpicc_cmd)
sys.stderr.write(" Process exited with code %d.\n" % return_code)
sys.exit(1)
# Do some cleanup once we're done reading.
tmpfile.close()
def write_enter_guard(out, decl):
"""Prevent us from entering wrapper functions if we're already in a wrapper function.
Just call the PMPI function w/o the wrapper instead."""
if output_guards:
out.write(" if (in_wrapper) return P%s(%s);\n" % (decl.name, ", ".join(decl.argNames())))
out.write(" in_wrapper = 1;\n")
def write_exit_guard(out):
"""After a call, set in_wrapper back to 0 so we can enter the next call."""
if output_guards:
out.write(" in_wrapper = 0;\n")
def write_c_wrapper(out, decl, return_val, write_body):
"""Write the C wrapper for an MPI function."""
# Write the PMPI prototype here in case mpi.h doesn't define it
# (sadly the case with some MPI implementations)
out.write(decl.pmpi_prototype(default_modifiers))
out.write(";\n")
# Now write the wrapper function, which will call the PMPI function we declared.
out.write(decl.prototype(default_modifiers))
out.write(" { \n")
out.write(" %s %s = 0;\n" % (decl.retType(), return_val))
write_enter_guard(out, decl)
write_body(out)
write_exit_guard(out)
out.write(" return %s;\n" % return_val)
out.write("}\n\n")
def write_fortran_binding(out, decl, delegate_name, binding, stmts=None):
"""Outputs a wrapper for a particular fortran binding that delegates to the
primary Fortran wrapper. Optionally takes a list of statements to execute
before delegating.
"""
out.write(decl.fortranPrototype(binding, default_modifiers))
out.write(" { \n")
if stmts:
out.write(joinlines(map(lambda s: " " + s, stmts)))
if decl.returnsErrorCode():
# regular MPI fortran functions use an error code
out.write(" %s(%s);\n" % (delegate_name, ", ".join(decl.fortranArgNames())))
else:
# wtick and wtime return a value
out.write(" return %s(%s);\n" % (delegate_name, ", ".join(decl.fortranArgNames())))
out.write("}\n\n")
class FortranDelegation:
"""Class for constructing a call to a Fortran wrapper delegate function. Provides
storage for local temporary variables, copies of parameters, callsites for MPI-1 and
MPI-2, and writebacks to local pointer types.
"""
def __init__(self, decl, return_val):
self.decl = decl
self.return_val = return_val
self.temps = set()
self.copies = []
self.writebacks = []
self.actuals = []
self.mpich_actuals = []
def addTemp(self, type, name):
"""Adds a temp var with a particular name. Adds the same var only once."""
temp = " %s %s;" % (type, name)
self.temps.add(temp)
def addActual(self, actual):
self.actuals.append(actual)
self.mpich_actuals.append(actual)
def addActualMPICH(self, actual):
self.mpich_actuals.append(actual)
def addActualMPI2(self, actual):
self.actuals.append(actual)
def addWriteback(self, stmt):
self.writebacks.append(" %s" % stmt)
def addCopy(self, stmt):
self.copies.append(" %s" % stmt)
def write(self, out):
assert len(self.actuals) == len(self.mpich_actuals)
call = " %s = %s" % (self.return_val, self.decl.name)
mpich_call = "%s(%s);\n" % (call, ", ".join(self.mpich_actuals))
mpi2_call = "%s(%s);\n" % (call, ", ".join(self.actuals))
out.write(" %s %s = 0;\n" % (self.decl.retType(), self.return_val))
if mpich_call == mpi2_call and not (self.temps or self.copies or self.writebacks):
out.write(mpich_call)
else:
out.write("#if (!defined(MPICH_HAS_C2F) && defined(MPICH_NAME) && (MPICH_NAME == 1)) /* MPICH test */\n")
out.write(mpich_call)
out.write("#else /* MPI-2 safe call */\n")
out.write(joinlines(self.temps))
out.write(joinlines(self.copies))
out.write(mpi2_call)
out.write(joinlines(self.writebacks))
out.write("#endif /* MPICH test */\n")
def write_fortran_wrappers(out, decl, return_val):
"""Writes primary fortran wrapper that handles arg translation.
Also outputs bindings for this wrapper for different types of fortran compilers.
"""
delegate_name = decl.name + f_wrap_suffix
out.write(decl.fortranPrototype(delegate_name, ["static"]))
out.write(" { \n")
call = FortranDelegation(decl, return_val)
if decl.name == "MPI_Init":
# Use out.write() here so it comes at very beginning of wrapper function
out.write(" int argc = 0;\n");
out.write(" char ** argv = NULL;\n");
call.addActual("&argc");
call.addActual("&argv");
call.write(out)
out.write(" *ierr = %s;\n" % return_val)
out.write("}\n\n")
# Write out various bindings that delegate to the main fortran wrapper
write_fortran_binding(out, decl, delegate_name, "MPI_INIT", ["fortran_init = 1;"])
write_fortran_binding(out, decl, delegate_name, "mpi_init", ["fortran_init = 2;"])
write_fortran_binding(out, decl, delegate_name, "mpi_init_", ["fortran_init = 3;"])
write_fortran_binding(out, decl, delegate_name, "mpi_init__", ["fortran_init = 4;"])
return
# This loop processes the rest of the call for all other routines.
for arg in decl.args:
if arg.name == "...": # skip ellipsis
continue
if not (arg.pointers or arg.array):
if not arg.isHandle():
# These are pass-by-value arguments, so just deref and pass thru
dereferenced = "*%s" % arg.name
call.addActual(dereferenced)
else:
# Non-ptr, non-arr handles need to be converted with MPI_Blah_f2c
# No special case for MPI_Status here because MPI_Statuses are never passed by value.
call.addActualMPI2("%s_f2c(*%s)" % (conversion_prefix(arg.type), arg.name))
call.addActualMPICH("(%s)(*%s)" % (arg.type, arg.name))
else:
if not arg.isHandle():
# Non-MPI handle pointer types can be passed w/o dereferencing, but need to
# cast to correct pointer type first (from MPI_Fint*).
call.addActual("(%s)%s" % (arg.castType(), arg.name))
else:
# For MPI-1, assume ints, cross fingers, and pass things straight through.
call.addActualMPICH("(%s*)%s" % (arg.type, arg.name))
conv = conversion_prefix(arg.type)
temp = "temp_%s" % arg.name
# For MPI-2, other pointer and array types need temporaries and special conversions.
if not arg.isHandleArray():
call.addTemp(arg.type, temp)
call.addActualMPI2("&%s" % temp)
if arg.isStatus():
call.addCopy("%s_f2c(%s, &%s);" % (conv, arg.name, temp))
call.addWriteback("%s_c2f(&%s, %s);" % (conv, temp, arg.name))
else:
call.addCopy("%s = %s_f2c(*%s);" % (temp, conv, arg.name))
call.addWriteback("*%s = %s_c2f(%s);" % (arg.name, conv, temp))
else:
# Make temporary variables for the array and the loop var
temp_arr_type = "%s*" % arg.type
call.addTemp(temp_arr_type, temp)
call.addTemp("int", "i")
# generate a copy and a writeback statement for this type of handle
if arg.isStatus():
copy = " %s_f2c(&%s[i], &%s[i])" % (conv, arg.name, temp)
writeback = " %s_c2f(&%s[i], &%s[i])" % (conv, temp, arg.name)
else:
copy = " temp_%s[i] = %s_f2c(%s[i])" % (arg.name, conv, arg.name)
writeback = " %s[i] = %s_c2f(temp_%s[i])" % (arg.name, conv, arg.name)
# Generate the call surrounded by temp array allocation, copies, writebacks, and temp free
count = "*%s" % arg.countParam().name
call.addCopy("%s = (%s)malloc(sizeof(%s) * %s);" %
(temp, temp_arr_type, arg.type, count))
call.addCopy("for (i=0; i < %s; i++)" % count)
call.addCopy("%s;" % copy)
call.addActualMPI2(temp)
call.addWriteback("for (i=0; i < %s; i++)" % count)
call.addWriteback("%s;" % writeback)
call.addWriteback("free(%s);" % temp)
call.write(out)
if decl.returnsErrorCode():
out.write(" *ierr = %s;\n" % return_val)
else:
out.write(" return %s;\n" % return_val)
out.write("}\n\n")
# Write out various bindings that delegate to the main fortran wrapper
write_fortran_binding(out, decl, delegate_name, decl.name.upper())
write_fortran_binding(out, decl, delegate_name, decl.name.lower())
write_fortran_binding(out, decl, delegate_name, decl.name.lower() + "_")
write_fortran_binding(out, decl, delegate_name, decl.name.lower() + "__")
################################################################################
# Macros:
# - functions annotated as @macro or @bodymacro define the global macros and
# basic pieces of the generator.
# - include_decl is used to include MPI declarations into function scopes.
################################################################################
# Table of global macros
macros = {}
# This decorator adds macro functions to the outermost function scope.
def macro(macro_name, **attrs):
def decorate(fun):
macros[macro_name] = fun # Add macro to outer scope under supplied name
fun.has_body = False # By default, macros have no body.
for key in attrs: # Optionally set/override attributes
setattr(fun, key, attrs[key])
return fun
return decorate
def handle_list(list_name, list, args):
"""This function handles indexing lists used as macros in the wrapper generator.
There are two syntaxes:
{{<list_name>}} Evaluates to the whole list, e.g. 'foo, bar, baz'
{{<list_name> <index>}} Evaluates to a particular element of a list.
"""
if not args:
return list
else:
len(args) == 1 or syntax_error("Wrong number of args for list expression.")
try:
return list[int(args[0])]
except ValueError:
syntax_error("Invald index value: '%s'" % args[0])
except IndexError:
syntax_error("Index out of range in '%s': %d" % (list_name, index))
class TypeApplier:
"""This class implements a Macro function for applying something callable to
args in a decl with a particular type.
"""
def __init__(self, decl):
self.decl = decl
def __call__(self, out, scope, args, children):
len(args) == 2 or syntax_error("Wrong number of args in apply macro.")
type, macro_name = args
for arg in self.decl.args:
if arg.cType() == type:
out.write("%s(%s);\n" % (macro_name, arg.name))
def include_decl(scope, decl):
"""This function is used by macros to include attributes MPI declarations in their scope."""
scope["ret_type"] = decl.retType()
scope["args"] = decl.argNames()
scope["nargs"] = len(decl.argNames())
scope["types"] = decl.types()
scope["formals"] = decl.formals()
scope["apply_to_type"] = TypeApplier(decl)
scope.function_name = decl.name
# These are old-style, deprecated names.
def get_arg(out, scope, args, children):
return handle_list("args", decl.argNames(), args)
scope["get_arg"] = get_arg
scope["applyToType"] = scope["apply_to_type"]
scope["retType"] = scope["ret_type"]
scope["argList"] = "(%s)" % ", ".join(scope["args"])
scope["argTypeList"] = "(%s)" % ", ".join(scope["formals"])
def all_but(fn_list):
"""Return a list of all mpi functions except those in fn_list"""
all_mpi = set(mpi_functions.keys())
diff = all_mpi - set(fn_list)
return [x for x in diff]
@macro("foreachfn", has_body=True)
def foreachfn(out, scope, args, children):
"""Iterate over all functions listed in args."""
args or syntax_error("Error: foreachfn requires function name argument.")
global cur_function
fn_var = args[0]
for fn_name in args[1:]:
cur_function = fn_name
if not fn_name in mpi_functions:
syntax_error(fn_name + " is not an MPI function")
fn = mpi_functions[fn_name]
fn_scope = Scope(scope)
fn_scope[fn_var] = fn_name
include_decl(fn_scope, fn)
for child in children:
child.evaluate(out, fn_scope)
cur_function = None
@macro("fn", has_body=True)
def fn(out, scope, args, children):
"""Iterate over listed functions and generate skeleton too."""
args or syntax_error("Error: fn requires function name argument.")
global cur_function
fn_var = args[0]
for fn_name in args[1:]:
cur_function = fn_name
if not fn_name in mpi_functions:
syntax_error(fn_name + " is not an MPI function")
fn = mpi_functions[fn_name]
return_val = "_wrap_py_return_val"
fn_scope = Scope(scope)
fn_scope[fn_var] = fn_name
include_decl(fn_scope, fn)
fn_scope["ret_val"] = return_val
fn_scope["returnVal"] = fn_scope["ret_val"] # deprecated name.
c_call = "%s = P%s(%s);" % (return_val, fn.name, ", ".join(fn.argNames()))
if fn_name == "MPI_Init" and output_fortran_wrappers:
def callfn(out, scope, args, children):
# All this is to deal with fortran, since fortran's MPI_Init() function is different
# from C's. We need to make sure to delegate specifically to the fortran init wrapping.
# For dynamic libs, we use weak symbols to pick it automatically. For static libs, need
# to rely on input from the user via pmpi_init_binding and the -i option.
out.write(" if (fortran_init) {\n")
out.write("#ifdef PIC\n")
out.write(" if (!PMPI_INIT && !pmpi_init && !pmpi_init_ && !pmpi_init__) {\n")
out.write(" fprintf(stderr, \"ERROR: Couldn't find fortran pmpi_init function. Link against static library instead.\\n\");\n")
out.write(" exit(1);\n")
out.write(" }")
out.write(" switch (fortran_init) {\n")
out.write(" case 1: PMPI_INIT(&%s); break;\n" % return_val)
out.write(" case 2: pmpi_init(&%s); break;\n" % return_val)
out.write(" case 3: pmpi_init_(&%s); break;\n" % return_val)
out.write(" case 4: pmpi_init__(&%s); break;\n" % return_val)
out.write(" default:\n")
out.write(" fprintf(stderr, \"NO SUITABLE FORTRAN MPI_INIT BINDING\\n\");\n")
out.write(" break;\n")
out.write(" }\n")
out.write("#else /* !PIC */\n")
out.write(" %s(&%s);\n" % (pmpi_init_binding, return_val))
out.write("#endif /* !PIC */\n")
out.write(" } else {\n")
out.write(" %s\n" % c_call)
out.write(" }\n")
fn_scope["callfn"] = callfn
def write_fortran_init_flag():
output.write("static int fortran_init = 0;\n")
once(write_fortran_init_flag)
else:
fn_scope["callfn"] = c_call
def write_body(out):
for child in children:
child.evaluate(out, fn_scope)
out.write("/* ================== C Wrappers for %s ================== */\n" % fn_name)
write_c_wrapper(out, fn, return_val, write_body)
if output_fortran_wrappers:
out.write("/* =============== Fortran Wrappers for %s =============== */\n" % fn_name)
write_fortran_wrappers(out, fn, return_val)
out.write("/* ================= End Wrappers for %s ================= */\n\n\n" % fn_name)
cur_function = None
@macro("forallfn", has_body=True)
def forallfn(out, scope, args, children):
"""Iterate over all but the functions listed in args."""
args or syntax_error("Error: forallfn requires function name argument.")
foreachfn(out, scope, [args[0]] + all_but(args[1:]), children)
@macro("fnall", has_body=True)
def fnall(out, scope, args, children):
"""Iterate over all but listed functions and generate skeleton too."""
args or syntax_error("Error: fnall requires function name argument.")
fn(out, scope, [args[0]] + all_but(args[1:]), children)
@macro("sub")
def sub(out, scope, args, children):
"""{{sub <string> <regexp> <substitution>}}
Replaces value of <string> with all instances of <regexp> replaced with <substitution>.
"""
len(args) == 3 or syntax_error("'sub' macro takes exactly 3 arguments.")
string, regex, substitution = args
if isinstance(string, list):
return [re.sub(regex, substitution, s) for s in string]
if not isinstance(regex, str):
syntax_error("Invalid regular expression in 'sub' macro: '%s'" % regex)
else:
return re.sub(regex, substitution, string)
@macro("zip")
def zip_macro(out, scope, args, children):
len(args) == 2 or syntax_error("'zip' macro takes exactly 2 arguments.")
if not all([isinstance(a, list) for a in args]):
syntax_error("Arguments to 'zip' macro must be lists.")
a, b = args
return ["%s %s" % x for x in zip(a, b)]
@macro("def")
def def_macro(out, scope, args, children):
len(args) == 2 or syntax_error("'def' macro takes exactly 2 arguments.")
scope[args[0]] = args[1]
@macro("list")
def list_macro(out, scope, args, children):
result = []
for arg in args:
if isinstance(arg, list):
result.extend(arg)
else:
result.append(arg)
return result
@macro("filter")
def filter_macro(out, scope, args, children):
"""{{filter <regex> <list>}}
Returns a list containing all elements of <list> that <regex> matches.
"""
len(args) == 2 or syntax_error("'filter' macro takes exactly 2 arguments.")
regex, l = args
if not isinstance(l, list):
syntax_error("Invalid list in 'filter' macro: '%s'" % str(list))
if not isinstance(regex, str):
syntax_error("Invalid regex in 'filter' macro: '%s'" % str(regex))
def match(s):
return re.search(regex, s)
return filter(match, l)
@macro("fn_num")
def fn_num(out, scope, args, children):
val = fn_num.val
fn_num.val += 1
return val
fn_num.val = 0 # init the counter here.
################################################################################
# Parser support:
# - Chunk class for bits of parsed text on which macros are executed.
# - parse() function uses a Lexer to examine a file.
################################################################################
class Chunk:
"""Represents a piece of a wrapper file. Is either a text chunk
or a macro chunk with children to which the macro should be applied.
macros are evaluated lazily, so the macro is just a string until
execute is called and it is fetched from its enclosing scope."""
def __init__(self):
self.macro = None
self.args = []
self.text = None
self.children = []
def iwrite(self, file, level, text):
"""Write indented text."""
for x in xrange(level):
file.write(" ")
file.write(text)
def write(self, file=sys.stdout, l=0):
if self.macro: self.iwrite(file, l, "{{%s %s}}" % (self.macro, " ".join([str(arg) for arg in self.args])))
if self.text: self.iwrite(file, l, "TEXT\n")
for child in self.children:
child.write(file, l+1)
def execute(self, out, scope):
"""This function executes a chunk. For strings, lists, text chunks, etc., this just
entails returning the chunk's value. For callable macros, this executes and returns
the chunk's value.
"""
if not self.macro:
out.write(self.text)
else:
if not self.macro in scope:
error_msg = "Invalid macro: '%s'" % self.macro
if scope.function_name:
error_msg += " for " + scope.function_name
syntax_error(error_msg)
value = scope[self.macro]
if hasattr(value, "__call__"):
# It's a macro, so we need to execute it. But first evaluate its args.
def eval_arg(arg):
if isinstance(arg, Chunk):
return arg.execute(out, scope)
else:
return arg
args = [eval_arg(arg) for arg in self.args]
return value(out, scope, args, self.children)
elif isinstance(value, list):
# Special case for handling lists and list indexing
return handle_list(self.macro, value, self.args)
else:
# Just return the value of anything else
return value
def stringify(self, value):
"""Used by evaluate() to print the return values of chunks out to the output file."""
if isinstance(value, list):
return ", ".join(value)
else:
return str(value)
def evaluate(self, out, scope):
"""This is an 'interactive' version of execute. This should be called when
the chunk's value (if any) should be written out. Body macros and the outermost
scope should use this instead of execute().
"""
value = self.execute(out, scope)
if value is not None: # Note the distinction here -- 0 is false but we want to print it!
out.write(self.stringify(value))
class Parser:
"""Parser for the really simple wrappergen grammar.
This parser has support for multiple lexers. self.tokens is a list of iterables, each
representing a new token stream. You can add additional tokens to be lexed using push_tokens.
This will cause the pushed tokens to be handled before any others. This allows us to switch
lexers while parsing, so that the outer part of the file is processed in a language-agnostic
way, but stuff inside macros is handled as its own macro language.
"""
def __init__(self, macros):
self.macros = macros
self.macro_lexer = InnerLexer()
self.tokens = iter([]) # iterators over tokens, handled in order. Starts empty.
self.token = None # last accepted token
self.next = None # next token
def gettok(self):
"""Puts the next token in the input stream into self.next."""
try:
self.next = self.tokens.next()
except StopIteration:
self.next = None
def push_tokens(self, iterable):
"""Adds all tokens in some iterable to the token stream."""
self.tokens = itertools.chain(iter(iterable), iter([self.next]), self.tokens)
self.gettok()
def accept(self, id):
"""Puts the next symbol in self.token if we like it. Then calls gettok()"""
if self.next.isa(id):
self.token = self.next
self.gettok()
return True
return False
def unexpected_token(self):
syntax_error("Unexpected token: %s." % self.next)
def expect(self, id):
"""Like accept(), but fails if we don't like the next token."""
if self.accept(id):
return True
else:
if self.next:
self.unexpected_token()
else:
syntax_error("Unexpected end of file.")
sys.exit(1)
def is_body_macro(self, name):
"""Shorthand for testing whether a particular name is the name of a macro that has a body.
Need this for parsing the language b/c things like {{fn}} need a corresponding {{endfn}}.
"""
return name in self.macros and self.macros[name].has_body
def macro(self, accept_body_macros=True):
# lex inner-macro text as wrapper language if we encounter text here.
if self.accept(TEXT):
self.push_tokens(self.macro_lexer.lex(self.token.value))
# Now proceed with parsing the macro language's tokens
chunk = Chunk()
self.expect(IDENTIFIER)
chunk.macro = self.token.value
if not accept_body_macros and self.is_body_macro(chunk.macro):
syntax_error("Cannot use body macros in expression context: '%s'" % chunk.macro)
sys.exit(1)
while True:
if self.accept(LBRACE):
chunk.args.append(self.macro(False))
elif self.accept(IDENTIFIER):
chunk.args.append(self.token.value)
elif self.accept(TEXT):
self.push_tokens(self.macro_lexer.lex(self.token.value))
else:
self.expect(RBRACE)
break
return chunk
def text(self, end_macro = None):
chunks = []
while self.next:
if self.accept(TEXT):
chunk = Chunk()
chunk.text = self.token.value
chunks.append(chunk)
elif self.accept(LBRACE):
chunk = self.macro()
name = chunk.macro
if name == end_macro:
# end macro: just break and don't append
break
elif isindex(chunk.macro):
# Special case for indices -- raw number macros index 'args' list
chunk.macro = "args"
chunk.args = [name]
elif self.is_body_macro(name):
chunk.children = self.text("end"+name)
chunks.append(chunk)
else:
self.unexpected_token()
return chunks
def parse(self, text):
if skip_headers:
outer_lexer = OuterRegionLexer() # Not generating C code, text is text.
else:
outer_lexer = OuterCommentLexer() # C code. Considers C-style comments.
self.push_tokens(outer_lexer.lex(text))
return self.text()
################################################################################
# Main script:
# Get arguments, set up outer scope, parse files, generate wrappers.
################################################################################
def usage():
sys.stderr.write(usage_string)
sys.exit(2)
# Let the user specify another mpicc to get mpi.h from
output = sys.stdout
output_filename = None
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "fsgdc:o:i:I:")
except getopt.GetoptError, err:
sys.stderr.write(str(err) + "\n")
usage()
for opt, arg in opts:
if opt == "-d": dump_prototypes = True
if opt == "-f": output_fortran_wrappers = True
if opt == "-s": skip_headers = True
if opt == "-g": output_guards = True
if opt == "-c": mpicc = arg
if opt == "-o": output_filename = arg
if opt == "-I":
stripped = arg.strip()
if stripped: includes.append(stripped)
if opt == "-i":
if not arg in pmpi_init_bindings:
sys.stderr.write("ERROR: PMPI_Init binding must be one of:\n %s\n" % " ".join(possible_bindings))
usage()
else:
pmpi_init_binding = arg
if len(args) < 1 and not dump_prototypes:
usage()
# Parse mpi.h and put declarations into a map.
for decl in enumerate_mpi_declarations(mpicc, includes):
mpi_functions[decl.name] = decl
if dump_prototypes: print decl
# Fail gracefully if we didn't find anything.
if not mpi_functions:
sys.stderr.write("Error: Found no declarations in mpi.h.\n")
sys.exit(1)
# If we're just dumping prototypes, we can just exit here.
if dump_prototypes: sys.exit(0)
# Open the output file here if it was specified
if output_filename:
try:
output = open(output_filename, "w")
except IOError:
sys.stderr.write("Error: couldn't open file " + arg + " for writing.\n")
sys.exit(1)
try:
# Start with some headers and definitions.
if not skip_headers:
output.write(wrapper_includes)
if output_guards: output.write("static int in_wrapper = 0;\n")
# Parse each file listed on the command line and execute
# it once it's parsed.
fileno = 0
for f in args:
cur_filename = f
file = open(cur_filename)
# Outer scope contains fileno and the fundamental macros.
outer_scope = Scope()
outer_scope["fileno"] = str(fileno)
outer_scope.include(macros)
parser = Parser(macros)
chunks = parser.parse(file.read())
for chunk in chunks:
chunk.evaluate(output, Scope(outer_scope))
fileno += 1
except WrapSyntaxError:
output.close()
if output_filename: os.remove(output_filename)
sys.exit(1)
output.close()
| cuda-profiler-master | nvtx_pmpi_wrappers/wrap/wrap.py |
#!/usr/bin/python
import json
from collections import OrderedDict
from sys import argv
def return_json(payload):
return(json.dumps(payload,
sort_keys=True,
indent=4
)
)
if argv[1] == 'update_order':
fw_manifest = argv[2]
ver_manifest = argv[3]
updateItems = {}
updateOrder = OrderedDict()
with open(fw_manifest) as f:
manifest_jsonify = json.load(f)
with open(ver_manifest) as f:
version_jsonify = json.load(f)
# Grab sequence type info from FW Manifest..
for obj in manifest_jsonify:
try:
for component in manifest_jsonify[obj]['Items']:
updateItems[component['CompName']] = \
[
component['Sequence'],
component['CompModel'],
obj
]
except KeyError as e:
pass
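# At this point each updateItems entry maps a component name to
# [Sequence, CompModel, group]; the loops below append a fourth element,
# a {'NeedsUpdate': bool} dict, which later code reads via index 3.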
# Iterate through FW Versioning, write Update Condition to updateItems
for item in version_jsonify:
for component in version_jsonify[item]['Items']:
if not component['IsUpToDate']:
try:
updateItems[component['ID']].append({'NeedsUpdate': True})
except:
try:
updateItems[component['Model']].append({'NeedsUpdate': True})
except:
continue
if component['IsUpToDate']:
try:
updateItems[component['ID']].append({'NeedsUpdate': False})
except:
try:
updateItems[component['Model']].append({'NeedsUpdate': False})
except:
continue
for i in updateItems:
try:
needsUpdate = updateItems[i][3]
pass
except IndexError:
group = updateItems[i][2]
for item in version_jsonify[group]['Items']:
if not item['IsUpToDate']:
updateItems[i].append({'NeedsUpdate': True})
break
if item['IsUpToDate']:
updateItems[i].append({'NeedsUpdate': False})
continue
for k in updateItems:
updateItems[k] = [i for n, i in enumerate(updateItems[k]) if i not in updateItems[k][n + 1:]]
sortedUpdateItems = sorted(updateItems.items(), key=lambda x: x[1][0])
try:
if argv[4] == 'order_length':
count = 0
for i in sortedUpdateItems:
if i[1][3]['NeedsUpdate']:
count += 1
print(count - 1)
exit(0)
except IndexError:
pass
itemsToUpdate = OrderedDict()
for i in sortedUpdateItems:
if i[1][3]['NeedsUpdate']:
if i[0] == 'MB_CEC':
itemsToUpdate[(str(i[0]))] = True
elif i[0] == 'Delta_CEC':
itemsToUpdate[(str(i[0]))] = True
else:
itemsToUpdate[str(i[1][2])] = True
for item in itemsToUpdate:
print(item)
exit(0)
if argv[1] == 'parse_update_json':
file_path = argv[2]
fw_update_json = {
'Error': True,
'State': 'Unknown',
'Action': 'Check Output Log'
}
with open(file_path) as f:
fw_update = f.readlines()
for line in fw_update:
try:
lineJson = json.loads(line)
if 'FirmwareLoadAction' in json.loads(line).keys(): # Detects if chassis-level power cycle is required
fw_update_json = json.loads(line)
if 'Reboot required' in lineJson['Message']: # Detects if host-level reboot is required
fw_update_json['RebootRequired'] = True
if lineJson['State'] == 'Failed':
fw_update_json['State'] = 'Failed'
fw_update_json['Message'] = lineJson['Message']
break
if lineJson['State'] == 'Canceled':
fw_update_json['State'] = 'Canceled'
fw_update_json['Message'] = lineJson['Message']
break
if lineJson['State'] == 'Done':
fw_update_json['State'] = 'Done'
fw_update_json['Message'] = lineJson['Message']
except Exception as e:
continue
print(return_json(fw_update_json))
if argv[1] == 'parse_versioning':
file_path = argv[2]
manifest_json = {
'ErrorWritingVersioning': True
}
with open(file_path) as f:
output_all = f.readlines()
# Grab JSON from raw output
for line in output_all:
try:
manifest_json = json.loads(line)
except ValueError:
pass
try:
if manifest_json['ErrorWritingVersioning']:
print('No JSON could be loaded, is the container already running?')
exit(1)
except KeyError:
print(json.dumps(manifest_json,
sort_keys=True,
indent=4
)
)
| deepops-master | roles/nvidia-dgx-firmware/files/parse_manifest.py |
#!/usr/bin/env python3
import sys
import getopt
import math
def print_help():
print("")
print("calculate_N.py -- A script to calculate a range of N values near maximum Memory Use")
print("")
print("Example:")
print(" ./calculate.py --mem 32768 --nb 192 --ranks 8")
print("")
print("")
print("Options:")
print(" --mem : Total memory per GPU in MB")
print(" --nb : value of NB")
print(" --ranks : Total number of ranks (P*Q)")
print("")
print("")
opts,args=getopt.getopt(sys.argv[1:],'h',['mem=','nb=','ranks=','help'])
memsize=0
nb=0
ranks=0
for opt,arg in opts:
if opt in ('--mem'):
memsize=int(arg)
elif opt in ('--nb'):
nb=int(arg)
elif opt in ('--ranks'):
ranks=int(arg)
elif opt in ('--help','h'):
print_help()
exit()
else:
print_help()
exit()
if memsize == 0:
print("ERROR: memsize not set")
print_help()
exit()
if nb == 0:
print("ERROR: nb not set")
print_help()
exit()
if ranks == 0:
print("ERROR: ranks not set")
print_help()
exit()
print("")
print("HPL Parameter Calculator")
print("")
print("Total Memory Size (MB): %d" % memsize)
print(" Specified NB: %d" % nb)
print(" Total Number of Ranks: %d" % ranks)
# Find approximate value
v=math.sqrt(float(ranks)*float(memsize)*1024*1024/8)
max_val=round(math.floor(v/nb))*nb
ideal_val=int(math.floor(v/nb)*0.99)*nb
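# Rough derivation of the values above: an N x N double-precision matrix needs
# 8*N^2 bytes spread across all ranks, so N_max ~ sqrt(ranks * mem_bytes / 8),
# rounded down to a multiple of NB; the "ideal" value backs off to ~99% of that
# to leave memory headroom.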
print("")
print("Theoretical Max value of N: %d" % max_val)
print("Ideal value of N (99%% of N): %d " % ideal_val)
# modify value for best fit for NB and ranks
# Make steps 0.5% in nb of max
istep=round((0.005*max_val)/nb)*nb
# Print list of N
print("")
print("List of N:")
for v in range(-3,3):
n=ideal_val+v*istep
print("%d " % n, end="")
print("")
print("")
| deepops-master | workloads/bit/hpl/calculate_N.py |
#!/usr/bin/python3
#
# format_results.py
#
# This script will format the results from an HPL
# experiment and write them into a comma separated file
# to be read into a spreadsheet for additional analysis.
import sys
import os
import subprocess
import getopt
import re
def print_help():
print("")
print("\tformat_results.py -d (experiment directory)\n")
print("")
sys.exit(1)
print("")
try:
opts,args = getopt.getopt(sys.argv[1:],"d:",["d="])
except getopt.GetoptError:
print_help()
dir = ""
for opt,arg in opts:
if opt == '-h':
print_help()
elif opt in ("-d","--dir"):
expdir=arg
if expdir == "":
print("ERROR: Directory must be specified.")
print_help()
if not os.path.isdir(expdir):
print("ERROR: Specified path is not a directory. DIR="+dir)
print("ERROR: exiting")
print("")
sys.exit(1)
# Create a results dictionary
res={}
# Now loop over all .out files found in dir
for fn in os.listdir(expdir):
if not fn.endswith(".out"):
continue
# For each file, find the HOSTLIST and performance metric
with open(expdir+"/"+fn,"r") as fp:
hosts=""
perf=""
for line in fp:
l=line.strip()
# Matching: HOSTLIST node-001,node-002
if(re.match('HOSTLIST',l)):
if hosts != "":
print("ERROR: Found HOSTLIST twice in "+fn)
hosts=l.split()[-1]
## Matching: WR01C2C8 180224 144 4 4 50.27 7.763e+04
m=re.match('^W\w\d{2}\w\d\w\d (.*)$',l)
if m:
# Silently grab the last one
perf=l.split()[-1]
# Compress nodelist
cmd=['scontrol','show','hostlist',hosts]
cout=subprocess.run(cmd,stdout=subprocess.PIPE)
chosts=cout.stdout.decode('utf-8').strip()
if chosts in res:
res[chosts].append(perf)
else:
res[chosts]=[perf]
## Now all the data have been read, lets print it out
print("")
print("Comma Separated Results for Experiment: "+expdir)
print("")
## Print headers
maxexp=0
for k,v in sorted(res.items()):
l=len(res[k])
if l > maxexp:
maxexp=l
print("Nodelist;",end="")
for n in range(1,maxexp+1):
print("Exp %d;" % n,end="")
print("")
for k,v in sorted(res.items()):
print("%s;%s" % (k,";".join(res[k])))
| deepops-master | workloads/bit/hpl/format_results.py |
#!/usr/bin/env python3
#
# verify_hpl_experiment.py <DIRECTORY>
#
# This script will do two things.
# 1) It will verify the performance against a reference, if the reference is available
# 2) It will verify performance based on jitter of all of the results.
#
# In the event there are failed jobs, the nodes and failure counts will be reported.
### TODO
#### Print total summary of experiment (total number of jobs, jobs per node, success, etc)
#### When a slow/bad job is found, write it out with the nodelist (compressed?)
####
import sys
import os
import glob
import re
### Thresholds for flagging slow jobs, as multipliers of the best time (1.05 = 5% slower than the fastest run)
HPLTHRESH=1.05
CPUTHRESH=1.05
def print_help():
print("")
print("verify_hpl_experiment.py <directory>")
print("")
print("\tThis script will validate the results for an HPL Burnin Experiment. It validates")
print("\thow each run completed as well as inspects the performance consistency of each run.")
print("\tJobs that ran slow, did not pass internal validation, or did not complete, are reported")
print("\tby nodes that were used.")
print("")
exit(1)
def format_hostlist(hostlist):
s=""
for h in sorted(hostlist):
hs=h+":"+str(hostlist[h])
if s=="":
s=hs
else:
s=s+","+hs
return s
def validate_case(label,d):
val=""
key=""
for k in d:
if val == "":
val=d[k]
key=k
else:
if val != d[k]:
print("ERROR: Cases do not match: val=<{},{}> key=<{},{}>".format(val,d[k],key,k))
print("ERROR: This should never happen.")
return ""
return val
def print_table(t_slow,t_total):
for key in sorted(t_slow, key=t_slow.get,reverse=True):
if t_slow[key] > 0:
print("{}: {} out of {}".format(key,t_slow[key],t_total[key]))
print("")
print("")
print("Verifying HPL Burnin Results")
print("")
if len(sys.argv) <= 1:
print("Error: no command line arguments found.")
print_help()
expdir=sys.argv[1]
if not os.path.exists(expdir):
print('ERROR: {} does not exist'.format(expdir))
print_help()
if not os.path.isdir(expdir):
print('ERROR: {} is not a directory.'.format(expdir))
print_help()
# Define hash tables to store results
cfg={}
n={}
nb={}
p={}
q={}
time={}
gflops={}
status={}
hl={}
tc={}
explist={}
fncnt=0
besttime=9999999.0
maxperf=0.0
minperf=1.0e+12
##HPL_AI WR01L8R2 288000 288 4 2 23.55 6.763e+05 11.53998 2 4.539e+05
for fn in glob.glob(expdir + "/*.out", recursive=False):
# Check 3 things, did the job complete, did the job pass or fail, what was the performance
fncnt+=1
file=open(fn,'r')
tc[fn]=0
for l in file.readlines():
# Sometimes there may be a 2nd entry, only pull the last
explist[fn]=1
if re.search('WR',l):
# Check if this is regular HPL or HPL-AI
off=0
if (l.split()[0] == "HPL_AI"): off=1
cfg[fn]=l.split()[0+off]
n[fn]=int(l.split()[1+off])
nb[fn]=int(l.split()[2+off])
p[fn]=int(l.split()[3+off])
q[fn]=int(l.split()[4+off])
time[fn]=float(l.split()[5+off])
if time[fn] < besttime:
besttime=time[fn]
if (l.split()[0] == "HPL_AI"):
gflops[fn]=float(l.split()[10])
else:
gflops[fn]=float(l.split()[6])
if gflops[fn] < minperf:
minperf=gflops[fn]
if gflops[fn] > maxperf:
maxperf=gflops[fn]
#if re.search('^\|\|Ax-b\|\|\/eps',l):
if re.search('\|\|Ax-b\|\|\_oo/\(eps',l):
if l.split()[3]=='PASSED':
status[fn]='passed'
elif l.split()[3]=='FAILED':
status[fn]='failed'
else:
status[fn]='unknown'
if re.search('^HOSTLIST:',l):
hl[fn]=l.split()[1]
if re.search('End of Tests',l):
tc[fn]=1
file.close()
# Validate each case and make sure they all have the same settings
if fncnt == 0:
print("ERROR: No cases were found. Either this is an invalid experiment directory or something we wrong. Please check")
print("")
exit(1)
e_cfg=validate_case("run config",cfg)
if e_cfg == "":
print("ERROR: Of the {} files read, the run config tag was not found. All results failed to run, please check each run manually.".format(fncnt))
print("")
exit(1)
e_n=validate_case("N",n)
e_nb=validate_case("NB",nb)
e_p=validate_case("P",p)
e_q=validate_case("Q",q)
# now analyze the data, record stats by node
# TODO, verify that all experiments have the same settings
slowcnt=0
failedcnt=0
unkcnt=0
dnccnt=0
t_slow={}
t_failed={}
t_unk={}
t_dnc={}
t_total={}
sum_slow=0
sum_failed=0
sum_unk=0
sum_dnc=0
for key in explist:
isslow=0
isfailed=0
isunk=0
isdnc=0
for h in hl[key].split(','):
if h not in t_total: t_total[h]=0
t_total[h]+=1
if h not in t_slow: t_slow[h]=0
if h not in t_failed: t_failed[h]=0
if h not in t_unk: t_unk[h]=0
if h not in t_dnc: t_dnc[h]=0
if tc[key] == 0:
t_dnc[h]+=1
dnccnt+=1
isdnc=1
else:
if time[key] > besttime*HPLTHRESH:
t_slow[h]+=1
slowcnt+=1
isslow=1
if status[key] == 'failed':
t_failed[h]+=1
failedcnt+=1
isfailed=1
if status[key] == 'unknown':
t_unk[h]+=1
unkcnt+=1
isunk=1
sum_slow+=isslow
sum_failed+=isfailed
sum_unk+=isunk
sum_dnc+=isdnc
# Now sort and print results
print("")
print("Issues Found:")
print("")
stat=0
if slowcnt > 0:
print("Slow Nodes:")
print_table(t_slow,t_total)
stat=1
if failedcnt > 0:
print("Nodes on which Jobs Failed:")
print_table(t_failed,t_total)
stat=1
if unkcnt > 0:
print("Nodes on which Jobs ended in Unknown State:")
print_table(t_unk,t_total)
stat=1
if dnccnt > 0:
print("Nodes on which Jobs did not complete:")
print_table(t_dnc,t_total)
stat=1
if stat == 0:
print("No Issues Found")
print("")
print("")
print("Summary:")
print("")
print(" Experiment Dir:",expdir)
print(" Total Jobs:", fncnt)
print(" Slow Jobs:", sum_slow)
print(" Failed Jobs:", sum_failed)
print(" Unknown Jobs:", sum_unk)
print(" Did Not Complete:", sum_dnc)
print(" HPL CFG:", e_cfg)
print(" N:", e_n)
print(" NB:", e_nb)
print(" P*Q: {}*{}".format(e_p,e_q))
print(" Hostlist:", format_hostlist(t_total))
print(" MaxPerf:", maxperf,"GF")
print(" MinPerf:", minperf,"GF")
print(" Percent Range: {:.2f}%".format(100.0*(maxperf-minperf)/maxperf))
print("")
if stat!=0:
print("Issues were found. Refer to the README.md file for instructions on how to interpret the results.")
print("")
| deepops-master | workloads/bit/hpl/verify_hpl_experiment.py |
# pylint: disable-all
import os, sys, argparse, time
import cupy
import dask
import dask.array as da
from dask_cuda import LocalCUDACluster
from dask.distributed import Client, LocalCluster, wait
from dask.delayed import delayed
from dask.diagnostics import ProgressBar
from multiprocessing.pool import ThreadPool
import socket
def create_data(rs, xdim, ydim, x_chunk_size, y_chunk_size):
x = rs.normal(10, 1, size=(xdim, ydim), chunks=(x_chunk_size, y_chunk_size))
return x
def run(data):
(data + 1)[::2, ::2].sum().compute()
return
def get_scheduler_info():
scheduler_hostname = socket.gethostname()
scheduler_ip = socket.gethostbyname(scheduler_hostname)
scheduler_port = '8786'
scheduler_uri = str(scheduler_ip) + ':' + scheduler_port
return(scheduler_ip, scheduler_uri)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--xdim', type=int, default=500000)
parser.add_argument('--ydim', type=int, default=500000)
parser.add_argument('--x_chunk_size', type=int, default=10000)
parser.add_argument('--y_chunk_size', type=int, default=10000)
parser.add_argument('--use_gpus_only', action="store_true")
parser.add_argument('--n_gpus', type=int, default=1)
parser.add_argument('--use_cpus_only', action="store_true")
parser.add_argument('--n_cpu_sockets', type=int, default=1)
parser.add_argument('--n_cpu_cores_per_socket', type=int, default=1)
parser.add_argument('--use_distributed_dask', action="store_true")
args = parser.parse_args()
sched_ip, sched_uri = get_scheduler_info()
if args.use_distributed_dask:
print('Using Distributed Dask')
client = Client(sched_uri)
elif args.use_gpus_only:
print('Using GPUs and Local Dask')
cluster = LocalCUDACluster(ip=sched_ip, n_workers=args.n_gpus)
client = Client(cluster)
elif args.use_cpus_only:
print('Using CPUs and Local Dask')
cluster = LocalCluster(ip=sched_ip, n_workers=args.n_cpu_sockets, threads_per_worker=args.n_cpu_cores_per_socket)
client = Client(cluster)
else:
print("Exiting...")
sys.exit(-1)
start = time.time()
if args.use_gpus_only:
print('Allocating and initializing arrays using GPU memory with CuPY')
rs = da.random.RandomState(RandomState=cupy.random.RandomState)
elif args.use_cpus_only:
print('Allocating and initializing arrays using CPU memory')
rs = da.random.RandomState()
x = create_data(rs, args.xdim, args.ydim, args.x_chunk_size, args.y_chunk_size)
print('Array size: {:.2f} TB. Computing parallel sum . . .'.format(x.nbytes/1e12))
run(x)
end = time.time()
delta = (end - start)
print('Processing complete.')
print('Wall time create data + computation time: {:10.8f} seconds'.format(delta))
del x
if __name__ == '__main__':
main()
| deepops-master | workloads/examples/slurm/dask-rapids/files/sum.py |
#!/usr/bin/env python3
'''
Kubeflow documentation: https://kubeflow-pipelines.readthedocs.io/en/latest/_modules/kfp/dsl/_container_op.html
K8S documentation: https://github.com/kubernetes-client/python/blob/02ef5be4ecead787961037b236ae498944040b43/kubernetes/docs/V1Container.md
Example Triton Inference Server Models: https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-master-branch-guide/docs/run.html#example-model-repository
Example Triton Inference Server Client: https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-master-branch-guide/docs/client_example.html#section-getting-the-client-examples
Bugs:
Cannot dynamically assign GPU counts: https://github.com/kubeflow/pipelines/issues/1956
# Manual run example:
nvidia-docker run --rm --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -p8000:8000 -p8001:8001 -p8002:8002 -v/raid/shared/results/model_repository/:/model_repository nvcr.io/nvidia/tensorrtserver:20.02-py3 trtserver --model-repository=/model_repository
docker run -it --rm --net=host tensorrtserver_client /workspace/install/bin/image_client -m resnet50_netdef images/mug.jpg
'''
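# Illustrative only: one way to submit the compiled pipeline with the kfp SDK client.
# The host below is a placeholder for your Kubeflow Pipelines endpoint.
#   import kfp
#   client = kfp.Client(host='<kfp-host>:31380')
#   client.create_run_from_pipeline_func(triton_pipeline, arguments={'skip_examples': ''})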
import triton_ops
import kfp.dsl as dsl
from kubernetes import client as k8s_client
@dsl.pipeline(
name='tritonPipeline',
description='Deploy a Triton server'
)
def triton_pipeline(skip_examples):
op_dict = {}
# Hardcoded paths mounted in the Triton container
results_dir = "/results/"
data_dir = "/data/"
checkpoints_dir = "/checkpoints/"
models = "/results/model_repository"
# Set default volume names
pv_data = "triton-data"
pv_results = "triton-results"
pv_checkpoints = "triton-checkpoints"
# Create K8s PVs
op_dict['triton_volume_results'] = triton_ops.TritonVolume('triton_volume_results', pv_results)
op_dict['triton_volume_data'] = triton_ops.TritonVolume('triton_volume_data', pv_data)
op_dict['triton_volume_checkpoints'] = triton_ops.TritonVolume('triton_volume_checkpoints', pv_checkpoints)
# Download example models
with dsl.Condition(skip_examples == '', name='skip-examples-download'):
op_dict['triton_download'] = triton_ops.TritonDownload('triton_download', models)
# Common Operations
op_dict['triton_service'] = triton_ops.TritonService('triton_service')
op_dict['triton_deploy'] = triton_ops.TritonDeploy('triton_deploy', models)
# Use GPUs
op_dict['triton_deploy'].set_gpu_limit(1, vendor = "nvidia")
# Add Triton Ports
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(container_port=8000, host_port=8000)) # HTTP
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(8001, host_port=8001)) # gRPC
op_dict['triton_deploy'].add_port(k8s_client.V1ContainerPort(8002, host_port=8002)) # Metrics
    # Set order so that volumes are created, then examples downloaded, then the service started
op_dict['triton_download'].after(op_dict['triton_volume_results'])
op_dict['triton_download'].after(op_dict['triton_volume_data'])
op_dict['triton_download'].after(op_dict['triton_volume_checkpoints'])
op_dict['triton_deploy'].after(op_dict['triton_download'])
# Mount Volumes
for name, container_op in op_dict.items():
if name == 'triton_service' or type(container_op) == triton_ops.TritonVolume:
continue
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_results, read_only=False), name=pv_results))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=results_dir, name=pv_results, read_only=False))
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_data, read_only=False), name=pv_data))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=data_dir, name=pv_data, read_only=True))
container_op.add_volume(k8s_client.V1Volume(persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
claim_name=pv_checkpoints, read_only=False), name=pv_checkpoints))
container_op.add_volume_mount(k8s_client.V1VolumeMount(
mount_path=checkpoints_dir, name=pv_checkpoints, read_only=True))
'''
TODO Implement https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Probe.md:
livenessProbe:
httpGet:
path: /api/health/live
port: http
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 5
httpGet:
path: /api/health/ready
port: http
'''
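    # Untested sketch for the TODO above (assumptions: the kfp ContainerOp exposes its
    # underlying V1Container as `.container`, and the health paths below are taken
    # verbatim from the TODO and are not verified against the deployed Triton version):
    # op_dict['triton_deploy'].container.liveness_probe = k8s_client.V1Probe(
    #     http_get=k8s_client.V1HTTPGetAction(path='/api/health/live', port=8000))
    # op_dict['triton_deploy'].container.readiness_probe = k8s_client.V1Probe(
    #     initial_delay_seconds=5, period_seconds=5,
    #     http_get=k8s_client.V1HTTPGetAction(path='/api/health/ready', port=8000))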
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(triton_pipeline, __file__ + '.tar.gz')
| deepops-master | workloads/examples/k8s/kubeflow-pipeline-deploy/triton.py |
| deepops-master | workloads/examples/k8s/kubeflow-pipeline-deploy/__init__.py |
#!/usr/bin/env python3
import kfp.dsl as dsl
from kubernetes import client as k8s_client
import yaml
__TRITON_CONTAINER_VERSION__ = 'nvcr.io/nvidia/tritonserver:21.02-py3'
__TRITON_POD_LABEL__ = 'triton-kubeflow'
__TRITON_SERVICE_MANIFEST___ = '''
apiVersion: v1
kind: Service
metadata:
name: {}
spec:
selector:
app: {}
ports:
- name: http
protocol: TCP
port: 8000
targetPort: 8000
nodePort: 30800
- name: grpc
port: 8001
targetPort: 8001
nodePort: 30801
- name: metrics
port: 8002
targetPort: 8002
nodePort: 30802
type: NodePort
'''.format(__TRITON_POD_LABEL__, __TRITON_POD_LABEL__)
class ObjectDict(dict):
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError("No such attribute: " + name)
class TritonVolume(dsl.ResourceOp):
'''Initialize a volume if one does not exist'''
def __init__(self, name, pv_name):
super(TritonVolume, self).__init__(
k8s_resource=k8s_client.V1PersistentVolumeClaim(
api_version="v1", kind="PersistentVolumeClaim",
metadata=k8s_client.V1ObjectMeta(name=pv_name),
spec=k8s_client.V1PersistentVolumeClaimSpec(
access_modes=['ReadWriteMany'], resources=k8s_client.V1ResourceRequirements(
requests={'storage': '2000Gi'}),
storage_class_name="nfs-client")),
action='apply',
name=name
)
name=name
class TritonDownload(dsl.ContainerOp):
'''Download example Triton models and move them into the PV'''
def __init__(self, name, models):
cmd = ["/bin/bash", "-cx"]
arguments = ["cd /tmp; git clone https://github.com/triton-inference-server/server.git; " \
"cd server/docs/examples; ./fetch_models.sh; cd model_repository; cp -a . " + str(models)]
super(TritonDownload, self).__init__(
name=name,
image=__TRITON_CONTAINER_VERSION__,
command=cmd,
arguments=arguments,
file_outputs={}
)
self.pod_labels['app'] = __TRITON_POD_LABEL__
name=name
class TritonDeploy(dsl.ContainerOp):
'''Deploy Triton'''
def __init__(self, name, models):
cmd = ["/bin/bash", "-cx"]
arguments = ["echo Deploying: " + str(models) + ";ls /data; ls /results; ls /checkpoints; tritonserver --model-store=" + models]
super(TritonDeploy, self).__init__(
name=name,
image=__TRITON_CONTAINER_VERSION__,
command=cmd,
arguments=arguments,
file_outputs={}
)
self.pod_labels['app'] = __TRITON_POD_LABEL__
name=name
class TritonService(dsl.ResourceOp):
'''Launch Triton Service'''
def __init__(self, name):
super(TritonService, self).__init__(
name=name,
            k8s_resource=yaml.safe_load(__TRITON_SERVICE_MANIFEST___),
action='create'
)
| deepops-master | workloads/examples/k8s/kubeflow-pipeline-deploy/triton_ops.py |
import kfp
import kfp_server_api
import json
import time
# Define and build a Kubeflow Pipeline
@kfp.dsl.pipeline(
name="kubeflow-quick-test",
description="Verify Kubeflow can launch a container via a pipeline")
def test_kubeflow_op():
op = kfp.dsl.ContainerOp(
name='kubeflow-test-op',
image='busybox',
command=["/bin/sh", "-cx"],
arguments=["echo 'Container started!'"],
file_outputs={}
)
kfp.compiler.Compiler().compile(test_kubeflow_op, 'kubeflow-test.yml')
# Connect to Kubeflow and create a job; this simply launches a busybox container and prints a message
while True:
    time.sleep(30) # Occasionally Kubeflow fails to respond even when all deployments are up. I don't know why, sometimes it is a 403, sometimes a 500, and sometimes it works. So we will just wait and retry until the test/script times out.
try:
print("Submitting Kubeflow pipeline")
run_result = kfp.Client(host="10.0.0.2:31380").create_run_from_pipeline_package('kubeflow-test.yml', arguments={})
break # This means it worked!
except kfp_server_api.rest.ApiException as e:
print("Hit an error, waiting and trying again: {}".format(e))
for i in range(70): # The test eventually times out. So we run a little longer than that. This accounts mostly for NGC download time.
print("Polling for pipeline status: {} - {}".format(run_result, i))
run = kfp.Client(host="10.0.0.2:31380").get_run(run_result.run_id).run
if run.status == "Succeeded":
print("SUCCESS: Kubeflow launched a container successfully")
break
print("Got {}, waiting some more... {}".format(run.status, run))
time.sleep(10) # Wait 10 seconds and poll
| deepops-master | workloads/jenkins/scripts/test-kubeflow-pipeline.py |
#!/usr/bin/env python
'''Because there is currently no clean way to update this config through ks this script exists.
The purpose of this script is to dynamically update Kubeflow to point at the latest NGC containers.
In addition to that it changes default resource requests to optimize for GPUs
TODO: Do this with Ansible
'''
#import requests
import json
import urllib2
import logging
import yaml
import os
NVCR = 'nvcr.io'
try:
KF_DIR = os.environ['KF_DIR']
except KeyError as e:
logging.error("Could not locate KF_DIR: {}".format(e))
exit()
def get_images(url='https://api.ngc.nvidia.com/v2/repos', number_tags=5):
images = []
# Get response from Registry
try:
req = urllib2.Request(url)
repos = urllib2.urlopen(req)
except Exception as e:
logging.error("Failed to get repos {}".format(e)) # Fail on non-200 status code or other issues
return 1
# Parse Registry response
try:
repos = json.loads(repos.read())
except Exception as e:
logging.error("Failed to parse NGC response")
return 1
if 'repositories' not in repos:
        logging.warning("no repositories listed")
return 1
# Iterate through registry response
for repo in repos['repositories']:
if 'tags' not in repo or \
'namespace' not in repo or \
'name' not in repo:
continue
count = 0
for tag in repo['tags']:
images.append((repo['namespace'], repo['name'],tag))
count += 1
if count >= number_tags:
break
return map(lambda x : "{}/{}/{}:{}".format(NVCR, x[0], x[1], x[2]), images)
def update_yaml(images, yaml_file):
with open(yaml_file, 'r') as fname:
config = yaml.load(fname.read(), Loader=yaml.FullLoader)
ui_config = yaml.load(config['data']['spawner_ui_config.yaml'], Loader=yaml.FullLoader)
# XXX: the yaml file doesn't read in properly due to the line 'spawner_ui_config.yaml: |'. So we pull it out and put it back later.
config['data']['spawner_ui_config.yaml'] = ui_config
# Update YAML file with NVIDIA default config and first 3 tags of all NGC containers
try:
config['data']['spawner_ui_config.yaml']['spawnerFormDefaults']['extraResources']['value'] = '{"nvidia.com/gpu": 1}'
config['data']['spawner_ui_config.yaml']['spawnerFormDefaults']['image']['value'] = images[0]
config['data']['spawner_ui_config.yaml']['spawnerFormDefaults']['image']['options'] = images
except KeyError:
logging.error("Couldn't parse config for update")
return
with open(yaml_file, 'w') as fname:
        # When Python reads the Kubeflow YAML it has difficulty parsing the | and removes some quotes; we put them back here.
yaml_string = yaml.dump(config).replace('spawner_ui_config.yaml:', 'spawner_ui_config.yaml: |')
yaml_string = yaml_string.replace("workspace-{notebook-name}", "'workspace-{notebook-name}'")
fname.write(yaml_string)
if __name__ == '__main__':
images = get_images()
# This block of code updates kustomize files, in order for them to take effect you must run kfctl apply
try:
update_yaml(images,
'{}/kustomize/jupyter-web-app/base/config-map.yaml'.format(KF_DIR))
logging.info("Updated KS kustomize code configurations.")
except IOError as e: # the ks_app files may not exist at time of running this
logging.error("Failed to update KS kustomize code configurations: {}".format(e))
| deepops-master | scripts/k8s/update_kubeflow_config.py |
#!/usr/bin/python
import datetime, bisect
def parse_timestamp(raw_str):
tokens = raw_str.split()
if len(tokens) == 1:
if tokens[0].lower() == 'never':
return 'never';
else:
raise Exception('Parse error in timestamp')
elif len(tokens) == 3:
return datetime.datetime.strptime(' '.join(tokens[1:]),
'%Y/%m/%d %H:%M:%S')
else:
raise Exception('Parse error in timestamp')
def timestamp_is_ge(t1, t2):
if t1 == 'never':
return True
elif t2 == 'never':
return False
else:
return t1 >= t2
def timestamp_is_lt(t1, t2):
if t1 == 'never':
return False
elif t2 == 'never':
return t1 != 'never'
else:
return t1 < t2
def timestamp_is_between(t, tstart, tend):
return timestamp_is_ge(t, tstart) and timestamp_is_lt(t, tend)
def parse_hardware(raw_str):
tokens = raw_str.split()
if len(tokens) == 2:
return tokens[1]
else:
raise Exception('Parse error in hardware')
def strip_endquotes(raw_str):
return raw_str.strip('"')
def identity(raw_str):
return raw_str
def parse_binding_state(raw_str):
tokens = raw_str.split()
if len(tokens) == 2:
return tokens[1]
else:
raise Exception('Parse error in binding state')
def parse_next_binding_state(raw_str):
tokens = raw_str.split()
if len(tokens) == 3:
return tokens[2]
else:
raise Exception('Parse error in next binding state')
def parse_rewind_binding_state(raw_str):
tokens = raw_str.split()
if len(tokens) == 3:
return tokens[2]
else:
raise Exception('Parse error in next binding state')
def parse_leases_file(leases_file):
valid_keys = {
'starts': parse_timestamp,
'ends': parse_timestamp,
'tstp': parse_timestamp,
'tsfp': parse_timestamp,
'atsfp': parse_timestamp,
'cltt': parse_timestamp,
'hardware': parse_hardware,
'binding': parse_binding_state,
'next': parse_next_binding_state,
'rewind': parse_rewind_binding_state,
'uid': strip_endquotes,
'client-hostname': strip_endquotes,
'option': identity,
'set': identity,
'on': identity,
'abandoned': None,
'bootp': None,
'reserved': None,
}
leases_db = {}
lease_rec = {}
in_lease = False
in_failover = False
for line in leases_file:
if line.lstrip().startswith('#'):
continue
tokens = line.split()
if len(tokens) == 0:
continue
key = tokens[0].lower()
if key == 'lease':
if not in_lease:
ip_address = tokens[1]
lease_rec = {'ip_address' : ip_address}
in_lease = True
else:
raise Exception('Parse error in leases file')
elif key == 'failover':
in_failover = True
elif key == '}':
if in_lease:
for k in valid_keys:
if callable(valid_keys[k]):
lease_rec[k] = lease_rec.get(k, '')
else:
lease_rec[k] = False
ip_address = lease_rec['ip_address']
if ip_address in leases_db:
leases_db[ip_address].insert(0, lease_rec)
else:
leases_db[ip_address] = [lease_rec]
lease_rec = {}
in_lease = False
elif in_failover:
in_failover = False
continue
else:
raise Exception('Parse error in leases file')
elif key in valid_keys:
if in_lease:
value = line[(line.index(key) + len(key)):]
value = value.strip().rstrip(';').rstrip()
if callable(valid_keys[key]):
lease_rec[key] = valid_keys[key](value)
else:
lease_rec[key] = True
else:
raise Exception('Parse error in leases file')
else:
if in_lease:
raise Exception('Parse error in leases file')
if in_lease:
raise Exception('Parse error in leases file')
return leases_db
def round_timedelta(tdelta):
return datetime.timedelta(tdelta.days,
tdelta.seconds + (0 if tdelta.microseconds < 500000 else 1))
def timestamp_now():
n = datetime.datetime.utcnow()
return datetime.datetime(n.year, n.month, n.day, n.hour, n.minute,
n.second + (0 if n.microsecond < 500000 else 1))
def lease_is_active(lease_rec, as_of_ts):
return timestamp_is_between(as_of_ts, lease_rec['starts'],
lease_rec['ends'])
def ipv4_to_int(ipv4_addr):
parts = ipv4_addr.split('.')
return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
(int(parts[2]) << 8) + int(parts[3])
def select_active_leases(leases_db, as_of_ts):
retarray = []
sortedarray = []
for ip_address in leases_db:
lease_rec = leases_db[ip_address][0]
if lease_is_active(lease_rec, as_of_ts):
ip_as_int = ipv4_to_int(ip_address)
insertpos = bisect.bisect(sortedarray, ip_as_int)
sortedarray.insert(insertpos, ip_as_int)
retarray.insert(insertpos, lease_rec)
return retarray
##############################################################################
myfile = open('/var/lib/dhcp/dhcpd.leases', 'r')
leases = parse_leases_file(myfile)
myfile.close()
now = timestamp_now()
report_dataset = select_active_leases(leases, now)
print('+------------------------------------------------------------------------------')
print('| DHCPD ACTIVE LEASES REPORT')
print('+-----------------+-------------------+----------------------+-----------------')
print('| IP Address | MAC Address | Expires (days,H:M:S) | Client Hostname ')
print('+-----------------+-------------------+----------------------+-----------------')
for lease in report_dataset:
print('| ' + format(lease['ip_address'], '<15') + ' | ' + \
format(lease['hardware'], '<17') + ' | ' + \
format(str((lease['ends'] - now) if lease['ends'] != 'never' else 'never'), '>20') + ' | ' + \
lease['client-hostname'])
print('+-----------------+-------------------+----------------------+-----------------')
print('| Total Active Leases: ' + str(len(report_dataset)))
print('| Report generated (UTC): ' + str(now))
print('+------------------------------------------------------------------------------')
| deepops-master | src/containers/dgxie/get_hosts.py |
#!/usr/bin/python
from flask import Flask, abort, request
import json
import datetime
import re
import os
app = Flask(__name__)
@app.route('/v1/boot/<mac>')
def pxe(mac):
'''See https://github.com/danderson/netboot/blob/master/pixiecore/README.api.md for API specs'''
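    # Illustrative machines.json entry (hypothetical values) showing the keys read below:
    # 'mac' is a regex matched against the requesting MAC address, while 'kernel',
    # 'cmdline' and 'initrd' may embed the $HTTP_PORT placeholder substituted further down.
    # {
    #   "dgx": {
    #     "mac": "0c:c4:7a:.*",
    #     "kernel": "http://pxe-server:$HTTP_PORT/vmlinuz",
    #     "cmdline": "console=tty0 url=http://pxe-server:$HTTP_PORT/preseed.cfg",
    #     "initrd": ["http://pxe-server:$HTTP_PORT/initrd"]
    #   }
    # }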
# load machine profiles for each call so we can re-load changes from disk
jf = open('/etc/machines/machines.json', 'r')
machines = json.load(jf)
jf.close()
if "HTTP_PORT" in os.environ.keys():
http_port = os.environ['HTTP_PORT']
else:
http_port = "13370"
# return profile in json for matching machine
for machine in machines:
if 'mac' in machines[machine] and re.match(machines[machine]['mac'], mac):
machines[machine]['mac'] = mac
machines[machine]['kernel'] = machines[machine]['kernel'].replace("$HTTP_PORT", http_port)
if 'cmdline' in machines[machine]:
machines[machine]['cmdline'] = machines[machine]['cmdline'].replace("$HTTP_PORT", http_port)
if 'initrd' in machines[machine]:
for i in range(len(machines[machine]['initrd'])):
machines[machine]['initrd'][i] = machines[machine]['initrd'][i].replace("$HTTP_PORT", http_port)
return json.dumps(machines[machine])
abort(404)
@app.route('/install', methods=['POST'])
def install():
if request.method == 'POST':
timestamp = datetime.datetime.now()
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
action = request.form['action']
print timestamp, ip
return 'done'
if __name__ == '__main__':
app.run(port=9090, threaded=True)
| deepops-master | src/containers/dgxie/api.py |
#!/usr/bin/python
from flask import Flask, request
from subprocess import check_output as run
import datetime
file = "/www/install.log"
with open(file, "a") as install_file:
install_file.write("== LOG OPENED ==\n")
app = Flask(__name__)
@app.route('/hosts')
def hosts():
return run("/usr/local/bin/get_hosts.py")
@app.route('/log')
def log():
f = open(file, 'r')
return f.read()
@app.route('/install', methods=['POST'])
def install():
if request.method == 'POST':
with open(file, "a") as install_file:
timestamp = datetime.datetime.now()
ip = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
action = request.form['action']
print timestamp, ip
install_file.write("%s: %s - %s\n" % (timestamp, action, ip))
return 'done'
if __name__ == '__main__':
app.run(port=5000, threaded=True)
#TODO:
# add start/end log entries for install
# busybox doesn't have curl, but can use wget:
# wget --post-data "ip=1.2.3.4" 192.168.1.1/install
| deepops-master | src/containers/dgxie/rest_api.py |
#!/usr/bin/env python
"""
Get a list of Ansible playbooks and roles that have changes staged in Git.
Run ansible-lint on only those playbooks and roles.
"""
from __future__ import print_function
import subprocess
import re
import sys
def get_changed_ansible_paths():
"""
Get a list of playbook files and role directories that are staged for commit
"""
git_diff = subprocess.check_output("git diff --name-only --cached".split())
ansible_lint_paths_to_check = []
for f in git_diff.split("\n"):
# Add playbook files
if re.match(r"^playbooks/.*(yml|yaml)$", f):
ansible_lint_paths_to_check.append(f)
# Add role directories
role_match = re.match(r"^roles/(\w+)/.*", f)
if role_match:
ansible_lint_paths_to_check.append(
"roles/{}".format(role_match.group(1)))
return ansible_lint_paths_to_check
def run_ansible_lint(paths):
cmd = ["ansible-lint", "-c" "src/repo/ansible-lint"] + paths
return subprocess.call(cmd)
if __name__ == "__main__":
changed = get_changed_ansible_paths()
if len(changed) > 0:
sys.exit(run_ansible_lint(changed))
| deepops-master | src/repo/githooks/check-ansible.py |
#!/usr/bin/env python
"""
Get a list of changed python scripts that are staged for commit.
Run pylint on only those files.
"""
from __future__ import print_function
import subprocess
import re
import sys
def get_changed_paths():
git_diff = subprocess.check_output("git diff --name-only --cached".split())
paths = []
for f in git_diff.split("\n"):
# Add playbook files
if re.match(r".*(\.py)$", f):
paths.append(f)
return paths
def run_lint(paths):
cmd = ["pylint", "-rn", "-sn", "-d", "R,C"] + paths
return subprocess.call(cmd)
if __name__ == "__main__":
changed = get_changed_paths()
if len(changed) > 0:
sys.exit(run_lint(changed))
| deepops-master | src/repo/githooks/check-python.py |
#!/usr/bin/env python
"""
Get a list of changed bash scripts that are staged for commit.
Run shellcheck on only those files.
"""
from __future__ import print_function
import subprocess
import re
import sys
def get_changed_shell_paths():
git_diff = subprocess.check_output("git diff --name-only --cached".split())
paths = []
for f in git_diff.split("\n"):
# Add playbook files
if re.match(r".*(\.sh|\.bash)$", f):
paths.append(f)
return paths
def run_lint(paths):
cmd = ["shellcheck", "-x"] + paths
return subprocess.call(cmd)
if __name__ == "__main__":
changed = get_changed_shell_paths()
if len(changed) > 0:
sys.exit(run_lint(changed))
| deepops-master | src/repo/githooks/check-shell.py |
#!/usr/bin/python3
#*****************************************************************************
# Copyright 2020 NVIDIA Corporation. All rights reserved.
#*****************************************************************************
import subprocess
import argparse
import datetime
import json
import time
def get_options():
parser = argparse.ArgumentParser(
description='Provision a Kubernetes cluster in GKE.')
parser.add_argument(
'-c', '--cluster', type=str, default=None,
help='K8s cluster to configure'
)
parser.add_argument(
'-i', '--image', type=str, default='',
help='Base distro OS image used in nodes.'
)
parser.add_argument(
'-z', '--zone', type=str, default=None,
help='Zone where the GPU cluster is running in.'
)
args = parser.parse_args()
return args
def run_cmd(cmd):
    output = b''
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
print("Error running command: {}".format(cmd))
return output
def wait_for_gpus(cluster_name, timeout=datetime.timedelta(minutes=15)):
''' Wait until nodes are available in GPU cluster. '''
cmd = [
'kubectl', 'get', 'nodes',
'-l', 'cloud.google.com/gke-nodepool={}-gpu-pool'.format(cluster_name),
'-o=json'
]
end_time = datetime.datetime.now() + timeout
print('Waiting for GPUs to be ready ', end='')
while datetime.datetime.now() <= end_time:
output = run_cmd(cmd)
items = json.loads(output.decode('UTF-8')).get("items", [])
for i in items:
gpus = int(i['status']['capacity'].get('nvidia.com/gpu', '0'))
if gpus > 0:
print('OK')
return
print('.', end='')
time.sleep(10)
if __name__ == '__main__':
opts = get_options()
print('Getting credentials for cluster ...')
run_cmd(['gcloud', 'container', 'clusters', 'get-credentials', opts.cluster, '--zone', opts.zone])
print('Enabling Application CRD...')
app_crd_path = 'https://raw.githubusercontent.com/GoogleCloudPlatform/marketplace-k8s-app-tools/master/crd/app-crd.yaml'
run_cmd(['kubectl', 'apply', '-f', app_crd_path])
print('Enabling GPUs in GPU cluster...')
nv_daemonset = 'https://raw.githubusercontent.com/GoogleCloudPlatform/container-engine-accelerators/master/nvidia-driver-installer/cos/daemonset-preloaded.yaml'
run_cmd(['kubectl', 'apply', '-f', nv_daemonset])
wait_for_gpus(opts.cluster)
| nvindex-cloud-master | provision/gke/finalize.py |
import websocket
import random
import logging
import sys
import json
import base64
import ssl
import time
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def get_websocket(ws_cmd_url, credentials=None):
""" Generate a WebSocket connection to the service """
if credentials:
b64 = base64.b64encode(f"{credentials[0]}:{credentials[1]}".encode()).decode("ascii")
header = {"Authorization": f"Basic {b64}"}
else:
header = {}
return websocket.create_connection(ws_cmd_url, suppress_origin=True, header=header, sslopt={"cert_reqs": ssl.CERT_NONE})
def send_jsonrpc_command(websocket: websocket.WebSocket, cmd: dict) -> tuple[bool, dict]:
""" Send a jsonrpc command, wait for the response and return it. """
cmd_id = random.randint(10000, 90000)
cmd_json = {
"jsonrpc": "2.0",
"id": cmd_id,
**cmd
}
logging.debug(f"sending cmd: {cmd} id: {cmd_id}")
websocket.send(json.dumps(cmd_json))
while True:
ret = websocket.recv()
ret_json = json.loads(ret)
if ret_json["id"] == cmd_id:
return "result" in ret_json, ret_json
else:
# print('waiting for ', my_cmd_id)
pass
def wait_for_data_to_load(websocket: websocket.WebSocket, retries=60*10, sleep_time_s=2) -> bool:
cmd_json = {
"method": "nv::index::app::perflab.get_performance_value",
"params": {
"query_performance_key_list": False,
"query_performance_value": ["nb_subcubes_rendered", "frames_per_second"]
}
}
while retries > 0:
_, res = send_jsonrpc_command(websocket, cmd_json)
values = res.get("result", {}).get("performance_value", {})
if int(values.get("nb_subcubes_rendered", 0)) > 0 and float(values.get("frames_per_second", 0.0)) > 2.5:
return True
time.sleep(sleep_time_s)
return False
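if __name__ == "__main__":
    # Minimal usage sketch: the endpoint and credentials below are placeholders and must
    # be replaced with a reachable NVIDIA IndeX service; the jsonrpc method is the one
    # already used by wait_for_data_to_load above.
    ws = get_websocket("wss://<nvindex-host>/cmd", credentials=("user", "password"))
    if wait_for_data_to_load(ws):
        ok, reply = send_jsonrpc_command(
            ws,
            {"method": "nv::index::app::perflab.get_performance_value",
             "params": {"query_performance_key_list": True}})
        logging.info("command ok=%s reply=%s", ok, reply)
    ws.close()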
| nvindex-cloud-master | notebooks/nvindex_util.py |
#!/usr/bin/env python2
import sys, subprocess
if len(sys.argv) > 2:
ifile = sys.argv[1]
encopt = sys.argv[2:-1]
ofile = sys.argv[-1]
else:
print 'usage: %s <input> [encode_options] <output>' % sys.argv[0]
sys.exit(1)
analysis_cmd = 'ffprobe -v error -of compact=p=0:nk=1 '
analysis_cmd += '-show_entries frame_tags=lavfi.r128.I -f lavfi '
analysis_cmd += "amovie='%s',ebur128=metadata=1" % ifile
try:
probe_out = subprocess.check_output(analysis_cmd, shell=True)
except subprocess.CalledProcessError, e:
sys.exit(e.returncode)
loudness = ref = -23
for line in probe_out.splitlines():
sline = line.rstrip()
if sline:
loudness = sline
adjust = ref - float(loudness)
if abs(adjust) < 0.0001:
print 'No normalization needed for ' + ifile
else:
print "Adjust %s by %.1fdB" % (ifile, adjust)
norm_cmd = ['ffmpeg', '-i', ifile, '-af', 'volume=%fdB' % adjust]
norm_cmd += encopt + [ofile]
print ' => %s' % ' '.join(norm_cmd)
subprocess.call(norm_cmd)
| GMAT-main | ffmpeg-gpu/tools/normalize.py |
#!/usr/bin/env python2
import sys, zmq, cmd
class LavfiCmd(cmd.Cmd):
prompt = 'lavfi> '
def __init__(self, bind_address):
context = zmq.Context()
self.requester = context.socket(zmq.REQ)
self.requester.connect(bind_address)
cmd.Cmd.__init__(self)
def onecmd(self, cmd):
if cmd == 'EOF':
sys.exit(0)
print 'Sending command:[%s]' % cmd
self.requester.send(cmd)
message = self.requester.recv()
print 'Received reply:[%s]' % message
try:
bind_address = sys.argv[1] if len(sys.argv) > 1 else "tcp://localhost:5555"
LavfiCmd(bind_address).cmdloop('FFmpeg libavfilter interactive shell')
except KeyboardInterrupt:
pass
| GMAT-main | ffmpeg-gpu/tools/zmqshell.py |
# Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
import tensorflow as tf
import numpy as np
import sys, struct
import convert_header as header
__all__ = ['convert_from_tensorflow']
class Operand(object):
IOTYPE_INPUT = 1
IOTYPE_OUTPUT = 2
IOTYPE_INTERMEDIATE = IOTYPE_INPUT | IOTYPE_OUTPUT
DTYPE_FLOAT = 1
DTYPE_UINT8 = 4
index = 0
def __init__(self, name, dtype, dims):
self.name = name
self.dtype = dtype
self.dims = dims
self.iotype = 0
self.used_count = 0
self.index = Operand.index
Operand.index = Operand.index + 1
self.iotype2str = {Operand.IOTYPE_INPUT: 'in', Operand.IOTYPE_OUTPUT: 'out', Operand.IOTYPE_INTERMEDIATE: 'inout'}
self.dtype2str = {Operand.DTYPE_FLOAT: 'DT_FLOAT', Operand.DTYPE_UINT8: 'DT_UINT8'}
def add_iotype(self, iotype):
self.iotype = self.iotype | iotype
if iotype == Operand.IOTYPE_INPUT:
self.used_count = self.used_count + 1
def __str__(self):
return "{}: (name: {}, iotype: {}, dtype: {}, dims: {}, used_count: {})".format(self.index,
self.name, self.iotype2str[self.iotype], self.dtype2str[self.dtype],
self.dims, self.used_count)
def __lt__(self, other):
return self.index < other.index
class TFConverter:
def __init__(self, graph_def, nodes, outfile, dump4tb):
self.graph_def = graph_def
self.nodes = nodes
self.outfile = outfile
self.dump4tb = dump4tb
self.layer_number = 0
self.output_names = []
self.name_node_dict = {}
self.edges = {}
self.conv_activations = {'Relu':0, 'Tanh':1, 'Sigmoid':2, 'None':3, 'LeakyRelu':4}
self.conv_paddings = {'VALID':0, 'SAME':1}
self.pool_paddings = {'VALID':0, 'SAME':1}
self.converted_nodes = set()
self.conv2d_scope_names = set()
self.conv2d_scopename_inputname_dict = {}
self.dense_scope_names = set()
self.dense_scopename_inputname_dict = {}
self.op2code = {'Conv2D':1, 'DepthToSpace':2, 'MirrorPad':3, 'Maximum':4,
'MathBinary':5, 'MathUnary':6, 'AvgPool':7, 'MatMul':8}
self.mathbin2code = {'Sub':0, 'Add':1, 'Mul':2, 'RealDiv':3, 'Minimum':4, 'FloorMod':5}
self.mathun2code = {'Abs':0, 'Sin':1, 'Cos':2, 'Tan':3, 'Asin':4,
'Acos':5, 'Atan':6, 'Sinh':7, 'Cosh':8, 'Tanh':9, 'Asinh':10,
'Acosh':11, 'Atanh':12, 'Ceil':13, 'Floor':14, 'Round':15,
'Exp':16}
self.mirrorpad_mode = {'CONSTANT':0, 'REFLECT':1, 'SYMMETRIC':2}
self.name_operand_dict = {}
def add_operand(self, name, type):
node = self.name_node_dict[name]
if name not in self.name_operand_dict:
dtype = node.attr['dtype'].type
if dtype == 0:
dtype = node.attr['T'].type
dims = [-1,-1,-1,-1]
if 'shape' in node.attr:
dims[0] = node.attr['shape'].shape.dim[0].size
dims[1] = node.attr['shape'].shape.dim[1].size
dims[2] = node.attr['shape'].shape.dim[2].size
dims[3] = node.attr['shape'].shape.dim[3].size
operand = Operand(name, dtype, dims)
self.name_operand_dict[name] = operand;
self.name_operand_dict[name].add_iotype(type)
return self.name_operand_dict[name].index
def dump_for_tensorboard(self):
graph = tf.get_default_graph()
tf.import_graph_def(self.graph_def, name="")
tf.summary.FileWriter('/tmp/graph', graph)
print('graph saved, run "tensorboard --logdir=/tmp/graph" to see it')
def get_conv2d_params(self, conv2d_scope_name):
knode = self.name_node_dict[conv2d_scope_name + '/kernel']
bnode = self.name_node_dict[conv2d_scope_name + '/bias']
if conv2d_scope_name + '/dilation_rate' in self.name_node_dict:
dnode = self.name_node_dict[conv2d_scope_name + '/dilation_rate']
else:
dnode = None
        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
if conv2d_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[conv2d_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, dnode, anode
def get_dense_params(self, dense_scope_name):
knode = self.name_node_dict[dense_scope_name + '/kernel']
bnode = self.name_node_dict.get(dense_scope_name + '/bias')
        # the BiasAdd name may have been changed into the output name,
        # if activation is None and BiasAdd.next is the last op, which is Identity
anode = None
if bnode:
if dense_scope_name + '/BiasAdd' in self.edges:
anode = self.edges[dense_scope_name + '/BiasAdd'][0]
if anode.op not in self.conv_activations:
anode = None
else:
anode = None
return knode, bnode, anode
def dump_complex_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, dnode for dilation, anode for activation
knode, bnode, dnode, anode = self.get_conv2d_params(scope_name)
if dnode is not None:
dilation = struct.unpack('i', dnode.attr['value'].tensor.tensor_content[0:4])[0]
else:
dilation = 1
if anode is not None:
activation = anode.op
else:
activation = 'None'
padding = node.attr['padding'].s.decode("utf-8")
# conv2d with dilation > 1 generates tens of nodes, not easy to parse them, so use this tricky method.
if dilation > 1 and scope_name + '/stack' in self.name_node_dict:
if self.name_node_dict[scope_name + '/stack'].op == "Const":
padding = 'SAME'
padding = self.conv_paddings[padding]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 1
np.array([self.op2code[node.op], dilation, padding, self.conv_activations[activation], in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
f.write(bias)
input_name = self.conv2d_scopename_inputname_dict[scope_name]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_dense_to_file(self, node, f):
assert(node.op == 'MatMul')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
scope_name = TFConverter.get_scope_name(node.name)
#knode for kernel, bnode for bias, anode for activation
knode, bnode, anode = self.get_dense_params(scope_name.split('/')[0])
if bnode is not None:
has_bias = 1
btensor = bnode.attr['value'].tensor
if btensor.tensor_shape.dim[0].size == 1:
bias = struct.pack("f", btensor.float_val[0])
else:
bias = btensor.tensor_content
else:
has_bias = 0
if anode is not None:
activation = anode.op
else:
activation = 'None'
ktensor = knode.attr['value'].tensor
in_channels = ktensor.tensor_shape.dim[0].size
out_channels = ktensor.tensor_shape.dim[1].size
if in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(in_channels, out_channels)
kernel = np.transpose(kernel, [1, 0])
np.array([self.op2code[node.op], self.conv_activations[activation], in_channels, out_channels, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
if has_bias:
f.write(bias)
input_name = self.dense_scopename_inputname_dict[scope_name.split('/')[0]]
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
if anode is not None:
output_operand_index = self.add_operand(anode.name, Operand.IOTYPE_OUTPUT)
else:
if bnode is not None:
output_operand_index = self.add_operand(self.edges[bnode.name][0].name, Operand.IOTYPE_OUTPUT)
else:
output_operand_index = self.add_operand(self.edges[scope_name+'/concat_1'][0].name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_simple_conv2d_to_file(self, node, f):
assert(node.op == 'Conv2D')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
node1 = self.name_node_dict[node.input[1]]
if node0.op == 'Const':
knode = node0
input_name = node.input[1]
else:
knode = node1
input_name = node.input[0]
ktensor = knode.attr['value'].tensor
filter_height = ktensor.tensor_shape.dim[0].size
filter_width = ktensor.tensor_shape.dim[1].size
in_channels = ktensor.tensor_shape.dim[2].size
out_channels = ktensor.tensor_shape.dim[3].size
if filter_height * filter_width * in_channels * out_channels == 1:
kernel = np.float32(ktensor.float_val[0])
else:
kernel = np.frombuffer(ktensor.tensor_content, dtype=np.float32)
kernel = kernel.reshape(filter_height, filter_width, in_channels, out_channels)
kernel = np.transpose(kernel, [3, 0, 1, 2])
has_bias = 0
dilation = 1
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], dilation, self.conv_paddings[padding], self.conv_activations['None'],
in_channels, out_channels, filter_height, has_bias], dtype=np.uint32).tofile(f)
kernel.tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_depth2space_to_file(self, node, f):
assert(node.op == 'DepthToSpace')
self.layer_number = self.layer_number + 1
block_size = node.attr['block_size'].i
np.array([self.op2code[node.op], block_size], dtype=np.uint32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mirrorpad_to_file(self, node, f):
assert(node.op == 'MirrorPad')
self.layer_number = self.layer_number + 1
mode = node.attr['mode'].s
mode = self.mirrorpad_mode[mode.decode("utf-8")]
np.array([self.op2code[node.op], mode], dtype=np.uint32).tofile(f)
pnode = self.name_node_dict[node.input[1]]
self.converted_nodes.add(pnode.name)
paddings = pnode.attr['value'].tensor.tensor_content
f.write(paddings)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_maximum_to_file(self, node, f):
assert(node.op == 'Maximum')
self.layer_number = self.layer_number + 1
ynode = self.name_node_dict[node.input[1]]
y = ynode.attr['value'].tensor.float_val[0]
np.array([self.op2code[node.op]], dtype=np.uint32).tofile(f)
np.array([y], dtype=np.float32).tofile(f)
self.converted_nodes.add(node.name)
input_operand_index = self.add_operand(node.input[0], Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathbinary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
i1_node = self.name_node_dict[node.input[1]]
np.array([self.op2code['MathBinary'], self.mathbin2code[node.op]], dtype=np.uint32).tofile(f)
if i0_node.op == 'Const':
scalar = i0_node.attr['value'].tensor.float_val[0]
np.array([1], dtype=np.uint32).tofile(f) # broadcast: 1
np.array([scalar], dtype=np.float32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f) # broadcast: 0
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
elif i1_node.op == 'Const':
scalar = i1_node.attr['value'].tensor.float_val[0]
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([1], dtype=np.uint32).tofile(f)
np.array([scalar], dtype=np.float32).tofile(f)
else:
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
np.array([0], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i1_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index], dtype=np.uint32).tofile(f)
def dump_mathunary_to_file(self, node, f):
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
i0_node = self.name_node_dict[node.input[0]]
np.array([self.op2code['MathUnary'], self.mathun2code[node.op]], dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(i0_node.name, Operand.IOTYPE_INPUT)
np.array([input_operand_index], dtype=np.uint32).tofile(f)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([output_operand_index],dtype=np.uint32).tofile(f)
def dump_avg_pool_to_file(self, node, f):
assert(node.op == 'AvgPool')
self.layer_number = self.layer_number + 1
self.converted_nodes.add(node.name)
node0 = self.name_node_dict[node.input[0]]
strides = node.attr['strides']
        # TensorFlow does not support pooling strides in the batch dimension, and the
        # current native NN does not support pooling strides in the channel dimension, so assert() here.
assert(strides.list.i[1]==strides.list.i[2])
assert(strides.list.i[0]==1)
assert(strides.list.i[3]==1)
strides = strides.list.i[1]
filter_node = node.attr['ksize']
input_name = node.input[0]
        # TensorFlow does not support pooling ksize in the batch and channel dimensions.
assert(filter_node.list.i[0]==1)
assert(filter_node.list.i[3]==1)
filter_height = filter_node.list.i[1]
filter_width = filter_node.list.i[2]
padding = node.attr['padding'].s.decode("utf-8")
np.array([self.op2code[node.op], strides, self.pool_paddings[padding], filter_height],
dtype=np.uint32).tofile(f)
input_operand_index = self.add_operand(input_name, Operand.IOTYPE_INPUT)
output_operand_index = self.add_operand(node.name, Operand.IOTYPE_OUTPUT)
np.array([input_operand_index, output_operand_index],dtype=np.uint32).tofile(f)
def dump_layers_to_file(self, f):
for node in self.nodes:
if node.name in self.converted_nodes:
continue
# conv2d with dilation generates very complex nodes, so handle it in special
if self.in_conv2d_scope(node.name):
if node.op == 'Conv2D':
self.dump_complex_conv2d_to_file(node, f)
continue
if self.in_dense_scope(node.name):
if node.op == 'MatMul':
self.dump_dense_to_file(node, f)
continue
if node.op == 'Conv2D':
self.dump_simple_conv2d_to_file(node, f)
continue
if node.name in self.output_names:
input_name = self.id_different_scope_dict[node.name]
if TFConverter.get_scope_name(input_name)!=TFConverter.get_scope_name(node.name):
continue
if node.op == 'AvgPool':
self.dump_avg_pool_to_file(node, f)
elif node.op == 'DepthToSpace':
self.dump_depth2space_to_file(node, f)
elif node.op == 'MirrorPad':
self.dump_mirrorpad_to_file(node, f)
elif node.op == 'Maximum':
self.dump_maximum_to_file(node, f)
elif node.op in self.mathbin2code:
self.dump_mathbinary_to_file(node, f)
elif node.op in self.mathun2code:
self.dump_mathunary_to_file(node, f)
def dump_operands_to_file(self, f):
operands = sorted(self.name_operand_dict.values())
for operand in operands:
#print('{}'.format(operand))
np.array([operand.index, len(operand.name)], dtype=np.uint32).tofile(f)
f.write(operand.name.encode('utf-8'))
np.array([operand.iotype, operand.dtype], dtype=np.uint32).tofile(f)
np.array(operand.dims, dtype=np.uint32).tofile(f)
def dump_to_file(self):
with open(self.outfile, 'wb') as f:
f.write(header.str.encode('utf-8'))
np.array([header.major, header.minor], dtype=np.uint32).tofile(f)
self.dump_layers_to_file(f)
self.dump_operands_to_file(f)
np.array([self.layer_number, len(self.name_operand_dict)], dtype=np.uint32).tofile(f)
def generate_name_node_dict(self):
for node in self.nodes:
self.name_node_dict[node.name] = node
def generate_output_names(self):
used_names = []
for node in self.nodes:
for input in node.input:
used_names.append(input)
for node in self.nodes:
if node.name not in used_names:
self.output_names.append(node.name)
def remove_identity(self):
self.id_different_scope_dict = {}
id_nodes = []
id_dict = {}
for node in self.nodes:
if node.op == 'Identity':
name = node.name
input = node.input[0]
id_nodes.append(node)
# do not change the output name
if name in self.output_names:
self.name_node_dict[input].name = name
self.name_node_dict[name] = self.name_node_dict[input]
del self.name_node_dict[input]
self.id_different_scope_dict[name] = input
else:
id_dict[name] = input
for idnode in id_nodes:
self.nodes.remove(idnode)
for node in self.nodes:
for i in range(len(node.input)):
input = node.input[i]
if input in id_dict:
node.input[i] = id_dict[input]
def generate_edges(self):
for node in self.nodes:
for input in node.input:
if input in self.edges:
self.edges[input].append(node)
else:
self.edges[input] = [node]
@staticmethod
def get_scope_name(name):
index = name.rfind('/')
if index == -1:
return ""
return name[0:index]
def in_conv2d_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
return False;
for scope in self.conv2d_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def in_dense_scope(self, name):
inner_scope = TFConverter.get_scope_name(name)
if inner_scope == "":
return False;
for scope in self.dense_scope_names:
index = inner_scope.find(scope)
if index == 0:
return True
return False
def generate_sub_block_op_scope_info(self):
# mostly, conv2d/dense is a sub block in graph, get the scope name
for node in self.nodes:
if node.op == 'Conv2D':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.conv2d is called directly
if scope == '':
continue
# for the case tf.nn.conv2d is called within a scope
if scope + '/kernel' not in self.name_node_dict:
continue
self.conv2d_scope_names.add(scope)
elif node.op == 'MatMul':
scope = TFConverter.get_scope_name(node.name)
# for the case tf.nn.dense is called directly
if scope == '':
continue
# for the case tf.nn.dense is called within a scope
if scope + '/kernel' not in self.name_node_dict and scope.split('/Tensordot')[0] + '/kernel' not in self.name_node_dict:
continue
self.dense_scope_names.add(scope.split('/Tensordot')[0])
# get the input name to the conv2d/dense sub block
for node in self.nodes:
scope = TFConverter.get_scope_name(node.name)
if scope in self.conv2d_scope_names:
if node.op == 'Conv2D' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.conv2d_scopename_inputname_dict[scope] = inp
elif scope in self.dense_scope_names:
if node.op == 'MatMul' or node.op == 'Shape':
for inp in node.input:
if TFConverter.get_scope_name(inp) != scope:
self.dense_scopename_inputname_dict[scope] = inp
elif scope.split('/Tensordot')[0] in self.dense_scope_names:
if node.op == 'Transpose':
for inp in node.input:
if TFConverter.get_scope_name(inp).find(scope)<0 and TFConverter.get_scope_name(inp).find(scope.split('/')[0])<0:
self.dense_scopename_inputname_dict[scope.split('/Tensordot')[0]] = inp
def run(self):
self.generate_name_node_dict()
self.generate_output_names()
self.remove_identity()
self.generate_edges()
self.generate_sub_block_op_scope_info()
if self.dump4tb:
self.dump_for_tensorboard()
self.dump_to_file()
def convert_from_tensorflow(infile, outfile, dump4tb):
with open(infile, 'rb') as f:
# read the file in .proto format
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
nodes = graph_def.node
converter = TFConverter(graph_def, nodes, outfile, dump4tb)
converter.run()
| GMAT-main | ffmpeg-gpu/tools/python/convert_from_tensorflow.py |
# Copyright (c) 2019 Guo Yejun
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
# verified with Python 3.5.2 on Ubuntu 16.04
import argparse
import os
from convert_from_tensorflow import *
def get_arguments():
parser = argparse.ArgumentParser(description='generate native mode model with weights from deep learning model')
parser.add_argument('--outdir', type=str, default='./', help='where to put generated files')
parser.add_argument('--infmt', type=str, default='tensorflow', help='format of the deep learning model')
parser.add_argument('infile', help='path to the deep learning model with weights')
parser.add_argument('--dump4tb', type=str, default='no', help='dump file for visualization in tensorboard')
return parser.parse_args()
def main():
args = get_arguments()
if not os.path.isfile(args.infile):
print('the specified input file %s does not exist' % args.infile)
exit(1)
if not os.path.exists(args.outdir):
print('create output directory %s' % args.outdir)
os.mkdir(args.outdir)
basefile = os.path.split(args.infile)[1]
basefile = os.path.splitext(basefile)[0]
outfile = os.path.join(args.outdir, basefile) + '.model'
dump4tb = False
if args.dump4tb.lower() in ('yes', 'true', 't', 'y', '1'):
dump4tb = True
if args.infmt == 'tensorflow':
convert_from_tensorflow(args.infile, outfile, dump4tb)
if __name__ == '__main__':
main()
| GMAT-main | ffmpeg-gpu/tools/python/convert.py |
# Copyright (c) 2019
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
str = 'FFMPEGDNNNATIVE'
# increase major and reset minor when we have to re-convert the model file
major = 1
# increase minor when we don't have to re-convert the model file
minor = 23
| GMAT-main | ffmpeg-gpu/tools/python/convert_header.py |
# Copyright (c) 2021
#
# This file is part of FFmpeg.
#
# FFmpeg is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# FFmpeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ==============================================================================
# verified with Python 3.6.8 on CentOS 7.2
import tensorflow as tf
visible_device_list = '0' # use , separator for more GPUs like '0, 1'
per_process_gpu_memory_fraction = 0.9 # avoid out of memory
intra_op_parallelism_threads = 2 # default in tensorflow
inter_op_parallelism_threads = 5 # default in tensorflow
gpu_options = tf.compat.v1.GPUOptions(
per_process_gpu_memory_fraction = per_process_gpu_memory_fraction,
visible_device_list = visible_device_list,
allow_growth = True)
config = tf.compat.v1.ConfigProto(
allow_soft_placement = True,
log_device_placement = False,
intra_op_parallelism_threads = intra_op_parallelism_threads,
inter_op_parallelism_threads = inter_op_parallelism_threads,
gpu_options = gpu_options)
s = config.SerializeToString()
# print(list(map(hex, s))) # print by json if need
print('a serialized protobuf string for TF_SetConfig, note the byte order is in normal order.')
b = ''.join(format(b,'02x') for b in s)
print('0x%s' % b) # print by hex format
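# Optional sanity check (illustrative, not part of the original script): the hex string can be
# parsed back into a ConfigProto to confirm the serialization round-trips.
#   cfg = tf.compat.v1.ConfigProto()
#   cfg.ParseFromString(bytes.fromhex(b))
#   print(cfg)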
| GMAT-main | ffmpeg-gpu/tools/python/tf_sess_config.py |
import torch
import torch.cuda as cuda
from frame_extractor import FrameExtractor
import threading
from functools import reduce
import time
import sys
import ctypes
cudaFree = ctypes.CDLL('libcudart.so').cudaFree
dev = torch.device("cuda:0")
# initialize cuda runtime
dummy = torch.empty((1,), device=dev)
def extract_proc(file_path, l_n_frame):
    # attach the CUDA primary context to this worker thread (cudaFree(0) forces context creation)
cudaFree(0)
with open(file_path, 'rb') as mp4:
extractor = FrameExtractor(buffer=mp4.read())
n_frame = 0
bgr = torch.empty((3, extractor.get_height(), extractor.get_width()), dtype=torch.float32, device=dev)
with cuda.stream(cuda.Stream(dev)):
while extractor.extract_to_device_buffer(bgr.data_ptr(), cuda.current_stream().cuda_stream):
n_frame += 1
l_n_frame.append(n_frame)
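# Each worker thread attaches to the shared CUDA primary context and decodes on its own stream,
# so the extraction threads can run concurrently; l_n_frame only collects per-thread frame counts
# for the aggregate fps figure printed below.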
if __name__ == '__main__':
file_path = '../build/bunny.mp4'
if len(sys.argv) >= 2:
file_path = sys.argv[1]
n_thread = 2
l_n_frame = []
l_thread = []
for i in range(n_thread):
th = threading.Thread(target=extract_proc, args=(file_path, l_n_frame))
l_thread.append(th)
th.start()
t0 = time.time()
for th in l_thread:
th.join()
sum = reduce(lambda x,y:x+y, l_n_frame)
print('sum =', sum, ', fps =', sum / (time.time() - t0))
| GMAT-main | metrans/python/app_extract_perf.py |
import torch
import torch.cuda as cuda
from frame_extractor import FrameExtractor
import ctypes
libnvToolsExt = ctypes.CDLL('libnvToolsExt.so')
nvtxRangePush = libnvToolsExt.nvtxRangePushA
nvtxRangePop = libnvToolsExt.nvtxRangePop
dev = torch.device("cuda:0")
dummy = torch.empty((1,), device=dev)
file_path = '../build/bunny.mp4'
# extractor = FrameExtractor(file_path, None)
with open(file_path, 'rb') as mp4:
extractor = FrameExtractor(buffer=mp4.read())
bgr = torch.empty((3, extractor.get_height(), extractor.get_width()), dtype=torch.float32, device=dev)
extractor.set_frame_interval(10)
n = 0
with open('out.bgrp', 'wb') as f, cuda.stream(cuda.Stream(dev)):
while True:
nvtxRangePush(('Frame#' + str(n)).encode('utf8'))
if not extractor.extract_to_device_buffer(bgr.data_ptr(), cuda.current_stream().cuda_stream):
nvtxRangePop()
            break
n += 1
        t = (bgr.cpu() * 255.0).char()
nvtxRangePop()
f.write(t.numpy().tobytes())
| GMAT-main | metrans/python/app_extract.py |
import ctypes
CSwscale = ctypes.CDLL("../build/CSwscale.so")
SwscaleCuda_Nv12ToRgbpf32_Init = CSwscale.SwscaleCuda_Nv12ToRgbpf32_Init
SwscaleCuda_Nv12ToRgbpf32_Convert = CSwscale.SwscaleCuda_Nv12ToRgbpf32_Convert
SwscaleCuda_Nv12ToRgbpf32_Delete = CSwscale.SwscaleCuda_Nv12ToRgbpf32_Delete
SwscaleCuda_Nv12ToRgbpf32_Init.restype = ctypes.c_void_p
class SwscaleCuda:
def __init__(self, w, h):
self.ctx = SwscaleCuda_Nv12ToRgbpf32_Init(ctypes.c_int(w), ctypes.c_int(h))
self.w = w
self.h = h
def __del__(self):
SwscaleCuda_Nv12ToRgbpf32_Delete(ctypes.c_void_p(self.ctx))
# in_nv12 and out_rgbp are pointers to CUDA memory
def nv12_to_rgbpf32(self, in_nv12, in_stride, out_rgbp, out_stride, stream=0):
return SwscaleCuda_Nv12ToRgbpf32_Convert(ctypes.c_void_p(self.ctx), in_nv12, in_stride,
ctypes.c_ulonglong(out_rgbp), ctypes.c_int(out_stride),
self.w, self.h, ctypes.c_ulonglong(stream))
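# Minimal usage sketch (illustrative only; `nv12_dev_ptr`, `nv12_pitch` and `rgbp` are hypothetical
# names for a device NV12 buffer, its row pitch, and a (3, h, w) float32 CUDA tensor, mirroring
# the call in app_extract_heif.py):
#   scale = SwscaleCuda(w, h)
#   scale.nv12_to_rgbpf32(nv12_dev_ptr, nv12_pitch, rgbp.data_ptr(), rgbp.stride(1))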
| GMAT-main | metrans/python/swscale.py |
import numpy as np
import torch
import torch.cuda as cuda
from frame_extractor import FrameExtractor
import heif_format
import swscale
import ctypes
libnvToolsExt = ctypes.CDLL('libnvToolsExt.so')
nvtxRangePush = libnvToolsExt.nvtxRangePushA
nvtxRangePop = libnvToolsExt.nvtxRangePop
dev = torch.device("cuda:0")
dummy = torch.empty((1,), device=dev)
file_path = '../build/bunny.mp4'
with open(file_path, 'rb') as mp4:
extractor = FrameExtractor(buffer=mp4.read())
enc = heif_format.NvEncLite(width=extractor.get_width(), height=extractor.get_height())
dec = heif_format.NvDecLite()
nv12 = torch.empty((1, extractor.get_height() * 3 // 2, extractor.get_width()), dtype=torch.uint8, device=dev)
rgbp = torch.empty((3, extractor.get_height(), extractor.get_width()), dtype=torch.float32, device=dev)
scale = swscale.SwscaleCuda(extractor.get_width(), extractor.get_height())
extractor.set_frame_interval(10)
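# Per-frame round trip exercised below: the extractor writes one NV12 frame into a device buffer,
# NVENC encodes it as a single HEVC still, the packet is wrapped into an in-memory HEIF image,
# then the HEIF is parsed back, decoded, converted to planar RGB float and appended to the raw
# RGB24 dump.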
n = 0
with open('bunny.rgb24', 'wb') as f, cuda.stream(cuda.Stream(dev)):
while True:
nvtxRangePush(('Frame#' + str(n)).encode('utf8'))
if not extractor.extract_to_buffer(nv12.data_ptr(), cuda.current_stream().cuda_stream):
nvtxRangePop()
break
n += 1
pkt = heif_format.Packet()
enc.encode_device_frame(nv12.data_ptr(), pkt.v)
writer = heif_format.NvHeifWriter()
img, size = writer.write_stillimage(pkt.v)
reader = heif_format.NvHeifReader(img, size)
pkt_ref, pkt_size = reader.read_image()
frame, width, height, linesize = dec.decode_still(pkt_ref, pkt_size)
scale.nv12_to_rgbpf32(frame, linesize, rgbp.data_ptr(), rgbp.stride(1), cuda.current_stream().cuda_stream)
rgb24 = (rgbp*255.0).permute((1, 2, 0)).cpu().numpy().astype(np.uint8)
if n == 1:
seq = rgb24
else:
seq = np.concatenate([seq, rgb24], axis=0)
seq.tofile('bunny_ext_rgb24.rgb') | GMAT-main | metrans/python/app_extract_heif.py |
import ctypes
libavutil = ctypes.CDLL('libavutil.so')
libavutil.av_log_set_level(24)
CFrameExtractor = ctypes.CDLL('../build/CFrameExtractor.so')
FrameExtractor_InitFromFile = CFrameExtractor.FrameExtractor_InitFromFile
FrameExtractor_InitFromBuffer = CFrameExtractor.FrameExtractor_InitFromBuffer
FrameExtractor_Delete = CFrameExtractor.FrameExtractor_Delete
FrameExtractor_SetFrameInterval = CFrameExtractor.FrameExtractor_SetFrameInterval
FrameExtractor_SetTimeInterval = CFrameExtractor.FrameExtractor_SetTimeInterval
FrameExtractor_GetWidth = CFrameExtractor.FrameExtractor_GetWidth
FrameExtractor_GetHeight = CFrameExtractor.FrameExtractor_GetHeight
FrameExtractor_GetFrameSize = CFrameExtractor.FrameExtractor_GetFrameSize
FrameExtractor_ExtractToDeviceBuffer = CFrameExtractor.FrameExtractor_ExtractToDeviceBuffer
FrameExtractor_ExtractToBuffer = CFrameExtractor.FrameExtractor_ExtractToBuffer
FrameExtractor_InitFromFile.restype = ctypes.c_void_p
FrameExtractor_InitFromBuffer.restype = ctypes.c_void_p
class FrameExtractor:
def __init__(self, file_path=None, buffer=None):
if file_path:
self.h = FrameExtractor_InitFromFile(file_path.encode('utf-8'))
elif buffer:
self.buffer = buffer
self.h = FrameExtractor_InitFromBuffer(buffer, len(buffer))
else:
raise ValueError('file_path or buffer is needed')
def __del__(self):
print('delete FrameExtractor')
FrameExtractor_Delete(ctypes.c_ulonglong(self.h))
def set_frame_interval(self, frame_interval):
        FrameExtractor_SetFrameInterval(ctypes.c_ulonglong(self.h), frame_interval)
def set_time_interval(self, time_interval):
        FrameExtractor_SetTimeInterval(ctypes.c_ulonglong(self.h), ctypes.c_double(time_interval))
def get_width(self):
return FrameExtractor_GetWidth(ctypes.c_ulonglong(self.h))
def get_height(self):
return FrameExtractor_GetHeight(ctypes.c_ulonglong(self.h))
def get_frame_size(self):
        return FrameExtractor_GetFrameSize(ctypes.c_ulonglong(self.h))
def extract_to_device_buffer(self, dpBgrp, stream=0):
return FrameExtractor_ExtractToDeviceBuffer(ctypes.c_ulonglong(self.h), ctypes.c_ulonglong(dpBgrp), ctypes.c_ulonglong(stream))
def extract_to_buffer(self, pframe, stream=0):
return FrameExtractor_ExtractToBuffer(ctypes.c_ulonglong(self.h), ctypes.c_ulonglong(pframe), ctypes.c_ulonglong(stream)) | GMAT-main | metrans/python/frame_extractor.py |
import torch
import torch.cuda as cuda
import threading
from functools import reduce
import time
import sys
import ctypes
import pdb
from frame_extractor import FrameExtractor
import heif_format
import swscale
cudaFree = ctypes.CDLL('libcudart.so').cudaFree
cudaSetDevice = ctypes.CDLL('libcudart.so').cudaSetDevice
dev = torch.device("cuda:0")
# initialize cuda runtime
dummy = torch.empty((1,), device=dev)
def extract_heif_proc(file_path, l_n_frame):
# cudaFree(0)
cudaSetDevice(ctypes.c_int(0))
with open(file_path, 'rb') as mp4:
extractor = FrameExtractor(buffer=mp4.read())
enc = heif_format.NvEncLite(width=extractor.get_width(), height=extractor.get_height())
dec = heif_format.NvDecLite()
nv12 = torch.empty((2, extractor.get_height(), extractor.get_width()), dtype=torch.uint8, device=dev)
rgbp = torch.empty((3, extractor.get_height(), extractor.get_width()), dtype=torch.float32, device=dev)
scale = swscale.SwscaleCuda(extractor.get_width(), extractor.get_height())
n_frame = 0
with cuda.stream(cuda.Stream(dev)):
while extractor.extract_to_buffer(nv12.data_ptr(), cuda.current_stream().cuda_stream):
n_frame += 1
pkt = heif_format.Packet()
enc.encode_device_frame(nv12.data_ptr(), pkt.v)
writer = heif_format.NvHeifWriter()
img, size = writer.write_stillimage(pkt.v)
reader = heif_format.NvHeifReader(img, size)
pkt_ref, pkt_size = reader.read_image()
frame, width, height, linesize = dec.decode_still(pkt_ref, pkt_size)
scale.nv12_to_rgbpf32(frame, linesize, rgbp.data_ptr(), rgbp.stride(1))
l_n_frame.append(n_frame)
if __name__ == '__main__':
file_path = '../build/bunny.mp4'
if len(sys.argv) >= 2:
file_path = sys.argv[1]
n_thread = 2
l_n_frame = []
l_thread = []
for i in range(n_thread):
th = threading.Thread(target=extract_heif_proc, args=(file_path, l_n_frame))
l_thread.append(th)
th.start()
t0 = time.time()
for th in l_thread:
th.join()
sum = reduce(lambda x,y:x+y, l_n_frame)
print('sum =', sum, ', fps =', sum / (time.time() - t0)) | GMAT-main | metrans/python/app_extract_heif_perf.py |
import ctypes
import numpy as np
CHeif = ctypes.CDLL('../build/CHeif.so')
NvEncLite_InitStill = CHeif.NvEncLite_InitStill
NvEncLite_EncodeDeviceFrame = CHeif.NvEncLite_EncodeDeviceFrame
NvHeifWriter_Init = CHeif.NvHeifWriter_Init
NvHeifWriter_WriteStillImage = CHeif.NvHeifWriter_WriteStillImage
NvHeifReader_Init = CHeif.NvHeifReader_Init
NvDecLite_Init = CHeif.NvDecLite_Init
NvDecLite_DecodeStill = CHeif.NvDecLite_DecodeStill
NvHeifReader_ReadImage = CHeif.NvHeifReader_ReadImage
NvHeifWriter_Delete = CHeif.NvHeifWriter_Delete
NvEncLite_Delete = CHeif.NvEncLite_Delete
NvHeifReader_Delete = CHeif.NvHeifReader_Delete
NvDecLite_Delete = CHeif.NvDecLite_Delete
NvHeifWriter_GetBufferData = CHeif.NvHeifWriter_GetBufferData
NvHeifWriter_GetBufferSize = CHeif.NvHeifWriter_GetBufferSize
NvHeifWriter_WriteToNp = CHeif.NvHeifWriter_WriteToNp
Create_PktVector = CHeif.Create_PktVector
Delete_PktVector = CHeif.Delete_PktVector
# NvEncLite_InitStill.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_wchar_p]
NvEncLite_InitStill.restype = ctypes.c_void_p
NvHeifWriter_Init.restype = ctypes.c_void_p
NvHeifWriter_GetBufferData.restype = ctypes.c_void_p
NvHeifWriter_GetBufferSize.restype = ctypes.c_ulonglong
NvHeifReader_Init.restype = ctypes.c_void_p
NvDecLite_Init.restype = ctypes.c_void_p
Create_PktVector.restype = ctypes.c_void_p
class Packet:
def __init__(self):
self.v = Create_PktVector()
def __del__(self):
Delete_PktVector(ctypes.c_void_p(self.v))
class NvEncLite:
def __init__(self, width, height, init_param="-codec hevc -preset p1 -bitrate 4M"):
if width == 0 or height == 0:
raise ValueError('width and height cannot be 0')
self.h = height
self.w = width
self.enc = NvEncLite_InitStill(width, height)
def __del__(self):
NvEncLite_Delete(ctypes.c_void_p(self.enc))
def encode_device_frame(self, dpframe, vpkt):
return NvEncLite_EncodeDeviceFrame(ctypes.c_void_p(self.enc), ctypes.c_ulonglong(dpframe), ctypes.c_void_p(vpkt))
class NvDecLite:
def __init__(self):
self.dec = NvDecLite_Init()
def __del__(self):
NvDecLite_Delete(ctypes.c_void_p(self.dec))
def decode_still(self, pkt_data, pkt_size):
frame = ctypes.POINTER(ctypes.c_uint8)()
width = ctypes.c_int()
height = ctypes.c_int()
linesize = ctypes.c_int()
NvDecLite_DecodeStill(ctypes.c_void_p(self.dec), ctypes.byref(pkt_data), ctypes.c_int(pkt_size), ctypes.byref(frame), ctypes.byref(width), ctypes.byref(height), ctypes.byref(linesize))
return frame, width, height, linesize
# Add external memory ctor
class NvHeifWriter:
def __init__(self):
self.writer = NvHeifWriter_Init()
def __del__(self):
NvHeifWriter_Delete(ctypes.c_void_p(self.writer))
def write_stillimage(self, pkt):
res = NvHeifWriter_WriteStillImage(ctypes.c_void_p(self.writer), ctypes.c_void_p(pkt))
# img_buf = NvHeifWriter_GetBufferData(ctypes.c_void_p(self.writer))
size = NvHeifWriter_GetBufferSize(ctypes.c_void_p(self.writer))
img_np = np.zeros((size,), dtype=np.uint8)
NvHeifWriter_WriteToNp(ctypes.c_void_p(self.writer), img_np.ctypes.data_as(ctypes.c_void_p))
return img_np, size
class NvHeifReader:
def __init__(self, img, size):
if img is None:
raise ValueError('input buffer cannot be empty')
if size is None:
raise ValueError('input buffer size cannot be 0')
        buffer = img.ctypes.data_as(ctypes.c_void_p)
        self.reader = NvHeifReader_Init(buffer, ctypes.c_ulonglong(size))
def __del__(self):
NvHeifReader_Delete(ctypes.c_void_p(self.reader))
def read_image(self):
pkt_ref = ctypes.POINTER(ctypes.c_uint8)()
pkt_size = NvHeifReader_ReadImage(ctypes.c_void_p(self.reader), ctypes.byref(pkt_ref))
return pkt_ref, pkt_size
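# End-to-end sketch of how these wrappers fit together (illustrative; mirrors app_extract_heif.py,
# where `nv12` is a device buffer holding one NV12 frame of the given width/height):
#   enc = NvEncLite(width, height); pkt = Packet()
#   enc.encode_device_frame(nv12.data_ptr(), pkt.v)
#   img, size = NvHeifWriter().write_stillimage(pkt.v)
#   pkt_ref, pkt_size = NvHeifReader(img, size).read_image()
#   frame, w, h, pitch = NvDecLite().decode_still(pkt_ref, pkt_size)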
| GMAT-main | metrans/python/heif_format.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
#
# 1. run nds_transcode.py to load data to Iceberg or DeltaLake. => get "TLoad" and "timestamp" for 2.
# 2. run nds_gen_query_stream.py to generate query streams with RNGSEED = "timestamp" from 1.
#    TPC-DS specification requires Sq >= 4, but this script allows Sq >= 1 for test purposes.
# 3. run nds_power.py to do Power Test => get "TPower"
# 4. run nds-throughput to do Throughput Test 1. => get "Ttt1"
# 5. run nds_maintenance.py to do Maintenance Test 1. => get "Tdm1"
# 6. run nds-throughput to do Throughput Test 2. => get "Ttt2"
# 7. run nds_maintenance.py to do Maintenance Test 2. => get "Tdm2"
import argparse
import math
import subprocess
import yaml
def get_yaml_params(yaml_file):
with open(yaml_file, 'r') as f:
try:
params = yaml.safe_load(f)
return params
except yaml.YAMLError as exc:
print(exc)
return None
def get_load_end_timestamp(load_report_file):
"""get the end timestamp in str format from the load report file.
this timestamp will be used to generate query streams as the RNDSEED input argument.
"""
rngseed = None
with open(load_report_file, "r") as f:
for line in f:
if "RNGSEED used:" in line:
# e.g. "RNGSEED used: 07291122510"
rngseed = line.split(":")[1].strip()
if not rngseed:
raise Exception(
f"RNGSEED not found in Load Test report file: {load_report_file}")
else:
return rngseed
def get_load_time(load_report_file):
"""get the load test elapse time in str format from the load report file"""
load_elapse = None
with open(load_report_file, "r") as f:
for line in f:
if "Load Test Time" in line:
# e.g. "Load Test Time: 1234 seconds"
load_elapse = line.split(":")[1].split(" ")[1]
if load_elapse:
return load_elapse
else:
raise Exception(
f"Load Test Time not found in Load Test report file: {load_report_file}.")
def get_power_time(power_report_file):
"""get the total elapse time for Power Test in str format from the power report file"""
power_elapse = None
with open(power_report_file, "r") as f:
for line in f:
if "Power Test Time" in line:
# e.g. "app-20220715143743-0007,Power Test Time,11838"
power_elapse = line.split(",")[2].strip()
if power_elapse:
return power_elapse
else:
raise Exception(
f"Power Test Time not found in Power Test report file: {power_report_file}.")
def get_start_end_time(report_file):
"""get the start timestamp in str format from the Power Test report file"""
start_time = None
end_time = None
with open(report_file, "r") as f:
for line in f:
if "Power Start Time" in line:
# e.g. "app-20220715143743-0007,Power Start Time,1659067405.468058"
start_time = line.split(",")[2].strip()
if "Power End Time" in line:
# e.g. "app-20220715143743-0007,Power End Time,1659067405.468058"
end_time = line.split(",")[2].strip()
if start_time and end_time:
return start_time, end_time
else:
raise Exception(
f"Start or End time not found in Power Test report file: {report_file}")
def get_stream_range(num_streams, first_or_second):
"""get the range of stream numbers for the given number of total streams and the index of throughput test
e.g. num_streams = 9, first_or_second = 1 => return [1,2,3,4]
e.g. num_streams = 9, first_or_second = 2 => return [5,6,7,8]
"""
if first_or_second == 1:
stream_range = [x for x in range(1, num_streams//2+1)]
else:
stream_range = [x for x in range(num_streams//2+1, num_streams)]
return stream_range
def get_throughput_time(throughput_report_file_base, num_streams, first_or_second):
"""get Throughput elapse time according to Spec 7.4.7.4.
Filter all Throughput reports and get the start timestamp and end timestamp to calculate the
elapse time of Throughput Test
num_streams (int): number of streams in total including Power Stream
first_or_second (int): 1 for first throughput test, 2 for second throughput test
"""
start_time = []
end_time = []
stream_range = get_stream_range(num_streams, first_or_second)
for stream_num in stream_range:
report_file = throughput_report_file_base + f"_{stream_num}.csv"
sub_start_time, sub_end_time = get_start_end_time(report_file)
start_time.append(float(sub_start_time))
end_time.append(float(sub_end_time))
start_time = min(start_time)
end_time = max(end_time)
elapse = round_up_to_nearest_10_percent(end_time - start_time)
return elapse
def get_refresh_time(maintenance_report_file):
"""get Maintenance elapse time from report"""
maintenance_elapse = None
with open(maintenance_report_file, "r") as f:
for line in f:
if "Data Maintenance Time" in line:
# e.g. "app-20220715143743-0007,Data Maintenance Time,11838"
maintenance_elapse = line.split(",")[2].strip()
if maintenance_elapse:
return float(maintenance_elapse)
else:
raise Exception("Data Maintenance Time not found in Data Maintenance report file: " +
f"{maintenance_report_file}.")
def get_maintenance_time(maintenance_report_base_path,
num_streams, first_or_second):
"""Get maintenance time from maintenance report files generated in one maintenance test.
    The maintenance time is the sum of the elapse time of all maintenance reports in one maintenance test.
Args:
maintenance_report_base_path (str): base path of maintenance report files.
num_streams (int): total number of streams.
first_or_second (int): 1 or 2 for first or second maintenance test.
Returns:
float: elapse time of maintenance test.
"""
refresh_nums = get_stream_range(num_streams, first_or_second)
Tdm = 0
# refresh run for each stream in Throughput Test 1.
for i in refresh_nums:
maintenance_report_path = maintenance_report_base_path + \
f"_{i}" + ".csv"
Tdm += float(get_refresh_time(maintenance_report_path))
return round_up_to_nearest_10_percent(Tdm)
def get_throughput_stream_nums(num_streams, first_or_second):
"""only used for throughput run script, get the stream numbers for throughput test in a comma
separated string. e.g. "1,2,3,4" for num_streams = 9, first_or_second = 1
"""
int_range = get_stream_range(num_streams, first_or_second)
return ",".join([str(x) for x in int_range])
def round_up_to_nearest_10_percent(num):
return math.ceil(num * 10) / 10
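# Despite its name, this helper rounds *up* to the nearest tenth (Spec 7.1.16: elapsed times are
# rounded up to the nearest 0.1 second), e.g. round_up_to_nearest_10_percent(12.31) == 12.4.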
def run_data_gen(scale_factor, parallel, data_path, local_or_hdfs, num_streams):
gen_data_cmd = ["python3",
"nds_gen_data.py",
local_or_hdfs,
scale_factor,
parallel,
data_path,
"--overwrite_output"]
subprocess.run(gen_data_cmd, check=True)
for i in range(1, num_streams):
gen_refresh_data_cmd = ["python3",
"nds_gen_data.py",
local_or_hdfs,
scale_factor,
parallel,
data_path + f"_{i}",
"--overwrite_output",
"--update", str(i)]
subprocess.run(gen_refresh_data_cmd, check=True)
def run_load_test(template_path,
input_path,
output_path,
warehouse_type,
load_report_file):
load_test_cmd = ["./spark-submit-template",
template_path,
"nds_transcode.py",
input_path,
output_path,
load_report_file,
"--output_format", warehouse_type,
"--output_mode", "overwrite",
"--log_level", "WARN"]
subprocess.run(load_test_cmd, check=True)
def gen_streams(num_streams,
template_dir,
scale_factor,
stream_output_path,
RNGSEED):
gen_stream_cmd = ["python3",
"nds_gen_query_stream.py",
template_dir,
scale_factor,
stream_output_path,
"--rngseed", RNGSEED,
"--streams", str(num_streams)]
subprocess.run(gen_stream_cmd, check=True)
def power_test(template_path,
input_path,
stream_path,
report_path,
property_path,
output_path,
warehouse_type):
power_test_cmd = ["./spark-submit-template",
template_path,
"nds_power.py",
input_path,
stream_path,
report_path,
"--input_format", warehouse_type,
"--property_file", property_path]
if output_path:
power_test_cmd.extend(["--output_prefix", output_path])
subprocess.run(power_test_cmd, check=True)
def throughput_test(num_streams,
first_or_second,
template_path,
input_path,
stream_base_path,
report_base_path,
property_path,
warehouse_type):
throughput_cmd = ["./nds-throughput",
get_throughput_stream_nums(num_streams, first_or_second),
"./spark-submit-template",
template_path,
"nds_power.py",
input_path,
stream_base_path + "/query_{}.sql",
report_base_path + "_{}.csv",
"--input_format", warehouse_type,
"--property_file", property_path]
print(throughput_cmd)
subprocess.run(throughput_cmd, check=True)
def maintenance_test(num_streams,
first_or_second,
warehouse_path,
refresh_template_path,
maintenance_raw_data_base_path,
maintenance_query_path,
maintenance_report_base_path,
property_path,
warehouse_type):
refresh_nums = get_stream_range(num_streams, first_or_second)
# refresh run for each stream in Throughput Test.
for i in refresh_nums:
maintenance_raw_path = maintenance_raw_data_base_path + f"_{i}"
maintenance_report_path = maintenance_report_base_path + \
f"_{i}" + ".csv"
maintenance_cmd = ["./spark-submit-template",
refresh_template_path,
"nds_maintenance.py",
warehouse_path,
maintenance_raw_path,
maintenance_query_path,
maintenance_report_path,
"--property_file", property_path,
"--warehouse_type", warehouse_type]
subprocess.run(maintenance_cmd, check=True)
def get_perf_metric(scale_factor, num_streams_in_throughput, Tload, Tpower, Ttt1, Ttt2, Tdm1, Tdm2):
"""get the primary performance metric of benchmark.
note: TPT, TTT, TDM and TLD quantities are in units of decimal hours with a resolution of at least 1/3600th
of an hour (i.e., 1 second)
Args:
scale_factor (str): scale factor
num_streams_in_throughput (int): number of streams executed in a Throughput Test
Tload: time elapse for data loading in seconds
        Tpower (float): time elapse for Power Test in seconds
Ttt1 (float): time elapse for Throughput Test 1 in seconds
Ttt2 (float): time elapse for Throughput Test 2 in seconds
Tdm1 (float): time elapse for Maintenance Test 1 in seconds
Tdm2 (float): time elapse for Maintenance Test 2 in seconds
"""
# Q=Sq*99
Q = num_streams_in_throughput * 99
Tpt = (Tpower * num_streams_in_throughput) / 3600
Ttt = (Ttt1 + Ttt2) / 3600
Tdm = (Tdm1 + Tdm2) / 3600
Tld = (0.01 * num_streams_in_throughput * Tload) / 3600
metric = int(int(scale_factor) * Q / (Tpt*Ttt*Tdm*Tld)**(1/4))
return metric
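# Worked example (illustrative numbers only): with scale_factor='3000' and 4 streams per
# Throughput Test, Q = 4 * 99 = 396; if Tpt, Ttt, Tdm and Tld each come out to exactly 1.0 hour,
# the fourth-root term is 1.0 and the metric is int(3000 * 396 / 1.0) = 1188000.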
def write_metrics_report(report_path, metrics_map):
"""write metrics to a csv file at report_path"""
with open(report_path, 'w') as f:
for key, value in metrics_map.items():
f.write(f"{key},{value}\n")
def run_full_bench(yaml_params):
skip_data_gen = yaml_params['data_gen']['skip']
scale_factor = str(yaml_params['data_gen']['scale_factor'])
parallel = str(yaml_params['data_gen']['parallel'])
raw_data_path = yaml_params['data_gen']['raw_data_path']
local_or_hdfs = yaml_params['data_gen']['local_or_hdfs']
# write to Iceberg or DeltaLake
skip_load_test = yaml_params['load_test']['skip']
load_template_path = yaml_params['load_test']['spark_template_path']
warehouse_output_path = yaml_params['load_test']['output_path']
warehouse_type = yaml_params['load_test']['warehouse_type']
load_report_path = yaml_params['load_test']['report_path']
skip_stream_gen = yaml_params['generate_query_stream']['skip']
num_streams = yaml_params['generate_query_stream']['num_streams']
query_template_dir = yaml_params['generate_query_stream']['query_template_dir']
stream_output_path = yaml_params['generate_query_stream']['stream_output_path']
power_stream_path = stream_output_path + "/query_0.sql"
skip_power_test = yaml_params['power_test']['skip']
power_template_path = yaml_params['power_test']['spark_template_path']
power_report_path = yaml_params['power_test']['report_path']
power_property_path = yaml_params['power_test']['property_path']
power_output_path = yaml_params['power_test']['output_path']
skip_throughput_test = yaml_params['throughput_test']['skip']
throughput_report_base = yaml_params['throughput_test']['report_base_path']
    # template to write to parquet, with GPU
skip_maintenance_test = yaml_params['maintenance_test']['skip']
# template for refresh functions, requires "spark.sql.catalog.spark_catalog.warehouse"
maintenance_refresh_template = yaml_params['maintenance_test']['maintenance_template_path']
maintenance_query_dir = yaml_params['maintenance_test']['query_dir']
maintenance_report_base_path = yaml_params['maintenance_test']['maintenance_report_base_path']
metrics_report = yaml_params['metrics_report_path']
# 0.
if not skip_data_gen:
run_data_gen(scale_factor, parallel, raw_data_path,
local_or_hdfs, num_streams)
# 1.
if not skip_load_test:
run_load_test(load_template_path,
raw_data_path,
warehouse_output_path,
warehouse_type,
load_report_path)
Tld = round_up_to_nearest_10_percent(float(get_load_time(load_report_path)))
# 2.
if not skip_stream_gen:
# RNGSEED is required for query stream generation in Spec 4.3.1
RNGSEED = get_load_end_timestamp(load_report_path)
gen_streams(num_streams, query_template_dir,
scale_factor, stream_output_path, RNGSEED)
# 3.
if not skip_power_test:
power_test(power_template_path,
warehouse_output_path,
power_stream_path,
power_report_path,
power_property_path,
power_output_path,
warehouse_type)
# TPower is in milliseconds
# But Spec 7.1.16: Elapsed time is measured in seconds rounded up to the nearest 0.1 second.
# Convert it to seconds.
TPower = round_up_to_nearest_10_percent(
float(get_power_time(power_report_path)) / 1000)
# 4.
if not skip_throughput_test:
throughput_test(num_streams,
1,
power_template_path,
warehouse_output_path,
stream_output_path,
throughput_report_base,
power_property_path,
warehouse_type)
Ttt1 = get_throughput_time(throughput_report_base,
num_streams, 1)
# 5
if not skip_maintenance_test:
maintenance_test(num_streams,
1,
warehouse_output_path,
maintenance_refresh_template,
raw_data_path,
maintenance_query_dir,
maintenance_report_base_path,
power_property_path,
warehouse_type)
Tdm1 = get_maintenance_time(maintenance_report_base_path,
num_streams,
1)
# 6
if not skip_throughput_test:
throughput_test(num_streams,
2,
power_template_path,
warehouse_output_path,
stream_output_path,
throughput_report_base,
power_property_path,
warehouse_type)
Ttt2 = get_throughput_time(throughput_report_base,
num_streams, 2)
# 7
if not skip_maintenance_test:
maintenance_test(num_streams,
2,
warehouse_output_path,
maintenance_refresh_template,
raw_data_path,
maintenance_query_dir,
maintenance_report_base_path,
power_property_path,
warehouse_type)
Tdm2 = get_maintenance_time(maintenance_report_base_path,
num_streams,
2)
perf_metric = get_perf_metric(
scale_factor, num_streams//2, Tld, TPower, Ttt1, Ttt2, Tdm1, Tdm2)
print(f"====== Performance Metric: {perf_metric} ======")
metrics_map = {"scale_factor": scale_factor,
"num_streams": num_streams,
"Tld": Tld,
"TPower": TPower,
"Ttt1": Ttt1,
"Ttt2": Ttt2,
"Tdm1": Tdm1,
"Tdm2": Tdm2,
"perf_metric": perf_metric}
write_metrics_report(metrics_report, metrics_map)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('yaml_config',
help='yaml config file for the benchmark')
args = parser.parse_args()
params = get_yaml_params(args.yaml_config)
run_full_bench(params)
| spark-rapids-benchmarks-dev | nds/nds_bench.py |
#!/usr/bin/env python3
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import os
import sys
from pathlib import Path
def check_version():
req_ver = (3, 6)
cur_ver = sys.version_info
if cur_ver < req_ver:
raise Exception('Minimum required Python version is 3.6, but current python version is {}.'
.format(str(cur_ver.major) + '.' + str(cur_ver.minor)) +
' Please use proper Python version')
def check_build():
"""check jar and tpcds executable
Raises:
Exception: the build is not done or broken
Returns:
PosixPath, PosixPath: path of jar and dsdgen executable
"""
# Check if necessary executable or jars are built.
# we assume user won't move this script.
src_dir = Path(__file__).parent.absolute()
jar_path = list(
Path(src_dir / 'tpcds-gen/target').rglob("tpcds-gen-*.jar"))
tool_path = list(Path(src_dir / 'tpcds-gen/target/tools').rglob("dsdgen"))
if jar_path == [] or tool_path == []:
raise Exception('Target jar file is not found in `target` folder or dsdgen executable is ' +
'not found in `target/tools` folder.' +
'Please refer to README document and build this project first.')
return jar_path[0], tool_path[0]
def get_abs_path(input_path):
"""receive a user input path and return absolute path of it.
Args:
input_path (str): user's input path
Returns:
str: if the input is absolute, return it; if it's relative path, return the absolute path of
it.
"""
if Path(input_path).is_absolute():
# it's absolute path
output_path = input_path
else:
# it's relative path where this script is executed
output_path = os.getcwd() + '/' + input_path
return output_path
def valid_range(range, parallel):
"""check the range validation
Args:
range (str): a range specified for a range data generation, e.g. "1,10"
parallel (str): string type number for parallelism in TPC-DS data generation, e.g. "20"
Raises:
Exception: error message for invalid range input.
"""
if len(range.split(',')) != 2:
msg = 'Invalid range: please specify a range with a comma between start and end. e.g., "1,10".'
raise Exception(msg)
range_start = int(range.split(',')[0])
range_end = int(range.split(',')[1])
if range_start < 1 or range_start > range_end or range_end > int(parallel):
msg = 'Please provide correct child range: 1 <= range_start <= range_end <= parallel'
raise Exception(msg)
return range_start, range_end
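# Example: valid_range("2,5", "20") returns (2, 5), while valid_range("5,2", "20") raises because
# the start of the child range must not exceed the end.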
def parallel_value_type(p):
"""helper function to check parallel valuie
Args:
p (str): parallel value
Raises:
argparse.ArgumentTypeError: ArgumentTypeError exception
Returns:
str: parallel in string
"""
if int(p) < 2:
raise argparse.ArgumentTypeError("PARALLEL must be >= 2")
return p
def get_dir_size(start_path):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
# skip if it is symbolic link
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
return total_size
def check_json_summary_folder(json_summary_folder):
if json_summary_folder:
# prepare a folder to save json summaries of query results
if not os.path.exists(json_summary_folder):
os.makedirs(json_summary_folder)
else:
if os.listdir(json_summary_folder):
raise Exception(f"json_summary_folder {json_summary_folder} is not empty. " +
"There may be already some json files there. Please clean the folder " +
"or specify another one.")
def check_query_subset_exists(query_dict, subset_list):
"""check if the query subset exists in the query dictionary"""
for q in subset_list:
if q not in query_dict.keys():
raise Exception(f"Query {q} is not in the query dictionary. Please check the query subset.")
return True
| spark-rapids-benchmarks-dev | nds/check.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import timeit
import pyspark
import os
from datetime import datetime
from pyspark.sql.types import *
from pyspark.sql.functions import col
from nds_schema import *
# Note: the specific partitioning below is applied when saving the output data files.
TABLE_PARTITIONING = {
'catalog_sales': 'cs_sold_date_sk',
'catalog_returns': 'cr_returned_date_sk',
'inventory': 'inv_date_sk',
'store_sales': 'ss_sold_date_sk',
'store_returns': 'sr_returned_date_sk',
'web_sales': 'ws_sold_date_sk',
'web_returns': 'wr_returned_date_sk'
}
def load(session, filename, schema, input_format, delimiter="|", header="false", prefix=""):
data_path = prefix + '/' + filename
if input_format == 'csv':
return session.read.option("delimiter", delimiter).option("header", header).csv(data_path, schema=schema)
elif input_format in ['parquet', 'orc', 'avro', 'json']:
return session.read.format(input_format).load(data_path)
# TODO: all of the output formats should be also supported as input format possibilities
# remains 'iceberg', 'delta'
else:
raise ValueError("Unsupported input format: {}".format(input_format))
def store(session,
df,
filename,
output_format,
output_mode,
iceberg_write_format,
compression,
prefix="",
delta_unmanaged=False,
hive_external=False):
"""Create Iceberg tables by CTAS
Args:
session (SparkSession): a working SparkSession instance
df (DataFrame): DataFrame to be serialized into Iceberg table
filename (str): name of the table(file)
output_format (str): parquet, orc or avro
output_mode (str): save modes as defined by "https://spark.apache.org/docs/latest/sql-data-sources-load-save-functions.html#save-modes.
iceberg_write_format (bool): write data into Iceberg tables with specified format
compression (str): compression codec for converted data when saving to disk
prefix (str): output data path when not using Iceberg.
"""
if output_format == "iceberg":
if output_mode == 'overwrite':
session.sql(f"drop table if exists {filename}")
CTAS = f"create table {filename} using iceberg "
if filename in TABLE_PARTITIONING.keys():
df.repartition(
col(TABLE_PARTITIONING[filename])).sortWithinPartitions(
TABLE_PARTITIONING[filename]).createOrReplaceTempView("temptbl")
CTAS += f"partitioned by ({TABLE_PARTITIONING[filename]})"
else:
df.coalesce(1).createOrReplaceTempView("temptbl")
CTAS += f" tblproperties('write.format.default' = '{iceberg_write_format}'"
# Iceberg now only support compression codec option for Parquet and Avro write.
if compression:
if iceberg_write_format == "parquet":
CTAS += f", 'write.parquet.compression-codec' = '{compression}'"
elif iceberg_write_format == "avro":
CTAS += f", 'write.avro.compression-codec' = '{compression}'"
CTAS += ")"
CTAS += " as select * from temptbl"
session.sql(CTAS)
elif output_format == "delta" and not delta_unmanaged:
if output_mode == 'overwrite':
session.sql(f"drop table if exists {filename}")
CTAS = f"create table {filename} using delta "
if filename in TABLE_PARTITIONING.keys():
df.repartition(
col(TABLE_PARTITIONING[filename])).sortWithinPartitions(
TABLE_PARTITIONING[filename]).createOrReplaceTempView("temptbl")
CTAS += f"partitioned by ({TABLE_PARTITIONING[filename]})"
else:
df.coalesce(1).createOrReplaceTempView("temptbl")
# Delta Lake doesn't have specific compression properties, set it by `spark.sql.parquet.compression.codec`
# Note Delta Lake only support Parquet.
if compression:
session.conf.set("spark.sql.parquet.compression.codec", compression)
CTAS += " as select * from temptbl"
session.sql(CTAS)
else:
data_path = prefix + '/' + filename
if filename in TABLE_PARTITIONING.keys():
df = df.repartition(
col(TABLE_PARTITIONING[filename])).sortWithinPartitions(
TABLE_PARTITIONING[filename])
writer = df.write
if compression:
writer = writer.option('compression', compression)
writer = writer.format(output_format).mode(
output_mode).partitionBy(TABLE_PARTITIONING[filename])
if not hive_external:
writer.save(data_path)
else:
writer.saveAsTable(filename, path=data_path)
else:
writer = df.coalesce(1).write
if compression:
writer = writer.option('compression', compression)
writer = writer.format(output_format).mode(output_mode)
if not hive_external:
writer.save(data_path)
else:
writer.saveAsTable(filename, path=data_path)
def transcode(args):
session_builder = pyspark.sql.SparkSession.builder
if args.output_format == "iceberg":
session_builder.config("spark.sql.catalog.spark_catalog.warehouse", args.output_prefix)
if args.output_format == "delta" and not args.delta_unmanaged:
session_builder.config("spark.sql.warehouse.dir", args.output_prefix)
session_builder.config("spark.sql.catalogImplementation", "hive")
if args.hive:
session_builder.enableHiveSupport()
session = session_builder.appName(f"NDS - transcode - {args.output_format}").getOrCreate()
if args.hive:
session.sql(f"CREATE DATABASE IF NOT EXISTS {args.database}")
session.catalog.setCurrentDatabase(args.database)
session.sparkContext.setLogLevel(args.log_level)
results = {}
schemas = get_schemas(use_decimal=not args.floats)
maintenance_schemas = get_maintenance_schemas(use_decimal=not args.floats)
if args.update:
trans_tables = maintenance_schemas
else:
trans_tables = schemas
if args.tables:
for t in args.tables:
            if t not in trans_tables.keys():
                raise Exception(f"invalid table name: {t}. Valid tables are: {list(trans_tables.keys())}")
trans_tables = {t: trans_tables[t] for t in args.tables if t in trans_tables}
start_time = datetime.now()
print(f"Load Test Start Time: {start_time}")
for fn, schema in trans_tables.items():
results[fn] = timeit.timeit(
lambda: store(session,
load(session,
f"{fn}",
schema,
input_format=args.input_format,
prefix=args.input_prefix),
f"{fn}",
args.output_format,
args.output_mode,
args.iceberg_write_format,
args.compression,
args.output_prefix,
args.delta_unmanaged,
args.hive),
number=1)
end_time = datetime.now()
delta = (end_time - start_time).total_seconds()
print(f"Load Test Finished at: {end_time}")
print(f"Load Test Time: {delta} seconds")
# format required at TPC-DS Spec 4.3.1
end_time_formatted = end_time.strftime("%m%d%H%M%S%f")[:-5]
print(f"RNGSEED used :{end_time_formatted}")
report_text = ""
report_text += f"Load Test Time: {delta} seconds\n"
report_text += f"Load Test Finished at: {end_time}\n"
report_text += f"RNGSEED used: {end_time_formatted}\n"
for table, duration in results.items():
report_text += "Time to convert '%s' was %.04fs\n" % (table, duration)
report_text += "\n\n\nSpark configuration follows:\n\n"
with open(args.report_file, "w") as report:
report.write(report_text)
print(report_text)
for conf in session.sparkContext.getConf().getAll():
report.write(str(conf) + "\n")
print(conf)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'input_prefix',
help='text to prepend to every input file path (e.g., "hdfs:///ds-generated-data"; the default is empty)')
parser.add_argument(
'output_prefix',
help='text to prepend to every output file (e.g., "hdfs:///ds-parquet"; the default is empty)' +
'. If output_format is "iceberg", this argument will be regarded as the value of property ' +
'"spark.sql.catalog.spark_catalog.warehouse". Only default Spark catalog ' +
'session name "spark_catalog" is supported now, customized catalog is not ' +
'yet supported.')
parser.add_argument(
'report_file',
help='location to store a performance report(local)')
parser.add_argument(
'--output_mode',
choices=['overwrite', 'append', 'ignore', 'error', 'errorifexists'],
help="save modes as defined by " +
"https://spark.apache.org/docs/latest/sql-data-sources-load-save-functions.html#save-modes." +
"default value is errorifexists, which is the Spark default behavior.",
default="errorifexists")
parser.add_argument(
'--input_format',
choices=['csv', 'parquet', 'orc', 'avro', 'json'],
default='csv',
help='input data format to be converted. default value is csv.'
)
parser.add_argument(
'--output_format',
choices=['parquet', 'orc', 'avro', 'json', 'iceberg', 'delta'],
default='parquet',
help="output data format when converting CSV data sources."
)
parser.add_argument(
'--tables',
type=lambda s: s.split(','),
help="specify table names by a comma separated string. e.g. 'catalog_page,catalog_sales'.")
parser.add_argument(
'--log_level',
help='set log level for Spark driver log. Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN(default: INFO)',
default="INFO")
parser.add_argument(
'--floats',
action='store_true',
help='replace DecimalType with DoubleType when saving parquet files. If not specified, decimal data will be saved.')
parser.add_argument(
'--update',
action='store_true',
help='transcode the source data or update data'
)
parser.add_argument(
'--iceberg_write_format',
choices=['parquet', 'orc', 'avro'],
default='parquet',
help='File format for the Iceberg table; parquet, avro, or orc'
)
parser.add_argument(
'--compression',
help='Compression codec to use when saving data.' +
' See https://iceberg.apache.org/docs/latest/configuration/#write-properties ' +
' for supported codecs in Iceberg.' +
' See https://spark.apache.org/docs/latest/sql-data-sources.html' +
' for supported codecs for Spark built-in formats.' +
' When not specified, the default for the requested output format will be used.'
)
parser.add_argument(
'--delta_unmanaged',
action='store_true',
help='Use unmanaged tables for DeltaLake. This is useful for testing DeltaLake without ' +
'leveraging a Metastore service.')
parser.add_argument(
'--hive',
action='store_true',
help='create Hive external tables for the converted data.'
)
parser.add_argument(
'--database',
help='the name of a database to use instead of `default`, currently applies only to Hive',
default="default"
)
args = parser.parse_args()
transcode(args)
| spark-rapids-benchmarks-dev | nds/nds_transcode.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
from pyspark.sql import SparkSession
tables_to_rollback = [
'catalog_sales',
'inventory',
'store_returns',
'store_sales',
'web_returns',
'web_sales']
def rollback(spark, timestamp, tables_to_rollback):
"""roll back the tables to the timestamp"""
for table in tables_to_rollback:
print(f"Rolling back {table} to {timestamp}")
rollback_sql = f"CALL spark_catalog.system.rollback_to_timestamp('{table}', TIMESTAMP '{timestamp}')"
spark.sql(rollback_sql)
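# Example invocation (hypothetical timestamp): python nds_rollback.py '2022-07-29 11:22:51'
# The argument must parse as a Spark TIMESTAMP literal, since it is spliced verbatim into the
# rollback_to_timestamp(...) call above.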
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('timestamp', help='timestamp to rollback to')
args = parser.parse_args()
spark = SparkSession.builder.appName("Rollback").getOrCreate()
rollback(spark, args.timestamp, tables_to_rollback)
spark.stop() | spark-rapids-benchmarks-dev | nds/nds_rollback.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import csv
import os
import time
from collections import OrderedDict
from pyspark.sql import SparkSession
from PysparkBenchReport import PysparkBenchReport
from pyspark.sql import DataFrame
from check import check_json_summary_folder, check_query_subset_exists, check_version
from nds_gen_query_stream import split_special_query
from nds_schema import get_schemas
check_version()
def gen_sql_from_stream(query_stream_file_path):
"""Read Spark compatible query stream and split them one by one
Args:
query_stream_file_path (str): path of query stream generated by TPC-DS tool
Returns:
ordered dict: an ordered dict of {query_name: query content} query pairs
"""
with open(query_stream_file_path, 'r') as f:
stream = f.read()
all_queries = stream.split('-- start')[1:]
# split query in query14, query23, query24, query39
extended_queries = OrderedDict()
for q in all_queries:
# e.g. "-- start query 32 in stream 0 using template query98.tpl"
query_name = q[q.find('template')+9: q.find('.tpl')]
if 'select' in q.split(';')[1]:
part_1, part_2 = split_special_query(q)
extended_queries[query_name + '_part1'] = part_1
extended_queries[query_name + '_part2'] = part_2
else:
extended_queries[query_name] = q
# add "-- start" string back to each query
for q_name, q_content in extended_queries.items():
extended_queries[q_name] = '-- start' + q_content
return extended_queries
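# Example of the marker parsed above: for a query beginning with
# "-- start query 32 in stream 0 using template query98.tpl", the slice between 'template' and
# '.tpl' yields the key 'query98'; multi-statement templates (query14, query23, query24, query39)
# are split into '<name>_part1' and '<name>_part2'.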
def setup_tables(spark_session, input_prefix, input_format, use_decimal, execution_time_list):
"""set up data tables in Spark before running the Power Run queries.
Args:
spark_session (SparkSession): a SparkSession instance to run queries.
input_prefix (str): path of input data.
input_format (str): type of input data source, e.g. parquet, orc, csv, json.
use_decimal (bool): use decimal type for certain columns when loading data of text type.
execution_time_list ([(str, str, int)]): a list to record query and its execution time.
Returns:
execution_time_list: a list recording query execution time.
"""
spark_app_id = spark_session.sparkContext.applicationId
# Create TempView for tables
for table_name in get_schemas(False).keys():
start = int(time.time() * 1000)
table_path = input_prefix + '/' + table_name
reader = spark_session.read.format(input_format)
if input_format in ['csv', 'json']:
reader = reader.schema(get_schemas(use_decimal)[table_name])
reader.load(table_path).createOrReplaceTempView(table_name)
end = int(time.time() * 1000)
print("====== Creating TempView for table {} ======".format(table_name))
print("Time taken: {} millis for table {}".format(end - start, table_name))
execution_time_list.append(
(spark_app_id, "CreateTempView {}".format(table_name), end - start))
return execution_time_list
def register_delta_tables(spark_session, input_prefix, execution_time_list):
spark_app_id = spark_session.sparkContext.applicationId
# Register tables for Delta Lake
for table_name in get_schemas(False).keys():
start = int(time.time() * 1000)
# input_prefix must be absolute path: https://github.com/delta-io/delta/issues/555
register_sql = f"CREATE TABLE IF NOT EXISTS {table_name} USING DELTA LOCATION '{input_prefix}/{table_name}'"
print(register_sql)
spark_session.sql(register_sql)
end = int(time.time() * 1000)
print("====== Registering for table {} ======".format(table_name))
print("Time taken: {} millis for table {}".format(end - start, table_name))
execution_time_list.append(
(spark_app_id, "Register {}".format(table_name), end - start))
return execution_time_list
def run_one_query(spark_session,
query,
query_name,
output_path,
output_format):
df = spark_session.sql(query)
if not output_path:
df.collect()
else:
ensure_valid_column_names(df).write.format(output_format).mode('overwrite').save(
output_path + '/' + query_name)
def ensure_valid_column_names(df: DataFrame):
def is_column_start(char):
return char.isalpha() or char == '_'
def is_column_part(char):
return char.isalpha() or char.isdigit() or char == '_'
def is_valid(column_name):
return len(column_name) > 0 and is_column_start(column_name[0]) and all(
[is_column_part(char) for char in column_name[1:]])
def make_valid(column_name):
# To simplify: replace all invalid char with '_'
valid_name = ''
if is_column_start(column_name[0]):
valid_name += column_name[0]
else:
valid_name += '_'
for char in column_name[1:]:
if not is_column_part(char):
valid_name += '_'
else:
valid_name += char
return valid_name
def deduplicate(column_names):
# In some queries like q35, it's possible to get columns with the same name. Append a number
# suffix to resolve this problem.
dedup_col_names = []
for i,v in enumerate(column_names):
count = column_names.count(v)
index = column_names[:i].count(v)
dedup_col_names.append(v+str(index) if count > 1 else v)
return dedup_col_names
valid_col_names = [c if is_valid(c) else make_valid(c) for c in df.columns]
dedup_col_names = deduplicate(valid_col_names)
return df.toDF(*dedup_col_names)
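# Examples of the sanitization above: a column named 'sum(x)' becomes 'sum_x_', and duplicated
# names such as ['a', 'b', 'a'] are renamed to ['a0', 'b', 'a1'].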
def get_query_subset(query_dict, subset):
"""Get a subset of queries from query_dict.
The subset is specified by a list of query names.
"""
check_query_subset_exists(query_dict, subset)
return dict((k, query_dict[k]) for k in subset)
def run_query_stream(input_prefix,
property_file,
query_dict,
time_log_output_path,
extra_time_log_output_path,
sub_queries,
input_format="parquet",
use_decimal=True,
output_path=None,
output_format="parquet",
json_summary_folder=None,
delta_unmanaged=False,
keep_sc=False,
hive_external=False):
"""run SQL in Spark and record execution time log. The execution time log is saved as a CSV file
for easy accesibility. TempView Creation time is also recorded.
Args:
input_prefix (str): path of input data or warehouse if input_format is "iceberg" or hive_external=True.
query_dict (OrderedDict): ordered dict {query_name: query_content} of all TPC-DS queries runnable in Spark
time_log_output_path (str): path of the log that contains query execution time, both local
and HDFS path are supported.
input_format (str, optional): type of input data source.
        use_decimal (bool, optional): use decimal type for certain columns when loading data of text type.
        output_path (str, optional): path of query output, optional. If not specified, collect()
action will be applied to each query. Defaults to None.
output_format (str, optional): query output format, choices are csv, orc, parquet. Defaults
to "parquet".
"""
execution_time_list = []
total_time_start = time.time()
# check if it's running specific query or Power Run
if len(query_dict) == 1:
app_name = "NDS - " + list(query_dict.keys())[0]
else:
app_name = "NDS - Power Run"
# Execute Power Run or Specific query in Spark
# build Spark Session
session_builder = SparkSession.builder
if property_file:
spark_properties = load_properties(property_file)
for k,v in spark_properties.items():
session_builder = session_builder.config(k,v)
if input_format == 'iceberg':
session_builder.config("spark.sql.catalog.spark_catalog.warehouse", input_prefix)
if input_format == 'delta' and not delta_unmanaged:
session_builder.config("spark.sql.warehouse.dir", input_prefix)
session_builder.enableHiveSupport()
if hive_external:
session_builder.enableHiveSupport()
spark_session = session_builder.appName(
app_name).getOrCreate()
if hive_external:
spark_session.catalog.setCurrentDatabase(input_prefix)
if input_format == 'delta' and delta_unmanaged:
# Register tables for Delta Lake. This is only needed for unmanaged tables.
execution_time_list = register_delta_tables(spark_session, input_prefix, execution_time_list)
spark_app_id = spark_session.sparkContext.applicationId
if input_format != 'iceberg' and input_format != 'delta' and not hive_external:
execution_time_list = setup_tables(spark_session, input_prefix, input_format, use_decimal,
execution_time_list)
check_json_summary_folder(json_summary_folder)
if sub_queries:
query_dict = get_query_subset(query_dict, sub_queries)
# Run query
power_start = int(time.time())
for query_name, q_content in query_dict.items():
# show query name in Spark web UI
spark_session.sparkContext.setJobGroup(query_name, query_name)
print("====== Run {} ======".format(query_name))
q_report = PysparkBenchReport(spark_session)
summary = q_report.report_on(run_one_query,spark_session,
q_content,
query_name,
output_path,
output_format)
print(f"Time taken: {summary['queryTimes']} millis for {query_name}")
query_times = summary['queryTimes']
execution_time_list.append((spark_app_id, query_name, query_times[0]))
if json_summary_folder:
# property_file e.g.: "property/aqe-on.properties" or just "aqe-off.properties"
if property_file:
summary_prefix = os.path.join(
json_summary_folder, os.path.basename(property_file).split('.')[0])
else:
summary_prefix = os.path.join(json_summary_folder, '')
q_report.write_summary(query_name, prefix=summary_prefix)
power_end = int(time.time())
power_elapse = int((power_end - power_start)*1000)
if not keep_sc:
spark_session.sparkContext.stop()
total_time_end = time.time()
total_elapse = int((total_time_end - total_time_start)*1000)
print("====== Power Test Time: {} milliseconds ======".format(power_elapse))
print("====== Total Time: {} milliseconds ======".format(total_elapse))
execution_time_list.append(
(spark_app_id, "Power Start Time", power_start))
execution_time_list.append(
(spark_app_id, "Power End Time", power_end))
execution_time_list.append(
(spark_app_id, "Power Test Time", power_elapse))
execution_time_list.append(
(spark_app_id, "Total Time", total_elapse))
header = ["application_id", "query", "time/milliseconds"]
# print to driver stdout for quick view
print(header)
for row in execution_time_list:
print(row)
# write to local file at driver node
with open(time_log_output_path, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(execution_time_list)
# write to csv in cloud environment
if extra_time_log_output_path:
spark_session = SparkSession.builder.getOrCreate()
time_df = spark_session.createDataFrame(data=execution_time_list, schema = header)
time_df.coalesce(1).write.csv(extra_time_log_output_path)
def load_properties(filename):
myvars = {}
with open(filename) as myfile:
for line in myfile:
name, var = line.partition("=")[::2]
myvars[name.strip()] = var.strip()
return myvars
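# A small illustrative sketch (not called by this script): load_properties expects one
# "key=value" pair per line. The file path and property values below are hypothetical.
def _demo_load_properties(tmp_path='/tmp/example.properties'):
    with open(tmp_path, 'w') as f:
        f.write('spark.sql.adaptive.enabled=true\n')
        f.write('spark.sql.shuffle.partitions=200\n')
    # expected: {'spark.sql.adaptive.enabled': 'true', 'spark.sql.shuffle.partitions': '200'}
    return load_properties(tmp_path)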
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('input_prefix',
help='text to prepend to every input file path (e.g., "hdfs:///ds-generated-data"). ' +
'If --hive or if input_format is "iceberg", this argument will be regarded as the value of property ' +
'"spark.sql.catalog.spark_catalog.warehouse". Only default Spark catalog ' +
'session name "spark_catalog" is supported now, customized catalog is not ' +
'yet supported. Note if this points to a Delta Lake table, the path must be ' +
'absolute. Issue: https://github.com/delta-io/delta/issues/555')
parser.add_argument('query_stream_file',
help='query stream file that contains NDS queries in specific order')
parser.add_argument('time_log',
help='path to execution time log, only support local path.',
default="")
parser.add_argument('--input_format',
help='type for input data source, e.g. parquet, orc, json, csv or iceberg, delta. ' +
'Certain types are not fully supported by GPU reading, please refer to ' +
'https://github.com/NVIDIA/spark-rapids/blob/branch-22.08/docs/compatibility.md ' +
'for more details.',
choices=['parquet', 'orc', 'avro', 'csv', 'json', 'iceberg', 'delta'],
default='parquet')
parser.add_argument('--output_prefix',
help='text to prepend to every output file (e.g., "hdfs:///ds-parquet")')
parser.add_argument('--output_format',
help='type of query output',
default='parquet')
parser.add_argument('--property_file',
help='property file for Spark configuration.')
parser.add_argument('--floats',
action='store_true',
help='When loading Text files like json and csv, schemas are required to ' +
'determine if certain parts of the data are read as decimal type or not. '+
'If specified, float data will be used.')
parser.add_argument('--json_summary_folder',
                        help='Empty folder/path (will be created if it does not exist) to save the JSON summary file for each query.')
parser.add_argument('--delta_unmanaged',
action='store_true',
help='Use unmanaged tables for DeltaLake. This is useful for testing DeltaLake without ' +
                        'leveraging a Metastore service.')
parser.add_argument('--keep_sc',
action='store_true',
                        help='Keep SparkContext alive after running all queries. This works around a ' +
                        'limitation of the Databricks runtime environment. Users should always attach ' +
                        'this flag when running on Databricks.')
parser.add_argument('--hive',
action='store_true',
help='use table meta information in Hive metastore directly without ' +
'registering temp views.')
parser.add_argument('--extra_time_log',
help='extra path to save time log when running in cloud environment where '+
'driver node/pod cannot be accessed easily. User needs to add essential extra ' +
'jars and configurations to access different cloud storage systems. ' +
'e.g. s3, gs etc.')
parser.add_argument('--sub_queries',
type=lambda s: [x.strip() for x in s.split(',')],
help='comma separated list of queries to run. If not specified, all queries ' +
'in the stream file will be run. e.g. "query1,query2,query3". Note, use ' +
'"_part1" and "_part2" suffix for the following query names: ' +
'query14, query23, query24, query39. e.g. query14_part1, query39_part2')
args = parser.parse_args()
query_dict = gen_sql_from_stream(args.query_stream_file)
run_query_stream(args.input_prefix,
args.property_file,
query_dict,
args.time_log,
args.extra_time_log,
args.sub_queries,
args.input_format,
not args.floats,
args.output_prefix,
args.output_format,
args.json_summary_folder,
args.delta_unmanaged,
args.keep_sc,
args.hive)
| spark-rapids-benchmarks-dev | nds/nds_power.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import json
import os
import time
import traceback
from typing import Callable
from pyspark.sql import SparkSession
import python_listener
class PysparkBenchReport:
"""Class to generate json summary report for a benchmark
"""
def __init__(self, spark_session: SparkSession) -> None:
self.spark_session = spark_session
self.summary = {
'env': {
'envVars': {},
'sparkConf': {},
'sparkVersion': None
},
'queryStatus': [],
'exceptions': [],
'startTime': None,
'queryTimes': [],
}
def report_on(self, fn: Callable, *args):
"""Record a function for its running environment, running status etc. and exclude sentive
information like tokens, secret and password Generate summary in dict format for it.
Args:
fn (Callable): a function to be recorded
Returns:
dict: summary of the fn
"""
spark_conf = dict(self.spark_session.sparkContext._conf.getAll())
env_vars = dict(os.environ)
redacted = ["TOKEN", "SECRET", "PASSWORD"]
filtered_env_vars = dict((k, env_vars[k]) for k in env_vars.keys() if not (k in redacted))
self.summary['env']['envVars'] = filtered_env_vars
self.summary['env']['sparkConf'] = spark_conf
self.summary['env']['sparkVersion'] = self.spark_session.version
listener = None
try:
listener = python_listener.PythonListener()
listener.register()
except TypeError as e:
print("Not found com.nvidia.spark.rapids.listener.Manager", str(e))
listener = None
if listener is not None:
print("TaskFailureListener is registered.")
try:
start_time = int(time.time() * 1000)
fn(*args)
end_time = int(time.time() * 1000)
if listener and len(listener.failures) != 0:
self.summary['queryStatus'].append("CompletedWithTaskFailures")
else:
self.summary['queryStatus'].append("Completed")
except Exception as e:
# print the exception to ease debugging
print('ERROR BEGIN')
print(e)
traceback.print_tb(e.__traceback__)
print('ERROR END')
end_time = int(time.time() * 1000)
self.summary['queryStatus'].append("Failed")
self.summary['exceptions'].append(str(e))
finally:
self.summary['startTime'] = start_time
self.summary['queryTimes'].append(end_time - start_time)
if listener is not None:
listener.unregister()
return self.summary
def write_summary(self, query_name, prefix=""):
"""_summary_
Args:
query_name (str): name of the query
prefix (str, optional): prefix for the output json summary file. Defaults to "".
"""
# Power BI side is retrieving some information from the summary file name, so keep this file
# name format for pipeline compatibility
self.summary['query'] = query_name
filename = prefix + '-' + query_name + '-' +str(self.summary['startTime']) + '.json'
self.summary['filename'] = filename
with open(filename, "w") as f:
json.dump(self.summary, f, indent=2)
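# A minimal usage sketch (assumptions: a live SparkSession named `spark` and a no-op workload).
# It is not executed by this module; it only illustrates how a report is produced and persisted
# as '<prefix>-<query_name>-<startTime>.json'.
def _demo_bench_report(spark):
    report = PysparkBenchReport(spark)
    summary = report.report_on(lambda: None)  # any callable followed by its arguments
    print(summary['queryStatus'], summary['queryTimes'])
    report.write_summary('demo_query', prefix='/tmp/demo')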
| spark-rapids-benchmarks-dev | nds/PysparkBenchReport.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import glob
import json
import math
import os
import re
import time
from decimal import *
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.types import *
from pyspark.sql.functions import col
from nds_power import gen_sql_from_stream
def compare_results(spark_session: SparkSession,
input1: str,
input2: str,
input1_format: str,
input2_format: str,
ignore_ordering: bool,
is_q78: bool,
q78_problematic_col: int,
use_iterator=False,
max_errors=10,
epsilon=0.00001) -> bool :
"""Giving 2 paths of input query output data, compare them row by row, value by value to see if
the results match or not.
Args:
spark_session (SparkSession): Spark Session to hold the comparison
input1 (str): path for the first input data
input2 (str): path for the second input data
input1_format (str): data source format for input1, e.g. parquet, orc
input2_format (str): data source format for input2, e.g. parquet, orc
        ignore_ordering (bool): whether to ignore the ordering of the input data.
            If true, the rows are sorted before comparison.
is_q78 (bool): whether the query is query78.
q78_problematic_col: the column index that has problematic data. Only used for query78.
use_iterator (bool, optional): When set to true, use `toLocalIterator` to load one partition
at a time into driver memory, reducing memory usage at the cost of performance because
processing will be single-threaded. Defaults to False.
max_errors (int, optional): Maximum number of differences to report. Defaults to 10.
epsilon (float, optional): Allow for differences in precision when comparing floating point
values. Defaults to 0.00001.
Returns:
bool: True if result matches otherwise False
"""
df1 = spark_session.read.format(input1_format).load(input1)
df2 = spark_session.read.format(input2_format).load(input2)
count1 = df1.count()
count2 = df2.count()
if(count1 == count2):
#TODO: need partitioned collect for NDS? there's no partitioned output currently
result1 = collect_results(df1, ignore_ordering, use_iterator)
result2 = collect_results(df2, ignore_ordering, use_iterator)
errors = 0
i = 0
while i < count1 and errors < max_errors:
lhs = next(result1)
rhs = next(result2)
if not rowEqual(list(lhs), list(rhs), epsilon, is_q78, q78_problematic_col):
print(f"Row {i}: \n{list(lhs)}\n{list(rhs)}\n")
errors += 1
i += 1
print(f"Processed {i} rows")
if errors == max_errors:
print(f"Aborting comparison after reaching maximum of {max_errors} errors")
return False
elif errors == 0:
print("Results match")
return True
else:
print(f"There were {errors} errors")
return False
else:
print(f"DataFrame row counts do not match: {count1} != {count2}")
return False
def collect_results(df: DataFrame,
ignore_ordering: bool,
use_iterator: bool):
# apply sorting if specified
non_float_cols = [col(field.name) for \
field in df.schema.fields \
if (field.dataType.typeName() != FloatType.typeName()) \
and \
(field.dataType.typeName() != DoubleType.typeName())]
float_cols = [col(field.name) for \
field in df.schema.fields \
if (field.dataType.typeName() == FloatType.typeName()) \
or \
(field.dataType.typeName() == DoubleType.typeName())]
if ignore_ordering:
df = df.sort(non_float_cols + float_cols)
# TODO: do we still need this for NDS? Query outputs are usually 1 - 100 rows,
    # there shouldn't be memory pressure.
if use_iterator:
it = df.toLocalIterator()
else:
print("Collecting rows from DataFrame")
t1 = time.time()
rows = df.collect()
t2 = time.time()
print(f"Collected {len(rows)} rows in {t2-t1} seconds")
it = iter(rows)
return it
def check_nth_col_problematic_q78(q78_content: str) -> int:
"""parse the query78 content, return which column is the problematic one.
example content: https://github.com/NVIDIA/spark-rapids-benchmarks/issues/101#issuecomment-1217758683
parse logic:
1. find the content between the last "select" and "from" pair.
2. split the content by ", " or ",\n"
3. find the index of the string that contains "ratio"
    4. return the index plus 1, so the column index is 1-based and more intuitive for users;
       raise an exception if the column is not found.
"""
last_between = q78_content.split("select")[-1].split("from")[0]
target_splits = re.split(', |,\n',last_between)
nth = -1
for index, string in enumerate(target_splits):
if 'ratio' in string:
nth = index
if nth == -1:
raise Exception(f"Cannot find the problematic column in the query78 content. Please check the content.\n{q78_content}")
return nth + 1
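# An illustrative sketch (not called anywhere): a trimmed, hypothetical query78-like
# projection list, showing how the 1-based position of the "ratio" column is detected.
def _demo_check_nth_col_problematic_q78():
    fake_q78 = "select ss_sold_year, ratio, ss_qty from some_table"
    # 'ratio' is the 2nd item in the projection list, so this returns 2
    return check_nth_col_problematic_q78(fake_q78)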
def rowEqual(row1, row2, epsilon, is_q78, q78_problematic_col):
# only simple types in a row for NDS results
if is_q78:
# TODO: make the special compare for q78 more common and make it apply to other queries that contain round function
# TODO: remove this special case after we resolve https://github.com/NVIDIA/spark-rapids/issues/1573
# see example error case: https://github.com/NVIDIA/spark-rapids-benchmarks/pull/7#issue-1247422850
# Pop the 2nd or 4th column value in q78, compare it alone.
        # It is possible the problematic column is at a different position in different streams,
# see example and more details: https://github.com/NVIDIA/spark-rapids-benchmarks/issues/101#issuecomment-1217758683
if q78_problematic_col != 2 and q78_problematic_col != 4:
raise Exception(f"q78 problematic column should be 2nd or 4th, but get {q78_problematic_col}")
# remember to -1 to get the index in python list
problematic_val_row1 = row1.pop(q78_problematic_col-1)
problematic_val_row2 = row2.pop(q78_problematic_col-1)
problematic_val_eq = False
# this value could be none in some rows
if all([problematic_val_row1, problematic_val_row2]):
            # this value is rounded to 2 decimal places: round(ss_qty/(coalesce(ws_qty,0)+coalesce(cs_qty,0)),2)
# so we allow the diff <= 0.01 + default epsilon 0.00001
problematic_val_eq = abs(problematic_val_row1 - problematic_val_row2) <= 0.01001
        elif problematic_val_row1 is None and problematic_val_row2 is None:
problematic_val_eq = True
else:
problematic_val_eq = False
return problematic_val_eq and all([compare(lhs, rhs, epsilon) for lhs, rhs in zip(row1, row2)])
else:
return all([compare(lhs, rhs, epsilon) for lhs, rhs in zip(row1, row2)])
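# An illustrative sketch with made-up rows (not executed here): for query78 the problematic
# ratio column (the 2nd one in this example) tolerates a difference up to 0.01001, while the
# remaining columns go through the regular `compare` path.
def _demo_row_equal_q78():
    row1 = [2000, 0.34, 100]
    row2 = [2000, 0.35, 100]
    return rowEqual(row1, row2, 0.00001, True, 2)  # -> True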
def compare(expected, actual, epsilon=0.00001):
#TODO 1: we can optimize this with case-match after Python 3.10
#TODO 2: we can support complex data types like nested type if needed in the future.
# now NDS only contains simple data types.
if isinstance(expected, float) and isinstance(actual, float):
# Double is converted to float in pyspark...
if math.isnan(expected) and math.isnan(actual):
return True
else:
return math.isclose(expected, actual, rel_tol=epsilon)
elif isinstance(expected, str) and isinstance(actual, str):
return expected == actual
    elif expected is None and actual is None:
        return True
    elif expected is not None and actual is None:
        return False
    elif expected is None and actual is not None:
        return False
elif isinstance(expected, Decimal) and isinstance(actual, Decimal):
return math.isclose(expected, actual, rel_tol=epsilon)
else:
return expected == actual
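# A few illustrative comparisons (sketch only, not executed by this script) showing how the
# epsilon tolerance, None handling and Decimal handling behave.
def _demo_compare():
    assert compare(1.000001, 1.0000009)              # within the default relative tolerance
    assert not compare(1.0, 2.0)
    assert compare(None, None)
    assert not compare('a', None)
    assert compare(Decimal('1.00'), Decimal('1.00'))
    return True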
def iterate_queries(spark_session: SparkSession,
input1: str,
input2: str,
input1_format: str,
input2_format: str,
ignore_ordering: bool,
query_dict: dict,
use_iterator=False,
max_errors=10,
epsilon=0.00001,
is_float=False):
# Iterate each query folder for a Power Run output
    # Providing a list instead of hard-coding all NDS queries allows arbitrary subsets of queries to be run.
unmatch_queries = []
for query_name in query_dict.keys():
if query_name == 'query65':
# query65 is skipped due to: https://github.com/NVIDIA/spark-rapids-benchmarks/pull/7#issuecomment-1147077894
continue
if query_name == 'query67' and is_float:
# query67 is skipped due to: https://github.com/NVIDIA/spark-rapids-benchmarks/pull/7#issuecomment-1156214630
continue
sub_input1 = input1 + '/' + query_name
sub_input2 = input2 + '/' + query_name
print(f"=== Comparing Query: {query_name} ===")
# default it to 2, which is the 2nd column in the query78
problematic_col = 2
if query_name == 'query78':
problematic_col = check_nth_col_problematic_q78(query_dict[query_name])
result_equal = compare_results(spark_session,
sub_input1,
sub_input2,
input1_format,
input2_format,
ignore_ordering,
query_name == 'query78',
q78_problematic_col=problematic_col,
use_iterator=use_iterator,
max_errors=max_errors,
epsilon=epsilon)
        if not result_equal:
unmatch_queries.append(query_name)
if len(unmatch_queries) != 0:
print(f"=== Unmatch Queries: {unmatch_queries} ===")
return unmatch_queries
def update_summary(prefix, unmatch_queries):
"""set the queryValidationStatus field in json summary file.
If the queryStatus is 'Completed' or 'CompletedWithTaskFailures' but validation failed,
set to 'Fail'.
If the queryStatus is 'Completed' or 'CompletedWithTaskFailures' and validation passed,
set to 'Pass'.
If the queryStatus is 'Failed',
set to 'NotAttempted'.
Args:
prefix (str): folder of the json summary files
unmatch_queries ([str]): list of queries that failed validation
"""
if not os.path.exists(prefix):
raise Exception("The json summary folder doesn't exist.")
print(f"Updating queryValidationStatus in folder {prefix}.")
for query_name in query_dict.keys():
summary_wildcard = prefix + f'/*{query_name}-*.json'
file_glob = glob.glob(summary_wildcard)
if len(file_glob) > 1:
raise Exception(f"More than one summary file found for query {query_name} in folder {prefix}.")
if len(file_glob) == 0:
raise Exception(f"No summary file found for query {query_name} in folder {prefix}.")
for filename in file_glob:
with open(filename, 'r') as f:
summary = json.load(f)
if query_name in unmatch_queries:
if 'Completed' in summary['queryStatus'] or 'CompletedWithTaskFailures' in summary['queryStatus']:
summary['queryValidationStatus'] = ['Fail']
else:
summary['queryValidationStatus'] = ['NotAttempted']
else:
summary['queryValidationStatus'] = ['Pass']
with open(filename, 'w') as f:
json.dump(summary, f, indent=2)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('input1',
help='path of the first input data.')
parser.add_argument('input2',
help='path of the second input data.')
parser.add_argument('query_stream_file',
help='query stream file that contains NDS queries in specific order.')
parser.add_argument('--input1_format',
default='parquet',
help='data source type for the first input data. e.g. parquet, orc. Default is: parquet.')
parser.add_argument('--input2_format',
default='parquet',
help='data source type for the second input data. e.g. parquet, orc. Default is: parquet.')
parser.add_argument('--max_errors',
help='Maximum number of differences to report.',
type=int,
default=10)
parser.add_argument('--epsilon',
type=float,
default=0.00001,
help='Allow for differences in precision when comparing floating point values.' +
' Given 2 float numbers: 0.000001 and 0.000000, the diff of them is 0.000001' +
' which is less than 0.00001, so we regard this as acceptable and will not' +
' report a mismatch.')
parser.add_argument('--ignore_ordering',
action='store_true',
help='Sort the data collected from the DataFrames before comparing them.')
parser.add_argument('--use_iterator',
action='store_true',
help='When set, use `toLocalIterator` to load one partition at a' +
' time into driver memory, reducing memory usage at the cost of performance' +
' because processing will be single-threaded.')
parser.add_argument('--floats',
action='store_true',
                        help='whether the input data contains float data or decimal data. There are' +
                        ' some known mismatch issues due to floating point; we will do some special' +
                        ' checks for some queries when the input data is float.')
parser.add_argument('--json_summary_folder',
help='path of a folder that contains json summary file for each query.')
args = parser.parse_args()
query_dict = gen_sql_from_stream(args.query_stream_file)
session_builder = SparkSession.builder.appName("Validate Query Output").getOrCreate()
unmatch_queries = iterate_queries(session_builder,
args.input1,
args.input2,
args.input1_format,
args.input2_format,
args.ignore_ordering,
query_dict,
use_iterator=args.use_iterator,
max_errors=args.max_errors,
epsilon=args.epsilon,
is_float=args.floats)
if args.json_summary_folder:
update_summary(args.json_summary_folder, unmatch_queries)
| spark-rapids-benchmarks-dev | nds/nds_validate.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import csv
from datetime import datetime
import os
from pyspark.sql import SparkSession
from PysparkBenchReport import PysparkBenchReport
from check import check_json_summary_folder, get_abs_path
from nds_schema import get_maintenance_schemas
from nds_power import register_delta_tables
INSERT_FUNCS = [
'LF_CR',
'LF_CS',
'LF_I',
'LF_SR',
'LF_SS',
'LF_WR',
'LF_WS']
DELETE_FUNCS = [
'DF_CS',
'DF_SS',
'DF_WS']
INVENTORY_DELETE_FUNC = ['DF_I']
DM_FUNCS = INSERT_FUNCS + DELETE_FUNCS + INVENTORY_DELETE_FUNC
def get_delete_date(spark_session):
"""get delete dates for Data Maintenance. Each delete functions requires 3 tuples: (date1, date2)
Args:
spark_session (SparkSession): Spark session
Returns:
        delete_dates_dict ({str: list[(date1, date2)]}): a dict that contains date tuples for each delete function
"""
delete_dates = spark_session.sql("select * from delete").collect()
inventory_delete_dates = spark_session.sql("select * from inventory_delete").collect()
date_dict = {}
date_dict['delete'] = [(row['date1'], row['date2']) for row in delete_dates]
date_dict['inventory_delete'] = [(row['date1'], row['date2']) for row in inventory_delete_dates]
return date_dict
def replace_date(query_list, date_tuple_list):
"""Replace the date keywords in DELETE queries. 3 date tuples will be applied to the delete query.
Args:
query_list ([str]): delete query list
        date_tuple_list ([(str, str)]): actual delete dates
"""
q_updated = []
for date_tuple in date_tuple_list:
date1 = datetime.strptime(date_tuple[0], "%Y-%m-%d")
date2 = datetime.strptime(date_tuple[1], "%Y-%m-%d")
if date1 > date2:
earlier = date_tuple[1]
later = date_tuple[0]
else:
earlier = date_tuple[0]
later = date_tuple[1]
for c in query_list:
c = c.replace("DATE1", earlier)
c = c.replace("DATE2", later)
q_updated.append(c)
return q_updated
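# An illustrative sketch (not part of the normal flow): DATE1/DATE2 placeholders are replaced
# with the earlier/later date of each tuple. The query text and dates below are made up.
def _demo_replace_date():
    queries = ["delete from store_sales where d_date between 'DATE1' and 'DATE2';"]
    dates = [('2000-05-20', '2000-05-21')]
    # -> ["delete from store_sales where d_date between '2000-05-20' and '2000-05-21';"]
    return replace_date(queries, dates)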
def get_valid_query_names(spec_queries):
global DM_FUNCS
if spec_queries:
for q in spec_queries:
if q not in DM_FUNCS:
raise Exception(f"invalid Data Maintenance query: {q}. Valid are: {DM_FUNCS}")
DM_FUNCS = spec_queries
return DM_FUNCS
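# A tiny illustrative call (sketch only): a user-specified subset must contain valid DM
# function names; note that passing a subset also narrows the global DM_FUNCS list.
def _demo_get_valid_query_names():
    return get_valid_query_names(['LF_CR', 'DF_SS'])  # -> ['LF_CR', 'DF_SS']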
def create_spark_session(valid_queries, warehouse_path, warehouse_type):
if len(valid_queries) == 1:
app_name = "NDS - Data Maintenance - " + valid_queries[0]
else:
app_name = "NDS - Data Maintenance"
spark_session_builder = SparkSession.builder
if warehouse_type == "iceberg":
spark_session_builder.config("spark.sql.catalog.spark_catalog.warehouse", warehouse_path)
spark_session = spark_session_builder.appName(app_name).getOrCreate()
return spark_session
def get_maintenance_queries(spark_session, folder, valid_queries):
"""get query content from DM query files
    Args:
        spark_session (SparkSession): SparkSession used to retrieve the delete dates
        folder (str): folder of Data Maintenance query files
        valid_queries (list[str]): target Data Maintenance queries
Returns:
dict{str: list[str]}: a dict contains Data Maintenance query name and its content.
"""
delete_date_dict = get_delete_date(spark_session)
folder_abs_path = get_abs_path(folder)
q_dict = {}
for q in valid_queries:
with open(folder_abs_path + '/' + q + '.sql', 'r') as f:
# file content e.g.
# " LICENSE CONTENT ... ;"
# " CREATE view ..... ; INSERT into .... ;"
# " DELETE from ..... ; DELETE FROM .... ;"
q_content = [ c + ';' for c in f.read().split(';')[1:-1]]
if q in DELETE_FUNCS:
                # There are 3 date tuples to be replaced for one DELETE function
# according to TPC-DS Spec 5.3.11
q_content = replace_date(q_content, delete_date_dict['delete'])
if q in INVENTORY_DELETE_FUNC:
q_content = replace_date(q_content, delete_date_dict['inventory_delete'])
q_dict[q] = q_content
return q_dict
def run_subquery_for_delta(spark_session, delete_query):
"""DeltaLake doesn't support DELETE with subquery, so run the subquery at first as workaround.
return: a query that can be run on Delta Lake after subquery replacement.
See issue: https://github.com/delta-io/delta/issues/730
Note this method is very tricky and is totally based on the query content itself.
TODO: remove this method when the issue above is resolved.
"""
# first strip out the license part
delete_query = delete_query.split('--')[-1]
if not "min" in delete_query:
# e.g. "delete ... in (select ...);"
subquery_start_pos = delete_query.find("(") + 1
subquery_end_pos = delete_query.find(")")
        # note: find() returns -1 when "(" is missing, which makes subquery_start_pos 0
        if subquery_start_pos == 0 or subquery_end_pos == -1:
            raise Exception("invalid delete query")
subquery = delete_query[subquery_start_pos:subquery_end_pos]
subquery_df = spark_session.sql(subquery)
        # only 1 column, so retrieve it directly at index 0
col_name = subquery_df.schema.fields[0].name
subquery_result = subquery_df.collect()
# form the string then drop "[" and "]"
subquery_result = str([row[col_name] for row in subquery_result])[1:-1]
final_query = delete_query.replace(subquery, subquery_result)
return final_query
else:
# e.g. "delete ... (select min(d_date_sk) ... )... and ... ( select max(d_date_sk) ... );"
# subquery_1 is between first "(" and second ")"
# subquery_2 is only different from subquery_1 in the "min" and "max" keyword.
subquery_start_pos1 = delete_query.find("(") + 1
first_right_parenthesis = delete_query.find(")")
subquery_end_pos1 = delete_query.find(")", first_right_parenthesis + 1)
subquery_1 = delete_query[subquery_start_pos1:subquery_end_pos1]
subquery_2 = subquery_1.replace("min", "max")
# result only 1 row.
subquery_1_result = str(spark_session.sql(subquery_1).collect()[0][0])
subquery_2_result = str(spark_session.sql(subquery_2).collect()[0][0])
final_query = delete_query.replace(
subquery_1, subquery_1_result).replace(
subquery_2, subquery_2_result)
return final_query
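# Illustrative, hypothetical rewrite performed by run_subquery_for_delta (comment only):
#   input : "delete from web_returns where wr_order_number in
#            (select ws_order_number from websales_tmp);"
#   output: "delete from web_returns where wr_order_number in (1, 2, 3);"
# where 1, 2, 3 stand for whatever the subquery returned at runtime.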
def run_dm_query(spark, query_list, query_name, warehouse_type):
"""Run data maintenance query.
For delete queries, they can run on Spark 3.2.2 but not Spark 3.2.1
See: https://issues.apache.org/jira/browse/SPARK-39454
See: data_maintenance/DF_*.sql for delete query details.
See data_maintenance/LF_*.sql for insert query details.
Args:
spark (SparkSession): SparkSession instance.
        query_list ([str]): list of SQL statements for one Data Maintenance function.
        query_name (str): name of the Data Maintenance function, e.g. "LF_CR" or "DF_CS".
        warehouse_type (str): warehouse type, either "iceberg" or "delta".
"""
for q in query_list:
if query_name in DELETE_FUNCS + INVENTORY_DELETE_FUNC and warehouse_type == "delta":
q = run_subquery_for_delta(spark, q)
spark.sql(q)
def run_query(spark_session,
query_dict,
time_log_output_path,
json_summary_folder,
property_file,
warehouse_path,
warehouse_type,
keep_sc,
delta_unmanaged=False):
# TODO: Duplicate code in nds_power.py. Refactor this part, make it general.
execution_time_list = []
check_json_summary_folder(json_summary_folder)
# Run query
total_time_start = datetime.now()
spark_app_id = spark_session.sparkContext.applicationId
DM_start = datetime.now()
if warehouse_type == 'delta' and delta_unmanaged:
execution_time_list = register_delta_tables(spark_session, warehouse_path, execution_time_list)
for query_name, q_content in query_dict.items():
# show query name in Spark web UI
spark_session.sparkContext.setJobGroup(query_name, query_name)
print(f"====== Run {query_name} ======")
q_report = PysparkBenchReport(spark_session)
summary = q_report.report_on(run_dm_query, spark_session,
q_content,
query_name,
warehouse_type)
print(f"Time taken: {summary['queryTimes']} millis for {query_name}")
execution_time_list.append((spark_app_id, query_name, summary['queryTimes']))
if json_summary_folder:
# property_file e.g.: "property/aqe-on.properties" or just "aqe-off.properties"
if property_file:
summary_prefix = os.path.join(
json_summary_folder, os.path.basename(property_file).split('.')[0])
else:
summary_prefix = os.path.join(json_summary_folder, '')
q_report.write_summary(query_name, prefix=summary_prefix)
if not keep_sc:
spark_session.sparkContext.stop()
DM_end = datetime.now()
DM_elapse = (DM_end - DM_start).total_seconds()
total_elapse = (DM_end - total_time_start).total_seconds()
print(f"====== Data Maintenance Start Time: {DM_start}")
print(f"====== Data Maintenance Time: {DM_elapse} s ======")
print(f"====== Total Time: {total_elapse} s ======")
execution_time_list.append(
(spark_app_id, "Data Maintenance Start Time", DM_start)
)
execution_time_list.append(
(spark_app_id, "Data Maintenance End Time", DM_end)
)
execution_time_list.append(
(spark_app_id, "Data Maintenance Time", DM_elapse))
execution_time_list.append(
(spark_app_id, "Total Time", total_elapse))
# write to local csv file
header = ["application_id", "query", "time/s"]
with open(time_log_output_path, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(execution_time_list)
def register_temp_views(spark_session, refresh_data_path):
refresh_tables = get_maintenance_schemas(True)
for table, schema in refresh_tables.items():
spark_session.read.option("delimiter", '|').option(
"header", "false").csv(refresh_data_path + '/' + table, schema=schema).createOrReplaceTempView(table)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('warehouse_path',
help='warehouse path for Data Maintenance test.')
parser.add_argument('refresh_data_path',
help='path to refresh data')
parser.add_argument('maintenance_queries_folder',
help='folder contains all NDS Data Maintenance queries. If ' +
'"--maintenance_queries" is not set, all queries under the folder will be' +
'executed.')
parser.add_argument('time_log',
help='path to execution time log, only support local path.',
default="")
parser.add_argument('--maintenance_queries',
type=lambda s: s.split(','),
                        help='specify Data Maintenance query names by a comma separated string.' +
' e.g. "LF_CR,LF_CS"')
parser.add_argument('--property_file',
help='property file for Spark configuration.')
parser.add_argument('--json_summary_folder',
                        help='Empty folder/path (will be created if it does not exist) to save the JSON summary file for each query.')
parser.add_argument('--warehouse_type',
help='Type of the warehouse used for Data Maintenance test.',
choices=['iceberg', 'delta'],
default='iceberg')
parser.add_argument('--keep_sc',
action='store_true',
                        help='Keep SparkContext alive after running all queries. This works around a ' +
                        'limitation of the Databricks runtime environment. Users should always attach ' +
                        'this flag when running on Databricks.')
parser.add_argument('--delta_unmanaged',
action='store_true',
help='Use unmanaged tables for DeltaLake. This is useful for testing DeltaLake without ' +
                        'leveraging a Metastore service.')
args = parser.parse_args()
valid_queries = get_valid_query_names(args.maintenance_queries)
spark_session = create_spark_session(valid_queries, args.warehouse_path, args.warehouse_type)
register_temp_views(spark_session, args.refresh_data_path)
query_dict = get_maintenance_queries(spark_session,
args.maintenance_queries_folder,
valid_queries)
run_query(spark_session, query_dict, args.time_log, args.json_summary_folder,
args.property_file, args.warehouse_path, args.warehouse_type, args.keep_sc,
args.delta_unmanaged)
| spark-rapids-benchmarks-dev | nds/nds_maintenance.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import os
import shutil
import subprocess
from check import check_build, check_version, get_abs_path, get_dir_size, parallel_value_type, valid_range
check_version()
source_table_names = [
'call_center',
'catalog_page',
'catalog_returns',
'catalog_sales',
'customer',
'customer_address',
'customer_demographics',
'date_dim',
'dbgen_version',
'household_demographics',
'income_band',
'inventory',
'item',
'promotion',
'reason',
'ship_mode',
'store',
'store_returns',
'store_sales',
'time_dim',
'warehouse',
'web_page',
'web_returns',
'web_sales',
'web_site',
]
maintenance_table_names = [
's_catalog_order',
's_catalog_order_lineitem',
's_catalog_returns',
's_inventory',
's_purchase',
's_purchase_lineitem',
's_store_returns',
's_web_order',
's_web_order_lineitem',
's_web_returns',
'delete',
'inventory_delete'
]
def clean_temp_data(temp_data_path):
cmd = ['hadoop', 'fs', '-rm', '-r', '-skipTrash', temp_data_path]
print(" ".join(cmd))
subprocess.run(cmd)
def merge_temp_tables(temp_data_path, parent_data_path, update):
"""Helper functions for incremental data generation. Move data in temporary child range path to
parent directory.
Args:
        temp_data_path (str): temporary child range data path
parent_data_path (str): parent data path
"""
if update:
table_names = maintenance_table_names
else:
table_names = source_table_names
for table_name in table_names:
# manually create table sub-folders
# redundant step if it's not the first range part.
cmd = ['hadoop', 'fs', '-mkdir', parent_data_path + '/' + table_name]
print(" ".join(cmd))
subprocess.run(cmd)
# move temp content to upper folder
# note not all tables are generated in different child range step
# please ignore messages like "mv: `.../reason/*': No such file or directory"
temp_table_data_path = temp_data_path + '/' + table_name + '/*'
cmd = ['hadoop', 'fs', '-mv', temp_table_data_path,
parent_data_path + '/' + table_name + '/']
print(" ".join(cmd))
subprocess.run(cmd)
clean_temp_data(temp_data_path)
def move_delete_date_tables(base_path, update):
    # delete date tables are special, move them separately
# with --update 2, it'll generate the files named like delete_2.dat-m-00000, delete_2.dat-m-00001...
# the number of files is decided by the parallel value, and they all have same content
# So we just copy the first one
for delete_table in ['delete', 'inventory_delete']:
mkdir = ['hadoop', 'fs', '-mkdir', '-p', base_path + '/' + delete_table]
move = ['hadoop', 'fs', '-mv', base_path + '/' + delete_table + f'_{update}.dat-m-00000', base_path + '/' + delete_table + '/']
subprocess.run(mkdir, check=True)
subprocess.run(move, check=True)
def generate_data_hdfs(args, jar_path):
"""generate data to hdfs using TPC-DS dsdgen tool. Support incremental generation: due to the
    limit of hdfs, data for each range will be generated under a temporary folder and then moved to
    the target folder.
Args:
args (Namespace): Namespace from argparser
jar_path (str): path to the target jar
Raises:
Exception: if Hadoop binary is not installed.
"""
# Check if hadoop is installed.
if shutil.which('hadoop') is None:
raise Exception('No Hadoop binary found in current environment, ' +
'please install Hadoop for data generation in cluster.')
# Submit hadoop MR job to generate data
cmd = ['hadoop', 'jar', str(jar_path)]
if args.replication:
cmd += ["-D", f"dfs.replication={args.replication}"]
cmd += ['-p', args.parallel, '-s', args.scale]
# get dsdgen.jar path, assume user won't change file structure
tpcds_gen_path = jar_path.parent.parent.absolute()
if args.overwrite_output:
cmd += ['-o']
if args.update:
cmd += ["-u", args.update]
if args.range:
# use a temp folder to save the specific range data.
# will move the content to parent folder afterwards.
# it's a workaround for "Output directory ... already exists" in incremental generation
temp_data_path = args.data_dir + '/_temp_'
# before generation, we remove "_temp_" folders in case they contain garbage generated by
# previous user runs.
clean_temp_data(temp_data_path)
cmd.extend(["-r", args.range])
cmd.extend(["-d", temp_data_path])
try:
subprocess.run(cmd, check=True, cwd=str(tpcds_gen_path))
# only move delete table for data maintenance
if args.update:
move_delete_date_tables(temp_data_path, args.update)
merge_temp_tables(temp_data_path, args.data_dir, args.update)
finally:
clean_temp_data(temp_data_path)
else:
cmd.extend(["-d", args.data_dir])
subprocess.run(cmd, check=True, cwd=str(tpcds_gen_path))
# only move delete table for data maintenance
if args.update:
move_delete_date_tables(args.data_dir, args.update)
def generate_data_local(args, range_start, range_end, tool_path):
"""Generate data to local file system. TPC-DS tool will generate all table data under target
    folder without creating sub-folders for each table. So we add extra code to create a sub-folder
    for each table and move the data there accordingly.
Args:
        args (Namespace): Namespace from argparser
        range_start (int): start index of the data portion to be generated
        range_end (int): end index of the data portion to be generated
tool_path (str): path to the dsdgen tool
Raises:
Exception: if data already exists and overwrite_output is not honored
Exception: dsdgen failed
"""
data_dir = get_abs_path(args.data_dir)
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
else:
# Verify if there's already data in this path
if get_dir_size(data_dir) > 0 and not args.overwrite_output:
raise Exception(
"There's already been data exists in directory {}.".format(data_dir) +
" Use '--overwrite_output' to overwrite.")
# working directory for dsdgen
work_dir = tool_path.parent
procs = []
for i in range(range_start, range_end + 1):
dsdgen_args = ["-scale", args.scale,
"-dir", data_dir,
"-parallel", args.parallel,
"-child", str(i),
"-verbose", "Y"]
if args.overwrite_output:
dsdgen_args += ["-force", "Y"]
if args.update:
dsdgen_args += ["-update", args.update]
procs.append(subprocess.Popen(
["./dsdgen"] + dsdgen_args, cwd=str(work_dir)))
# wait for data generation to complete
for p in procs:
p.wait()
if p.returncode != 0:
print("dsdgen failed with return code {}".format(p.returncode))
raise Exception("dsdgen failed")
# move multi-partition files into table folders
if args.update:
table_names = maintenance_table_names
else:
table_names = source_table_names
for table in table_names:
print('mkdir -p {}/{}'.format(data_dir, table))
subprocess.run(['mkdir', '-p', data_dir + '/' + table])
for i in range(range_start, range_end + 1):
subprocess.run(['mv', f'{data_dir}/{table}_{i}_{args.parallel}.dat',
f'{data_dir}/{table}/'], stderr=subprocess.DEVNULL)
# delete date file has no parallel number suffix in the file name, move separately
subprocess.run(['mv', f'{data_dir}/{table}_1.dat',
f'{data_dir}/{table}/'], stderr=subprocess.DEVNULL)
# show summary
subprocess.run(['du', '-h', '-d1', data_dir])
def generate_data(args):
jar_path, tool_path = check_build()
range_start = 1
range_end = int(args.parallel)
if args.range:
range_start, range_end = valid_range(args.range, args.parallel)
if args.type == 'hdfs':
generate_data_hdfs(args, jar_path)
if args.type == 'local':
generate_data_local(args, range_start, range_end, tool_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("type",
choices=["local", "hdfs"],
help="file system to save the generated data.")
parser.add_argument("scale",
help="volume of data to generate in GB."
)
parser.add_argument("parallel",
type=parallel_value_type,
help="build data in <parallel_value> separate chunks"
)
parser.add_argument("data_dir",
help="generate data in directory.")
parser.add_argument('--range',
                        help='Used for incremental data generation, meaning which part of child ' +
'chunks are generated in one run. Format: "start,end", both are inclusive. ' +
'e.g. "1,100". Note: the child range must be within the "parallel", ' +
'"--parallel 100 --range 100,200" is illegal.')
parser.add_argument("--overwrite_output",
action="store_true",
help="overwrite if there has already existing data in the path provided.")
parser.add_argument("--replication",
help="the number of replication factor when generating data to HDFS. " +
"if not set, the Hadoop job will use the setting in the Hadoop cluster.")
parser.add_argument("--update",
help="generate update dataset <n>. <n> is identical to the number of " +
"streams used in the Throughput Tests of the benchmark")
args = parser.parse_args()
generate_data(args)
| spark-rapids-benchmarks-dev | nds/nds_gen_data.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import pyspark
from pyspark.sql.types import *
if not hasattr(pyspark.sql.types, "VarcharType"):
# this is a version of Spark that doesn't have fixed- and max-length string types
setattr(pyspark.sql.types, "VarcharType", lambda x: StringType())
setattr(pyspark.sql.types, "CharType", lambda x: StringType())
from pyspark.sql.types import VarcharType, CharType
def decimalType(use_decimal, precision, scale):
if use_decimal:
return DecimalType(precision, scale)
else:
return DoubleType()
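# A tiny illustrative check (not executed by this script): with use_decimal=True monetary
# columns keep an exact DecimalType, otherwise they fall back to DoubleType.
def _demo_decimal_type():
    assert decimalType(True, 7, 2) == DecimalType(7, 2)
    assert decimalType(False, 7, 2) == DoubleType()
    return True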
def get_schemas(use_decimal):
"""get the schemas of all tables. If use_decimal is True, DecimalType are applied, otherwide,
DoubleType will be used for DecimalType.
Args:
use_decimal (bool): use decimal or not
Returns:
dict: {table_name: schema}
"""
SCHEMAS = {}
# The specification states that "Identifier means that the column shall be able to hold any
# key value generated for that column". Some tables have more rows than others so we can
# choose to use different types per table.
identifier_int = IntegerType()
identifier_long = LongType()
SCHEMAS["customer_address"] = StructType([
StructField("ca_address_sk", identifier_int, nullable=False),
StructField("ca_address_id", CharType(16), nullable=False),
StructField("ca_street_number", CharType(10)),
StructField("ca_street_name", VarcharType(60)),
StructField("ca_street_type", CharType(15)),
StructField("ca_suite_number", CharType(10)),
StructField("ca_city", VarcharType(60)),
StructField("ca_county", VarcharType(30)),
StructField("ca_state", CharType(2)),
StructField("ca_zip", CharType(10)),
StructField("ca_country", VarcharType(20)),
StructField("ca_gmt_offset", decimalType(use_decimal, 5, 2)),
StructField("ca_location_type", CharType(20))
])
SCHEMAS["customer_demographics"] = StructType([
StructField("cd_demo_sk", identifier_int, nullable=False),
StructField("cd_gender", CharType(1)),
StructField("cd_marital_status", CharType(1)),
StructField("cd_education_status", CharType(20)),
StructField("cd_purchase_estimate", LongType()),
StructField("cd_credit_rating", CharType(10)),
StructField("cd_dep_count", LongType()),
StructField("cd_dep_employed_count", LongType()),
StructField("cd_dep_college_count", LongType())
])
SCHEMAS["date_dim"] = StructType([
StructField("d_date_sk", identifier_int, nullable=False),
StructField("d_date_id", CharType(16), nullable=False),
StructField("d_date", DateType()),
StructField("d_month_seq", LongType()),
StructField("d_week_seq", LongType()),
StructField("d_quarter_seq", LongType()),
StructField("d_year", LongType()),
StructField("d_dow", LongType()),
StructField("d_moy", LongType()),
StructField("d_dom", LongType()),
StructField("d_qoy", LongType()),
StructField("d_fy_year", LongType()),
StructField("d_fy_quarter_seq", LongType()),
StructField("d_fy_week_seq", LongType()),
StructField("d_day_name", CharType(9)),
StructField("d_quarter_name", CharType(6)),
StructField("d_holiday", CharType(1)),
StructField("d_weekend", CharType(1)),
StructField("d_following_holiday", CharType(1)),
StructField("d_first_dom", LongType()),
StructField("d_last_dom", LongType()),
StructField("d_same_day_ly", LongType()),
StructField("d_same_day_lq", LongType()),
StructField("d_current_day", CharType(1)),
StructField("d_current_week", CharType(1)),
StructField("d_current_month", CharType(1)),
StructField("d_current_quarter", CharType(1)),
StructField("d_current_year", CharType(1))
])
SCHEMAS["warehouse"] = StructType([
StructField("w_warehouse_sk", identifier_int, nullable=False),
StructField("w_warehouse_id", CharType(16), nullable=False),
StructField("w_warehouse_name", VarcharType(20)),
StructField("w_warehouse_sq_ft", LongType()),
StructField("w_street_number", CharType(10)),
StructField("w_street_name", VarcharType(60)),
StructField("w_street_type", CharType(15)),
StructField("w_suite_number", CharType(10)),
StructField("w_city", VarcharType(60)),
StructField("w_county", VarcharType(30)),
StructField("w_state", CharType(2)),
StructField("w_zip", CharType(10)),
StructField("w_country", VarcharType(20)),
StructField("w_gmt_offset", decimalType(use_decimal, 5, 2))
])
SCHEMAS["ship_mode"] = StructType([
StructField("sm_ship_mode_sk", identifier_int, nullable=False),
StructField("sm_ship_mode_id", CharType(16), nullable=False),
StructField("sm_type", CharType(30)),
StructField("sm_code", CharType(10)),
StructField("sm_carrier", CharType(20)),
StructField("sm_contract", CharType(20))
])
SCHEMAS["time_dim"] = StructType([
StructField("t_time_sk", identifier_int, nullable=False),
StructField("t_time_id", CharType(16), nullable=False),
StructField("t_time", LongType(), nullable=False),
StructField("t_hour", LongType()),
StructField("t_minute", LongType()),
StructField("t_second", LongType()),
StructField("t_am_pm", CharType(2)),
StructField("t_shift", CharType(20)),
StructField("t_sub_shift", CharType(20)),
StructField("t_meal_time", CharType(20))
])
SCHEMAS["reason"] = StructType([
StructField("r_reason_sk", identifier_int, nullable=False),
StructField("r_reason_id", CharType(16), nullable=False),
StructField("r_reason_desc", CharType(100))
])
SCHEMAS["income_band"] = StructType([
StructField("ib_income_band_sk", identifier_int, nullable=False),
StructField("ib_lower_bound", LongType()),
StructField("ib_upper_bound", LongType())
])
SCHEMAS["item"] = StructType([
StructField("i_item_sk", identifier_int, nullable=False),
StructField("i_item_id", CharType(16), nullable=False),
StructField("i_rec_start_date", DateType()),
StructField("i_rec_end_date", DateType()),
StructField("i_item_desc", VarcharType(200)),
StructField("i_current_price", decimalType(use_decimal, 7, 2)),
StructField("i_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("i_brand_id", LongType()),
StructField("i_brand", CharType(50)),
StructField("i_class_id", LongType()),
StructField("i_class", CharType(50)),
StructField("i_category_id", LongType()),
StructField("i_category", CharType(50)),
StructField("i_manufact_id", LongType()),
StructField("i_manufact", CharType(50)),
StructField("i_size", CharType(20)),
StructField("i_formulation", CharType(20)),
StructField("i_color", CharType(20)),
StructField("i_units", CharType(10)),
StructField("i_container", CharType(10)),
StructField("i_manager_id", LongType()),
StructField("i_product_name", CharType(50))
])
SCHEMAS["store"] = StructType([
StructField("s_store_sk", identifier_int, nullable=False),
StructField("s_store_id", CharType(16), nullable=False),
StructField("s_rec_start_date", DateType()),
StructField("s_rec_end_date", DateType()),
StructField("s_closed_date_sk", identifier_int),
StructField("s_store_name", VarcharType(50)),
StructField("s_number_employees", LongType()),
StructField("s_floor_space", LongType()),
StructField("s_hours", CharType(20)),
StructField("s_manager", VarcharType(40)),
StructField("s_market_id", LongType()),
StructField("s_geography_class", VarcharType(100)),
StructField("s_market_desc", VarcharType(100)),
StructField("s_market_manager", VarcharType(40)),
StructField("s_division_id", LongType()),
StructField("s_division_name", VarcharType(50)),
StructField("s_company_id", LongType()),
StructField("s_company_name", VarcharType(50)),
StructField("s_street_number", VarcharType(10)),
StructField("s_street_name", VarcharType(60)),
StructField("s_street_type", CharType(15)),
StructField("s_suite_number", CharType(10)),
StructField("s_city", VarcharType(60)),
StructField("s_county", VarcharType(30)),
StructField("s_state", CharType(2)),
StructField("s_zip", CharType(10)),
StructField("s_country", VarcharType(20)),
StructField("s_gmt_offset", decimalType(use_decimal, 5, 2)),
StructField("s_tax_precentage", decimalType(use_decimal, 5, 2))
])
SCHEMAS["call_center"] = StructType([
StructField("cc_call_center_sk", identifier_int, nullable=False),
StructField("cc_call_center_id", CharType(16), nullable=False),
StructField("cc_rec_start_date", DateType()),
StructField("cc_rec_end_date", DateType()),
StructField("cc_closed_date_sk", identifier_int),
StructField("cc_open_date_sk", identifier_int),
StructField("cc_name", VarcharType(50)),
StructField("cc_class", VarcharType(50)),
StructField("cc_employees", LongType()),
StructField("cc_sq_ft", LongType()),
StructField("cc_hours", CharType(20)),
StructField("cc_manager", VarcharType(40)),
StructField("cc_mkt_id", LongType()),
StructField("cc_mkt_class", CharType(50)),
StructField("cc_mkt_desc", VarcharType(100)),
StructField("cc_market_manager", VarcharType(40)),
StructField("cc_division", LongType()),
StructField("cc_division_name", VarcharType(50)),
StructField("cc_company", LongType()),
StructField("cc_company_name", CharType(50)),
StructField("cc_street_number", CharType(10)),
StructField("cc_street_name", VarcharType(60)),
StructField("cc_street_type", CharType(15)),
StructField("cc_suite_number", CharType(10)),
StructField("cc_city", VarcharType(60)),
StructField("cc_county", VarcharType(30)),
StructField("cc_state", CharType(2)),
StructField("cc_zip", CharType(10)),
StructField("cc_country", VarcharType(20)),
StructField("cc_gmt_offset", decimalType(use_decimal, 5, 2)),
StructField("cc_tax_percentage", decimalType(use_decimal, 5, 2))
])
SCHEMAS["customer"] = StructType([
StructField("c_customer_sk", identifier_int, nullable=False),
StructField("c_customer_id", CharType(16), nullable=False),
StructField("c_current_cdemo_sk", identifier_int),
StructField("c_current_hdemo_sk", identifier_int),
StructField("c_current_addr_sk", identifier_int),
StructField("c_first_shipto_date_sk", identifier_int),
StructField("c_first_sales_date_sk", identifier_int),
StructField("c_salutation", CharType(10)),
StructField("c_first_name", CharType(20)),
StructField("c_last_name", CharType(30)),
StructField("c_preferred_cust_flag", CharType(1)),
StructField("c_birth_day", LongType()),
StructField("c_birth_month", LongType()),
StructField("c_birth_year", LongType()),
StructField("c_birth_country", VarcharType(20)),
StructField("c_login", CharType(13)),
StructField("c_email_address", CharType(50)),
StructField("c_last_review_date_sk", identifier_int)
])
SCHEMAS["web_site"] = StructType([
StructField("web_site_sk", identifier_int, nullable=False),
StructField("web_site_id", CharType(16), nullable=False),
StructField("web_rec_start_date", DateType()),
StructField("web_rec_end_date", DateType()),
StructField("web_name", VarcharType(50)),
StructField("web_open_date_sk", identifier_int),
StructField("web_close_date_sk", identifier_int),
StructField("web_class", VarcharType(50)),
StructField("web_manager", VarcharType(40)),
StructField("web_mkt_id", LongType()),
StructField("web_mkt_class", VarcharType(50)),
StructField("web_mkt_desc", VarcharType(100)),
StructField("web_market_manager", VarcharType(40)),
StructField("web_company_id", LongType()),
StructField("web_company_name", CharType(50)),
StructField("web_street_number", CharType(10)),
StructField("web_street_name", VarcharType(60)),
StructField("web_street_type", CharType(15)),
StructField("web_suite_number", CharType(10)),
StructField("web_city", VarcharType(60)),
StructField("web_county", VarcharType(30)),
StructField("web_state", CharType(2)),
StructField("web_zip", CharType(10)),
StructField("web_country", VarcharType(20)),
StructField("web_gmt_offset", decimalType(use_decimal, 5, 2)),
StructField("web_tax_percentage", decimalType(use_decimal, 5, 2))
])
SCHEMAS["store_returns"] = StructType([
StructField("sr_returned_date_sk", identifier_int),
StructField("sr_return_time_sk", identifier_int),
StructField("sr_item_sk", identifier_int, nullable=False),
StructField("sr_customer_sk", identifier_int),
StructField("sr_cdemo_sk", identifier_int),
StructField("sr_hdemo_sk", identifier_int),
StructField("sr_addr_sk", identifier_int),
StructField("sr_store_sk", identifier_int),
StructField("sr_reason_sk", identifier_int),
# Use LongType due to https://github.com/NVIDIA/spark-rapids-benchmarks/pull/9#issuecomment-1138379596
# Databricks is using LongType as well in their accepted benchmark reports.
# See https://www.tpc.org/results/supporting_files/tpcds/databricks~tpcds~100000~databricks_SQL_8.3~sup-1~2021-11-02~v01.zip
StructField("sr_ticket_number", identifier_long, nullable=False),
StructField("sr_return_quantity", LongType()),
StructField("sr_return_amt", decimalType(use_decimal, 7, 2)),
StructField("sr_return_tax", decimalType(use_decimal, 7, 2)),
StructField("sr_return_amt_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("sr_fee", decimalType(use_decimal, 7, 2)),
StructField("sr_return_ship_cost", decimalType(use_decimal, 7, 2)),
StructField("sr_refunded_cash", decimalType(use_decimal, 7, 2)),
StructField("sr_reversed_charge", decimalType(use_decimal, 7, 2)),
StructField("sr_store_credit", decimalType(use_decimal, 7, 2)),
StructField("sr_net_loss", decimalType(use_decimal, 7, 2))
])
SCHEMAS["household_demographics"] = StructType([
StructField("hd_demo_sk", identifier_int, nullable=False),
StructField("hd_income_band_sk", identifier_int),
StructField("hd_buy_potential", CharType(15)),
StructField("hd_dep_count", LongType()),
StructField("hd_vehicle_count", LongType())
])
SCHEMAS["web_page"] = StructType([
StructField("wp_web_page_sk", identifier_int, nullable=False),
StructField("wp_web_page_id", CharType(16), nullable=False),
StructField("wp_rec_start_date", DateType()),
StructField("wp_rec_end_date", DateType()),
StructField("wp_creation_date_sk", identifier_int),
StructField("wp_access_date_sk", identifier_int),
StructField("wp_autogen_flag", CharType(1)),
StructField("wp_customer_sk", identifier_int),
StructField("wp_url", VarcharType(100)),
StructField("wp_type", CharType(50)),
StructField("wp_char_count", LongType()),
StructField("wp_link_count", LongType()),
StructField("wp_image_count", LongType()),
StructField("wp_max_ad_count", LongType())
])
SCHEMAS["promotion"] = StructType([
StructField("p_promo_sk", identifier_int, nullable=False),
StructField("p_promo_id", CharType(16), nullable=False),
StructField("p_start_date_sk", identifier_int),
StructField("p_end_date_sk", identifier_int),
StructField("p_item_sk", identifier_int),
StructField("p_cost", decimalType(use_decimal, 15, 2)),
StructField("p_response_target", LongType()),
StructField("p_promo_name", CharType(50)),
StructField("p_channel_dmail", CharType(1)),
StructField("p_channel_email", CharType(1)),
StructField("p_channel_catalog", CharType(1)),
StructField("p_channel_tv", CharType(1)),
StructField("p_channel_radio", CharType(1)),
StructField("p_channel_press", CharType(1)),
StructField("p_channel_event", CharType(1)),
StructField("p_channel_demo", CharType(1)),
StructField("p_channel_details", VarcharType(100)),
StructField("p_purpose", CharType(15)),
StructField("p_discount_active", CharType(1))
])
SCHEMAS["catalog_page"] = StructType([
StructField("cp_catalog_page_sk", identifier_int, nullable=False),
StructField("cp_catalog_page_id", CharType(16), nullable=False),
StructField("cp_start_date_sk", identifier_int),
StructField("cp_end_date_sk", identifier_int),
StructField("cp_department", VarcharType(50)),
StructField("cp_catalog_number", LongType()),
StructField("cp_catalog_page_number", LongType()),
StructField("cp_description", VarcharType(100)),
StructField("cp_type", VarcharType(100))
])
SCHEMAS["inventory"] = StructType([
StructField("inv_date_sk", identifier_int, nullable=False),
StructField("inv_item_sk", identifier_int, nullable=False),
StructField("inv_warehouse_sk", identifier_int, nullable=False),
StructField("inv_quantity_on_hand", LongType())
])
SCHEMAS["catalog_returns"] = StructType([
StructField("cr_returned_date_sk", identifier_int),
StructField("cr_returned_time_sk", identifier_int),
StructField("cr_item_sk", identifier_int, nullable=False),
StructField("cr_refunded_customer_sk", identifier_int),
StructField("cr_refunded_cdemo_sk", identifier_int),
StructField("cr_refunded_hdemo_sk", identifier_int),
StructField("cr_refunded_addr_sk", identifier_int),
StructField("cr_returning_customer_sk", identifier_int),
StructField("cr_returning_cdemo_sk", identifier_int),
StructField("cr_returning_hdemo_sk", identifier_int),
StructField("cr_returning_addr_sk", identifier_int),
StructField("cr_call_center_sk", identifier_int),
StructField("cr_catalog_page_sk", identifier_int),
StructField("cr_ship_mode_sk", identifier_int),
StructField("cr_warehouse_sk", identifier_int),
StructField("cr_reason_sk", identifier_int),
StructField("cr_order_number", identifier_int, nullable=False),
StructField("cr_return_quantity", LongType()),
StructField("cr_return_amount", decimalType(use_decimal, 7, 2)),
StructField("cr_return_tax", decimalType(use_decimal, 7, 2)),
StructField("cr_return_amt_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("cr_fee", decimalType(use_decimal, 7, 2)),
StructField("cr_return_ship_cost", decimalType(use_decimal, 7, 2)),
StructField("cr_refunded_cash", decimalType(use_decimal, 7, 2)),
StructField("cr_reversed_charge", decimalType(use_decimal, 7, 2)),
StructField("cr_store_credit", decimalType(use_decimal, 7, 2)),
StructField("cr_net_loss", decimalType(use_decimal, 7, 2))
])
SCHEMAS["web_returns"] = StructType([
StructField("wr_returned_date_sk", identifier_int),
StructField("wr_returned_time_sk", identifier_int),
StructField("wr_item_sk", identifier_int, nullable=False),
StructField("wr_refunded_customer_sk", identifier_int),
StructField("wr_refunded_cdemo_sk", identifier_int),
StructField("wr_refunded_hdemo_sk", identifier_int),
StructField("wr_refunded_addr_sk", identifier_int),
StructField("wr_returning_customer_sk", identifier_int),
StructField("wr_returning_cdemo_sk", identifier_int),
StructField("wr_returning_hdemo_sk", identifier_int),
StructField("wr_returning_addr_sk", identifier_int),
StructField("wr_web_page_sk", identifier_int),
StructField("wr_reason_sk", identifier_int),
StructField("wr_order_number", identifier_int, nullable=False),
StructField("wr_return_quantity", LongType()),
StructField("wr_return_amt", decimalType(use_decimal, 7, 2)),
StructField("wr_return_tax", decimalType(use_decimal, 7, 2)),
StructField("wr_return_amt_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("wr_fee", decimalType(use_decimal, 7, 2)),
StructField("wr_return_ship_cost", decimalType(use_decimal, 7, 2)),
StructField("wr_refunded_cash", decimalType(use_decimal, 7, 2)),
StructField("wr_reversed_charge", decimalType(use_decimal, 7, 2)),
StructField("wr_account_credit", decimalType(use_decimal, 7, 2)),
StructField("wr_net_loss", decimalType(use_decimal, 7, 2))
])
SCHEMAS["web_sales"] = StructType([
StructField("ws_sold_date_sk", identifier_int),
StructField("ws_sold_time_sk", identifier_int),
StructField("ws_ship_date_sk", identifier_int),
StructField("ws_item_sk", identifier_int, nullable=False),
StructField("ws_bill_customer_sk", identifier_int),
StructField("ws_bill_cdemo_sk", identifier_int),
StructField("ws_bill_hdemo_sk", identifier_int),
StructField("ws_bill_addr_sk", identifier_int),
StructField("ws_ship_customer_sk", identifier_int),
StructField("ws_ship_cdemo_sk", identifier_int),
StructField("ws_ship_hdemo_sk", identifier_int),
StructField("ws_ship_addr_sk", identifier_int),
StructField("ws_web_page_sk", identifier_int),
StructField("ws_web_site_sk", identifier_int),
StructField("ws_ship_mode_sk", identifier_int),
StructField("ws_warehouse_sk", identifier_int),
StructField("ws_promo_sk", identifier_int),
StructField("ws_order_number", identifier_int, nullable=False),
StructField("ws_quantity", LongType()),
StructField("ws_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("ws_list_price", decimalType(use_decimal, 7, 2)),
StructField("ws_sales_price", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_discount_amt", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_sales_price", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_list_price", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_tax", decimalType(use_decimal, 7, 2)),
StructField("ws_coupon_amt", decimalType(use_decimal, 7, 2)),
StructField("ws_ext_ship_cost", decimalType(use_decimal, 7, 2)),
StructField("ws_net_paid", decimalType(use_decimal, 7, 2)),
StructField("ws_net_paid_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("ws_net_paid_inc_ship", decimalType(use_decimal, 7, 2)),
StructField("ws_net_paid_inc_ship_tax",
decimalType(use_decimal, 7, 2)),
StructField("ws_net_profit", decimalType(use_decimal, 7, 2))
])
SCHEMAS["catalog_sales"] = StructType([
StructField("cs_sold_date_sk", identifier_int),
StructField("cs_sold_time_sk", identifier_int),
StructField("cs_ship_date_sk", identifier_int),
StructField("cs_bill_customer_sk", identifier_int),
StructField("cs_bill_cdemo_sk", identifier_int),
StructField("cs_bill_hdemo_sk", identifier_int),
StructField("cs_bill_addr_sk", identifier_int),
StructField("cs_ship_customer_sk", identifier_int),
StructField("cs_ship_cdemo_sk", identifier_int),
StructField("cs_ship_hdemo_sk", identifier_int),
StructField("cs_ship_addr_sk", identifier_int),
StructField("cs_call_center_sk", identifier_int),
StructField("cs_catalog_page_sk", identifier_int),
StructField("cs_ship_mode_sk", identifier_int),
StructField("cs_warehouse_sk", identifier_int),
StructField("cs_item_sk", identifier_int, nullable=False),
StructField("cs_promo_sk", identifier_int),
StructField("cs_order_number", identifier_int, nullable=False),
StructField("cs_quantity", LongType()),
StructField("cs_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("cs_list_price", decimalType(use_decimal, 7, 2)),
StructField("cs_sales_price", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_discount_amt", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_sales_price", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_list_price", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_tax", decimalType(use_decimal, 7, 2)),
StructField("cs_coupon_amt", decimalType(use_decimal, 7, 2)),
StructField("cs_ext_ship_cost", decimalType(use_decimal, 7, 2)),
StructField("cs_net_paid", decimalType(use_decimal, 7, 2)),
StructField("cs_net_paid_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("cs_net_paid_inc_ship", decimalType(use_decimal, 7, 2)),
StructField("cs_net_paid_inc_ship_tax",
decimalType(use_decimal, 7, 2)),
StructField("cs_net_profit", decimalType(use_decimal, 7, 2))
])
SCHEMAS["store_sales"] = StructType([
StructField("ss_sold_date_sk", identifier_int),
StructField("ss_sold_time_sk", identifier_int),
StructField("ss_item_sk", identifier_int, nullable=False),
StructField("ss_customer_sk", identifier_int),
StructField("ss_cdemo_sk", identifier_int),
StructField("ss_hdemo_sk", identifier_int),
StructField("ss_addr_sk", identifier_int),
StructField("ss_store_sk", identifier_int),
StructField("ss_promo_sk", identifier_int),
StructField("ss_ticket_number", identifier_long, nullable=False),
StructField("ss_quantity", LongType()),
StructField("ss_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("ss_list_price", decimalType(use_decimal, 7, 2)),
StructField("ss_sales_price", decimalType(use_decimal, 7, 2)),
StructField("ss_ext_discount_amt", decimalType(use_decimal, 7, 2)),
StructField("ss_ext_sales_price", decimalType(use_decimal, 7, 2)),
StructField("ss_ext_wholesale_cost", decimalType(use_decimal, 7, 2)),
StructField("ss_ext_list_price", decimalType(use_decimal, 7, 2)),
StructField("ss_ext_tax", decimalType(use_decimal, 7, 2)),
StructField("ss_coupon_amt", decimalType(use_decimal, 7, 2)),
StructField("ss_net_paid", decimalType(use_decimal, 7, 2)),
StructField("ss_net_paid_inc_tax", decimalType(use_decimal, 7, 2)),
StructField("ss_net_profit", decimalType(use_decimal, 7, 2))
])
return SCHEMAS
def get_maintenance_schemas(use_decimal):
MAINTENANCE_SCHEMAS = {}
MAINTENANCE_SCHEMAS["s_purchase_lineitem"] = StructType([
StructField("plin_purchase_id", IntegerType(), nullable=False),
StructField("plin_line_number", IntegerType(), nullable=False),
StructField("plin_item_id", CharType(16)),
StructField("plin_promotion_id", CharType(16)),
StructField("plin_quantity", IntegerType()),
StructField("plin_sale_price", decimalType(use_decimal, 7,2)),
StructField("plin_coupon_amt", decimalType(use_decimal, 7,2)),
StructField("plin_comment", VarcharType(100)),
])
MAINTENANCE_SCHEMAS["s_purchase"] = StructType([
StructField("purc_purchase_id", IntegerType(), nullable=False),
StructField("purc_store_id", CharType(16)),
StructField("purc_customer_id", CharType(16)),
StructField("purc_purchase_date", CharType(10)),
StructField("purc_purchase_time", IntegerType()),
StructField("purc_register_id", IntegerType()),
StructField("purc_clerk_id", IntegerType()),
StructField("purc_comment", CharType(100)),
])
MAINTENANCE_SCHEMAS["s_catalog_order"] = StructType([
StructField("cord_order_id", IntegerType(), nullable=False),
StructField("cord_bill_customer_id", CharType(16)),
StructField("cord_ship_customer_id", CharType(16)),
StructField("cord_order_date", CharType(10)),
StructField("cord_order_time", IntegerType()),
StructField("cord_ship_mode_id", CharType(16)),
StructField("cord_call_center_id", CharType(16)),
StructField("cord_order_comments", VarcharType(100)),
])
MAINTENANCE_SCHEMAS["s_web_order"] = StructType([
StructField("word_order_id", IntegerType(), nullable=False),
StructField("word_bill_customer_id", CharType(16)),
StructField("word_ship_customer_id", CharType(16)),
StructField("word_order_date", CharType(10)),
StructField("word_order_time", IntegerType()),
StructField("word_ship_mode_id", CharType(16)),
StructField("word_web_site_id", CharType(16)),
StructField("word_order_comments", CharType(100)),
])
MAINTENANCE_SCHEMAS["s_catalog_order_lineitem"] = StructType([
StructField("clin_order_id", IntegerType(), nullable=False),
StructField("clin_line_number", IntegerType(), nullable=False),
StructField("clin_item_id", CharType(16)),
StructField("clin_promotion_id", CharType(16)),
StructField("clin_quantity", IntegerType()),
StructField("clin_sales_price", decimalType(use_decimal, 7,2)),
StructField("clin_coupon_amt", decimalType(use_decimal, 7,2)),
StructField("clin_warehouse_id", CharType(16)),
StructField("clin_ship_date", CharType(10)),
StructField("clin_catalog_number", IntegerType()),
StructField("clin_catalog_page_number", IntegerType()),
StructField("clin_ship_cost", decimalType(use_decimal, 7,2)),
])
MAINTENANCE_SCHEMAS["s_web_order_lineitem"] = StructType([
StructField("wlin_order_id", IntegerType(), nullable=False),
StructField("wlin_line_number", IntegerType(), nullable=False),
StructField("wlin_item_id", CharType(16)),
StructField("wlin_promotion_id", CharType(16)),
StructField("wlin_quantity", IntegerType()),
StructField("wlin_sales_price", decimalType(use_decimal, 7,2)),
StructField("wlin_coupon_amt", decimalType(use_decimal, 7,2)),
StructField("wlin_warehouse_id", CharType(16)),
StructField("wlin_ship_date", CharType(10)),
StructField("wlin_ship_cost", decimalType(use_decimal, 7,2)),
StructField("wlin_web_page_id", CharType(16)),
])
MAINTENANCE_SCHEMAS["s_store_returns"] = StructType([
StructField("sret_store_id", CharType(16)),
StructField("sret_purchase_id", CharType(16), nullable=False),
StructField("sret_line_number", IntegerType(), nullable=False),
StructField("sret_item_id", CharType(16), nullable=False),
StructField("sret_customer_id", CharType(16)),
StructField("sret_return_date", CharType(10)),
StructField("sret_return_time", CharType(10)),
StructField("sret_ticket_number", LongType()),
StructField("sret_return_qty", IntegerType()),
StructField("sret_return_amt", decimalType(use_decimal, 7,2)),
StructField("sret_return_tax", decimalType(use_decimal, 7,2)),
StructField("sret_return_fee", decimalType(use_decimal, 7,2)),
StructField("sret_return_ship_cost", decimalType(use_decimal, 7,2)),
StructField("sret_refunded_cash", decimalType(use_decimal, 7,2)),
StructField("sret_reversed_charge", decimalType(use_decimal, 7,2)),
StructField("sret_store_credit", decimalType(use_decimal, 7,2)),
StructField("sret_reason_id", CharType(16)),
])
MAINTENANCE_SCHEMAS["s_catalog_returns"] = StructType([
StructField("cret_call_center_id", CharType(16)),
StructField("cret_order_id", IntegerType(), nullable=False),
StructField("cret_line_number", IntegerType(), nullable=False),
StructField("cret_item_id", CharType(16), nullable=False),
StructField("cret_return_customer_id", CharType(16)),
StructField("cret_refund_customer_id", CharType(16)),
StructField("cret_return_date", CharType(10)),
StructField("cret_return_time", CharType(10)),
StructField("cret_return_qty", IntegerType()),
StructField("cret_return_amt", decimalType(use_decimal, 7,2)),
StructField("cret_return_tax", decimalType(use_decimal, 7,2)),
StructField("cret_return_fee", decimalType(use_decimal, 7,2)),
StructField("cret_return_ship_cost", decimalType(use_decimal, 7,2)),
StructField("cret_refunded_cash", decimalType(use_decimal, 7,2)),
StructField("cret_reversed_charge", decimalType(use_decimal, 7,2)),
StructField("cret_merchant_credit", decimalType(use_decimal, 7,2)),
StructField("cret_reason_id", CharType(16)),
StructField("cret_shipmode_id", CharType(16)),
StructField("cret_catalog_page_id", CharType(16)),
StructField("cret_warehouse_id", CharType(16)),
])
MAINTENANCE_SCHEMAS["s_web_returns"] = StructType([
StructField("wret_web_page_id", CharType(16)),
StructField("wret_order_id", IntegerType(), nullable=False),
StructField("wret_line_number", IntegerType(), nullable=False),
StructField("wret_item_id", CharType(16), nullable=False),
StructField("wret_return_customer_id", CharType(16)),
StructField("wret_refund_customer_id", CharType(16)),
StructField("wret_return_date", CharType(10)),
StructField("wret_return_time", CharType(10)),
StructField("wret_return_qty", IntegerType()),
StructField("wret_return_amt", decimalType(use_decimal,7,2)),
StructField("wret_return_tax", decimalType(use_decimal,7,2)),
StructField("wret_return_fee", decimalType(use_decimal,7,2)),
StructField("wret_return_ship_cost", decimalType(use_decimal,7,2)),
StructField("wret_refunded_cash", decimalType(use_decimal,7,2)),
StructField("wret_reversed_charge", decimalType(use_decimal,7,2)),
StructField("wret_account_credit", decimalType(use_decimal,7,2)),
StructField("wret_reason_id", CharType(16)),
])
MAINTENANCE_SCHEMAS["s_inventory"] = StructType([
StructField("invn_warehouse_id", CharType(16), nullable=False),
StructField("invn_item_id", CharType(16), nullable=False),
StructField("invn_date", CharType(10), nullable=False),
StructField("invn_qty_on_hand", IntegerType()),
])
MAINTENANCE_SCHEMAS["delete"] = StructType([
StructField("date1", StringType(), nullable=False),
StructField("date2", StringType(), nullable=False),
])
MAINTENANCE_SCHEMAS["inventory_delete"] = StructType([
StructField("date1", StringType(), nullable=False),
StructField("date2", StringType(), nullable=False),
])
return MAINTENANCE_SCHEMAS
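# --- Illustrative usage sketch (not part of the original benchmark code). ---
# A minimal example, assuming a SparkSession and a directory of raw,
# pipe-delimited dsdgen output are available, of how one of the schemas
# returned by get_schemas() might be applied when reading a table. The path
# layout and reader options here are assumptions for illustration only.
def _example_read_table_with_schema(spark, raw_data_root, table_name, use_decimal=True):
    """Read one raw TPC-DS table with the matching schema (illustrative sketch)."""
    schema = get_schemas(use_decimal)[table_name]
    return (spark.read
            .option("delimiter", "|")
            .option("header", "false")
            .schema(schema)
            .csv("{}/{}".format(raw_data_root, table_name)))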
if __name__ == "__main__":
# Test code
print(get_schemas(False))
print(get_schemas(True))
print(get_maintenance_schemas(False))
print(get_maintenance_schemas(True)) | spark-rapids-benchmarks-dev | nds/nds_schema.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
import argparse
import os
import subprocess
import sys
from check import check_build, check_version, get_abs_path
check_version()
def generate_query_streams(args, tool_path):
"""call TPC-DS dsqgen tool to generate a specific query or query stream(s) that contains all
TPC-DS queries.
Args:
args (Namespace): Namespace from argparser
tool_path (str): path to the tool
"""
# move to the tools directory
work_dir = tool_path.parent
output_dir = get_abs_path(args.output_dir)
template_dir = get_abs_path(args.template_dir)
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
base_cmd = ['./dsqgen',
'-scale', args.scale,
'-directory', template_dir,
'-dialect', 'spark',
'-output_dir', output_dir]
if args.streams:
cmd = base_cmd + ['-input', template_dir + '/' + 'templates.lst',
'-streams', args.streams]
else:
cmd = base_cmd + ['-template', args.template]
if args.rngseed:
cmd += ['-rngseed', args.rngseed]
subprocess.run(cmd, check=True, cwd=str(work_dir))
if args.template:
# It's a specific query; rename the stream file to its template query name.
# Special cases for queries 14, 23, 24 and 39: they contain two queries in one template.
if any(q_num in args.template for q_num in ['14', '23', '24', '39']):
with open(output_dir + '/' + 'query_0.sql', 'r') as f:
full_content = f.read()
part_1, part_2 = split_special_query(full_content)
with open(output_dir + '/' + args.template[:-4] + '_part1.sql', 'w') as f:
f.write(part_1)
with open(output_dir + '/' + args.template[:-4] + '_part2.sql', 'w') as f:
f.write(part_2)
cmd = ['rm', output_dir + '/' + 'query_0.sql']
subprocess.run(cmd, check=True, cwd=str(work_dir))
else:
subprocess.run(['mv',
output_dir + '/' + 'query_0.sql',
output_dir + '/' + args.template[:-4] + '.sql'],
check=True, cwd=str(work_dir))
def split_special_query(q):
split_q = q.split(';')
# now split_q has 3 items:
# 1. "query x in stream x using template query[xx].tpl query_part_1"
# 2. "query_part_2"
# 3. "-- end query [x] in stream [x] using template query[xx].tpl"
part_1 = split_q[0].replace('.tpl', '_part1.tpl')
part_1 += ';'
head = split_q[0].split('\n')[0]
part_2 = head.replace('.tpl', '_part2.tpl') + '\n'
part_2 += split_q[1]
part_2 += ';'
return part_1, part_2
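# --- Illustrative example (toy input; real inputs come from dsqgen). ---
# A minimal sketch of what split_special_query() produces for the multi-part
# templates (14, 23, 24, 39): the generated stream holds two statements
# separated by ';', and the header comment is re-used for the second part
# with a "_part2" suffix on the template name.
def _example_split_special_query():
    toy_stream = (
        "-- start query 1 in stream 0 using template query14.tpl\n"
        "SELECT 1\n"
        ";\n"
        "SELECT 2\n"
        ";\n"
        "-- end query 1 in stream 0 using template query14.tpl"
    )
    part_1, part_2 = split_special_query(toy_stream)
    # part_1: header renamed to query14_part1.tpl followed by the first statement.
    # part_2: header renamed to query14_part2.tpl followed by the second statement.
    return part_1, part_2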
if __name__ == "__main__":
_, tool_path = check_build()
parser = argparse.ArgumentParser()
parser.add_argument('template_dir',
help='directory to find query templates and dialect file.')
parser.add_argument("scale",
help="assume a database of this scale factor."
)
parser.add_argument("output_dir",
help="generate query in directory.")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--template",
help="build queries from this template. Only used to generate one query " +
"from one tempalte. This argument is mutually exclusive with --streams. " +
"It is often used for test purpose.")
group.add_argument('--streams',
help='number of query streams to generate. ' +
'This argument is mutually exclusive with --template.')
parser.add_argument('--rngseed',
help='seed for the random number generator.')
args = parser.parse_args()
generate_query_streams(args, tool_path)
| spark-rapids-benchmarks-dev | nds/nds_gen_query_stream.py |
#!/usr/bin/env python3
from pyspark import SparkContext
from pyspark.java_gateway import ensure_callback_server_started
class PythonListener(object):
package = "com.nvidia.spark.rapids.listener"
@staticmethod
def get_manager():
jvm = SparkContext.getOrCreate()._jvm
manager = getattr(jvm, "{}.{}".format(PythonListener.package, "Manager"))
return manager
def __init__(self):
self.uuid = None
self.failures = []
def notify(self, obj):
"""This method is required by Scala Listener interface
we defined above.
"""
self.failures.append(obj)
def register(self):
ensure_callback_server_started(gw = SparkContext.getOrCreate()._gateway)
manager = PythonListener.get_manager()
self.uuid = manager.register(self)
return self.uuid
def unregister(self):
manager = PythonListener.get_manager()
manager.unregister(self.uuid)
self.uuid = None
# should be called after register()
def register_spark_listener(self):
manager = PythonListener.get_manager()
manager.registerSparkListener()
def unregister_spark_listener(self):
manager = PythonListener.get_manager()
manager.unregisterSparkListener()
class Java:
implements = ["com.nvidia.spark.rapids.listener.Listener"]
| spark-rapids-benchmarks-dev | nds/python_listener/PythonListener.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----
#
# Certain portions of the contents of this file are derived from TPC-DS version 3.2.0
# (retrieved from www.tpc.org/tpc_documents_current_versions/current_specifications5.asp).
# Such portions are subject to copyrights held by Transaction Processing Performance Council (“TPC”)
# and licensed under the TPC EULA (a copy of which accompanies this file as “TPC EULA” and is also
# available at http://www.tpc.org/tpc_documents_current_versions/current_specifications5.asp) (the “TPC EULA”).
#
# You may not use this file except in compliance with the TPC EULA.
# DISCLAIMER: Portions of this file is derived from the TPC-DS Benchmark and as such any results
# obtained using this file are not comparable to published TPC-DS Benchmark results, as the results
# obtained from using this file do not comply with the TPC-DS Benchmark.
#
from .PythonListener import PythonListener
| spark-rapids-benchmarks-dev | nds/python_listener/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Release mechanism for TAO Toolkit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | release/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper utils for packaging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
def get_long_description(root):
"""Get the long description for the launcher wheel."""
long_desc_file = os.path.join(root, "tao_pypi_description.md")
with open(long_desc_file, "r") as lfile:
data = lfile.read()
return data
def up_directory(dir_path, n=1):
"""Go up n directories from dir_path."""
dir_up = dir_path
for _ in range(n):
dir_up = os.path.split(dir_up)[0]
return dir_up
def remove_prefix(dir_path):
"""Remove a certain prefix from path."""
max_path = 8
prefix = dir_path
while max_path > 0:
prefix = os.path.split(prefix)[0]
if prefix.endswith('ai_infra'):
return dir_path[len(prefix) + 1:]
max_path -= 1
return dir_path
def get_subdirs(path):
"""Get all subdirs of given path."""
dirs = os.walk(path)
return [remove_prefix(x[0]) for x in dirs]
def rename_py_files(path, ext, new_ext, ignore_files):
"""Rename all .ext files in a path to .new_ext except __init__ files."""
files = glob.glob(path + '/*' + ext)
for ignore_file in ignore_files:
files = [f for f in files if ignore_file not in f]
for filename in files:
os.rename(filename, filename.replace(ext, new_ext))
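# --- Illustrative example (hypothetical build path; shown only to document intent). ---
# Presumably used by the release tooling to temporarily rename sources before
# packaging while leaving __init__.py and version.py untouched.
def _example_rename_py_files():
    rename_py_files("./build/nvidia_tao_cli", ".py", ".py_tmp",
                    ignore_files=["__init__.py", "version.py"])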
| tao_launcher-main | release/utils.py |
# Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
"""Modules required to build the TAO CLI package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | release/tao/__init__.py |
# Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
"""Setup script to build the TAO Toolkit CLI package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import os
import setuptools
import sys
from release import utils
# Define env paths.
LOCAL_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TOP_LEVEL_DIR = utils.up_directory(LOCAL_DIR, 1)
LAUNCHER_SDK_PATH = os.path.join(TOP_LEVEL_DIR, 'nvidia_tao_cli')
IGNORE_LIST = ['__init__.py', 'version.py']
# Get current __version__
def get_version_locals(package_root):
"""Get the package information."""
version_locals = {}
with open(os.path.join(package_root, 'version.py')) as version_file:
exec(version_file.read(), {}, version_locals)
return version_locals
def get_python_requirements():
"""Python version requirement."""
# Set the required version of python - required when doing obfuscation.
__python_version__ = ">=3.6"
return __python_version__
def get_launcher_package(package_root=LAUNCHER_SDK_PATH, is_build_action=True):
"""Get TAO Launcher packages."""
req_subdirs = utils.get_subdirs(package_root)
if is_build_action:
# Pick up the TAO launcher.
launcher_packages = setuptools.find_packages(LAUNCHER_SDK_PATH)
launcher_packages = ["nvidia_tao_cli." + f for f in launcher_packages]
launcher_packages.append("nvidia_tao_cli")
return launcher_packages
# Cleanup. Rename all .py_tmp files back to .py and delete pyc files
for dir_path in req_subdirs:
dir_path = os.path.join(TOP_LEVEL_DIR, dir_path)
pyc_list = glob.glob(dir_path + '/*.pyc')
for pyc_file in pyc_list:
os.remove(pyc_file)
return []
# Getting dependencies.
def get_requirements(package_root):
"""Simple function to get packages."""
with open(os.path.join(TOP_LEVEL_DIR, "dependencies/requirements-pip.txt"), 'r') as req_file:
requirements = [r.replace('\n', '')for r in req_file.readlines()]
return requirements
def main(args=sys.argv[1:]):
"""Main wrapper to run setup.py"""
# Get package related information.
version_locals = get_version_locals(LAUNCHER_SDK_PATH)
__python_version__ = get_python_requirements()
__long_description__ = utils.get_long_description(TOP_LEVEL_DIR)
__long_description_content_type__ = "text/markdown"
requirements = get_requirements(LAUNCHER_SDK_PATH)
launcher_packages = get_launcher_package(
package_root=LAUNCHER_SDK_PATH,
is_build_action=True
)
# TODO: Modify script entry points
setuptools.setup(
name='nvidia-tao',
version=version_locals['__version__'],
description=version_locals["__description__"],
author=version_locals["__contact_names__"],
author_email=version_locals["__contact_emails__"],
long_description=__long_description__,
long_description_content_type=__long_description_content_type__,
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Intended Audience :: Developers',
# Indicate what your project relates to
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: {}'.format(
sys.version_info.major
),
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords=version_locals["__keywords__"],
packages=launcher_packages,
license="NVIDIA Proprietary Software",
package_dir={'': os.path.relpath(TOP_LEVEL_DIR)},
python_requires=__python_version__,
package_data={
'': ['*.py', '*.json', '*.pdf'],
},
include_package_data=True,
install_requires=requirements,
zip_safe=False,
entry_points={
'console_scripts': [
'tao=nvidia_tao_cli.entrypoint.tao_launcher:main',
]
}
)
# Clean up packages post installation.
get_launcher_package(package_root=LAUNCHER_SDK_PATH, is_build_action=False)
if __name__ == "__main__":
main()
| tao_launcher-main | release/tao/setup.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launcher SDK version."""
MAJOR = "5"
MINOR = "0"
PATCH = "0"
PRE_RELEASE = ''
# Getting the build number.
def get_build_info():
"""Get the build version number."""
# required since setup.py runs a version string and global imports aren't executed.
import os # noqa pylint: disable=import-outside-toplevel
build_file = "build.info"
if not os.path.exists(build_file):
raise FileNotFoundError("Build file doesn't exist.")
patch = 0
with open(build_file, 'r') as bfile:
patch = bfile.read().strip()
assert bfile.closed, "Build file wasn't closed properly."
return patch
try:
PATCH = get_build_info()
except FileNotFoundError:
pass
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
# Version of the library.
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
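# For example, with VERSION = ("5", "0", "0", "") the expression above yields
# __version__ == "5.0.0"; a pre-release value such as "rc1" would give "5.0.0rc1".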
# Version of the file format.
__format_version__ = 2
# Other package info.
__package_name__ = "nvidia-tao"
__description__ = "NVIDIA's Launcher for TAO Toolkit."
__keywords__ = "nvidia, tao, launcher"
__contact_names__ = "Varun Praveen"
__contact_emails__ = "[email protected]"
__license__ = "Apache 2.0"
| tao_launcher-main | nvidia_tao_cli/version.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python package for the TAO Toolkit Launcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from nvidia_tao_cli.version import __version__ # noqa pylint: disable=unused-import
| tao_launcher-main | nvidia_tao_cli/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configurations for TAO Toolkit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | nvidia_tao_cli/config/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Component implementations for the TAO Toolkit launcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | nvidia_tao_cli/components/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define a data structure containing information about a task class."""
import json
import os
OVERRIDE_REGISTRY = os.getenv("OVERRIDE_REGISTRY", None)
class Task(object):
"""Define the task data structure."""
def __init__(self, name, docker_image=None, docker_tag=None,
docker_registry=None, docker_digest=None):
"""Initialize the task data structure.
Args:
name(str): Name of the task.
docker_image(str): Name of the docker image to be mapped to.
docker_tag(str) (optional): Tag of the docker.
docker_registry (str): Registry from where the docker should be
pulled.
docker_digest(str): Digest value of the docker in the registry.
"""
self.name = name
self.docker_image = docker_image
self.docker_tag = docker_tag
self.docker_registry = docker_registry
if OVERRIDE_REGISTRY is not None:
self.docker_registry = OVERRIDE_REGISTRY
self.docker_digest = docker_digest
def get_config(self):
"""Return the Task configuration as a dict."""
config = {
"name": self.name,
"docker_image": self.docker_image,
"docker_registry": self.docker_registry,
"docker_tag": self.docker_tag,
"docker_digest": self.docker_digest
}
return config
@classmethod
def from_config(cls, config_data):
"""Return a task data structure from config."""
assert isinstance(config_data, dict), (
"The config data should be a dictionary."
)
mandatory_args = ["name", "docker_image", "docker_tag", "docker_registry"]
optional_args = ["docker_digest"]
assert all(key in config_data for key in mandatory_args), "Missing one of the mandatory keys: {}".format(mandatory_args)
args = [
config_data["name"],
config_data["docker_image"],
config_data["docker_tag"],
config_data["docker_registry"]
]
kwargs = {}
for arg in optional_args:
if arg in config_data.keys():
kwargs[arg] = config_data[arg]
return Task(*args, **kwargs)
def __str__(self):
"""String representation of this task."""
config = self.get_config()
return json.dumps(
config, indent=4
)
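# --- Illustrative example (hypothetical image, tag and registry values). ---
def _example_task_from_config():
    """Minimal sketch of building a Task from a config dictionary."""
    config = {
        "name": "classification_tf1",
        "docker_image": "nvidia/tao/tao-toolkit",
        "docker_tag": "5.0.0-tf1.15.5",
        "docker_registry": "nvcr.io",
    }
    task = Task.from_config(config)
    # str(task) pretty-prints the same fields (plus docker_digest) as JSON via get_config().
    return task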
| tao_launcher-main | nvidia_tao_cli/components/types/task.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining the required data structures."""
| tao_launcher-main | nvidia_tao_cli/components/types/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for the TAO Toolkit instance."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
INSTANCE_HANDLER_TASKS = ["list", "stop", "info"]
class TAOInstance(object):
"""Simple class definition for a TAO instance."""
def __init__(self, task_map, docker_images=None):
"""Intialize a base instance of the tao instance handler."""
self.task_map = task_map
self.dl_tasks = sorted(list(task_map.keys()))
self.docker_images = docker_images
self.instance_handler_tasks = INSTANCE_HANDLER_TASKS
@staticmethod
@abstractmethod
def load_config(config_path):
"""Load TAO Toolkit instance config file."""
raise NotImplementedError("Base class doesn't have this method implemented.")
@classmethod
def from_config(cls, config_path):
"""Initialize an instance from config."""
raise NotImplementedError("Base class doesn't have this method implemented.")
@abstractmethod
def launch_command(self, command, args):
"""Launch the TAO Toolkit command."""
raise NotImplementedError("Base class doesn't have this method implemented.")
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/base_instance.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit instance handler for launching jobs locally."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import sys
import textwrap
from tabulate import tabulate
from nvidia_tao_cli.components.docker_handler.docker_handler import (
DOCKER_MOUNT_FILE,
DockerHandler
)
from nvidia_tao_cli.components.instance_handler.base_instance import TAOInstance
from nvidia_tao_cli.components.instance_handler.utils import (
docker_logged_in,
load_config_file,
)
from nvidia_tao_cli.components.types.task import Task
logger = logging.getLogger(__name__)
TAB_SPACE = 4
TABS = " " * TAB_SPACE
class LocalInstance(TAOInstance):
"""Instance handler class to define a TAO Toolkit instance."""
def __init__(self, task_map, docker_images, config_path):
"""Initialize a local TAO Toolkit instance.
Args:
task_map(dict): Dictionary of task name to Task data structure.
docker_images(list): List of docker image names.
"""
super(LocalInstance, self).__init__(
task_map=task_map,
docker_images=docker_images
)
self.current_config_path = config_path
logger.debug("Current config file imported from: {}".format(
self.current_config_path
))
@staticmethod
def load_config(config_path):
"""Function to load the json config file.
Args:
config_path(str): Unix style path to the config file.
Returns:
config_data(dict): Parsed config data.
"""
data = load_config_file(config_path)
return data
@staticmethod
def parse_launcher_config(config_data):
"""Parse launcher configuration data based on format version.
Args:
data(dict): Data containing configuration parameters for the launcher instance
Returns:
task_map(dict): Dictionary of tasks mapped to the respective dockers.
"""
if "format_version" not in config_data.keys():
raise KeyError("format is a required key in the launcher config.")
task_map = {}
docker_images = set()
if config_data["format_version"] == 1.0:
local_map = {}
for image in list(config_data["dockers"].keys()):
logger.debug("Processing {}".format(image))
docker_data = config_data["dockers"][image]
if "tasks" not in list(docker_data.keys()):
raise NotImplementedError(
"The config data must contain tasks associated with the "
"respective docker."
)
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=docker_data["docker_tag"],
docker_registry=docker_data["docker_registry"],
docker_digest=docker_data["docker_digest"] if "docker_digest" in docker_data.keys() else None
) for task in docker_data["tasks"]
})
docker_images.add(image)
task_map["container_actions"] = local_map
elif config_data["format_version"] == 2.0:
local_map = {}
for image in list(config_data["dockers"].keys()):
logger.debug("Processing {}".format(image))
docker_data = config_data["dockers"][image]
if not isinstance(docker_data, dict):
raise ValueError("Invalid format.")
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=tag,
docker_registry=docker_data[tag]["docker_registry"],
docker_digest=docker_data[tag]["docker_digest"] if "docker_digest" in docker_data[tag].keys() else None
) for tag in docker_data.keys() for task in docker_data[tag]["tasks"]
})
docker_images.add(image)
task_map["container_actions"] = local_map
elif config_data["format_version"] == 3.0:
for task_group, group_data in config_data["task_group"].items():
logger.debug("Configuring task group {task_group}".format(
task_group=task_group
))
local_map = {}
for image, image_data in group_data["dockers"].items():
logger.debug(
"Extracting tasks from docker {image}".format(
image=image
)
)
if not isinstance(image_data, dict):
raise ValueError(f"Invalid data format for images {type(image_data)} encountered.")
logger.debug(json.dumps(image_data, indent=4))
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=tag,
docker_registry=image_data[tag]["docker_registry"],
docker_digest=image_data[tag].get("docker_digest", None)
) for tag in image_data.keys() for task in image_data[tag]["tasks"]
})
docker_images.add(image)
task_map[task_group] = local_map
else:
raise NotImplementedError("Invalid format type: {}".format(config_data["format_version"]))
return task_map, docker_images
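    # --- Illustrative example (hypothetical image/tag names). ---
    # A format_version 3.0 config accepted by parse_launcher_config() looks
    # roughly like the sketch below: task groups map to dockers, dockers map
    # to tags, and each tag lists the tasks it serves.
    #
    #   {
    #       "format_version": 3.0,
    #       "task_group": {
    #           "model": {
    #               "dockers": {
    #                   "nvidia/tao/tao-toolkit": {
    #                       "5.0.0-tf1.15.5": {
    #                           "docker_registry": "nvcr.io",
    #                           "tasks": ["classification_tf1", "detectnet_v2"]
    #                       }
    #                   }
    #               }
    #           }
    #       }
    #   }
    #
    # For that input the method returns a task_map of the form
    # {"model": {"classification_tf1": Task(...), "detectnet_v2": Task(...)}}
    # together with the set of docker image names {"nvidia/tao/tao-toolkit"}.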
@classmethod
def from_config(cls, config_path):
"""Instantiate a TAO Toolkit instance from a config file.
Args:
config_path(str): Path to the launcher config file.
Returns:
Initialized LocalInstance object.
"""
config_data = cls.load_config(config_path)
task_map, docker_images = cls.parse_launcher_config(config_data)
debug_string = ""
for task_name, task in task_map.items():
if config_data["format_version"] == 3.0:
task_list = ""
for name, task_data in task.items():
task_list += f"{name}: {str(task_data)}\n"
debug_string = f"{task_name}: {task_list}"
else:
debug_string += f"\n {task_name}: {str(task)}\n"
logger.debug(debug_string)
return LocalInstance(
task_map,
docker_images,
config_path
)
@property
def handler_map(self):
"""Get image to handler map."""
handler_map = {}
# Build a handler map for the local instance.
assert bool(self.task_map), (
"A valid task map wasn't provided."
)
logger.debug("Acquiring handler map for dockers.")
for _, tasks_dict in self.task_map.items():
for _, map_val in tasks_dict.items():
handler_key = f"{map_val.docker_image}:{map_val.docker_tag}"
if handler_key not in handler_map.keys():
handler_map[handler_key] = DockerHandler(
docker_registry=map_val.docker_registry,
image_name=map_val.docker_image,
docker_tag=map_val.docker_tag,
docker_digest=map_val.docker_digest,
docker_mount_file=os.getenv("LAUNCHER_MOUNTS", DOCKER_MOUNT_FILE)
)
return handler_map
@property
def _docker_client(self):
"""Get a docker handler to interact with the docker client."""
docker_handler = list(self.handler_map.values())[0]
return docker_handler._docker_client
def _get_running_containers(self):
"""Return a list of running TAO Toolkit containers."""
assert len(list(self.handler_map.keys())) > 0, (
"A valid handler map was not defined."
)
return [container for container in self._docker_client.containers.list()
for image in self.handler_map.keys()
if image in container.attrs["Config"]["Image"]]
def kill_containers(self, container_ids, kill_all=False):
"""Kill containers by a list of container ids."""
if kill_all:
for container in self._get_running_containers():
container.stop()
else:
# Containers don't exist.
if isinstance(container_ids, list):
for idx in container_ids:
container = self._docker_client.containers.get(idx)
container.stop()
else:
print("No containers provided in the list to stop. "
"Please run tao stop --help for more information.")
def list_running_jobs(self):
"""Simple function to list all existing jobs in a container."""
container_list = self._get_running_containers()
command_per_container = {}
# Map the commands to the containers.
for task_group in self.task_map.keys():
tasks_per_group = self.task_map[task_group].keys()
for container in container_list:
procs_list = container.top()
command_per_container[container] = ""
for item in procs_list["Processes"]:
# Getting the command from the row returned by container top.
command = item[-1:][0]
# Extracting entrypoint from the running command.
task_point = command.split(" ")
if len(task_point) < 2:
continue
task_point = task_point[1]
# Since only 1 entrypoint will be launched per container, we only care
# about finding the process for that entrypoint.
if task_point.split("/")[-1] in tasks_per_group:
command_per_container[container] = "{} {}".format(
task_point.split("/")[-1],
" ".join(command.split(" ")[2:])
)
# We break from here because the launcher is currently designed to
# handle one command per container. And since we only expose the entrypoints
# we only need to look for the entrypoints running in the container.
# TODO @vpraveen: We may need to change this in the future.
break
# Tabulate and print out the processes.
self.pretty_print(command_per_container)
def print_information(self, verbose=False):
"""Print the information of the current TAO Toolkit."""
print("Configuration of the TAO Toolkit Instance")
try:
config = self.load_config(self.current_config_path)
except AssertionError:
print("Config file doesn't exist. Aborting information printing")
sys.exit(-1)
if verbose:
print(self.dict_print(config))
else:
for key, value in config.items():
print_value = value
if isinstance(value, dict):
print_value = list(value.keys())
print("{}: {}".format(key, print_value))
return False
def dict_print(self, dictionary, nlevels=0):
"""Print the dictionary element.
Args:
dictionary (dict): Dictionary to recursive print
nlevels (int): Tab indentation level.
Returns:
output_string (str): Formatted print string.
"""
assert isinstance(dictionary, dict), ""
output_string = "{}".format(f"{TABS}" * nlevels)
for key, value in dictionary.items():
output_string += "\n{}{}: ".format(f"{TABS}" * nlevels, key)
if isinstance(value, dict):
output_string += "{}{}".format(
TABS, self.dict_print(value, nlevels + 1)
)
elif isinstance(value, list):
for idx, item in enumerate(value, start=1):
output_string += "\n{}{}. {}".format(f"{TABS}" * (nlevels + 1),
idx,
item)
else:
output_string += "{}".format(value)
return output_string
@staticmethod
def pretty_print(container_dict):
"""Tabulate and print out the container status."""
headers = ["container_id", "container_status", "command"]
data = []
for container in list(container_dict.keys()):
container_string = "Not in support DNN tasks."
if container_dict[container] != "":
container_string = container_dict[container]
data.append([
container.short_id,
container.status,
textwrap.fill(container_string, width=100)]
)
print(tabulate(data, headers=headers, tablefmt="rst"))
def launch_command(self, task_group, task, args):
"""Launch command in the respective docker.
Args:
            task_group(str): Name of the task group that the task belongs to.
            task(str): Name of the task from the entrypoint.
args(list): List of args to the task in the docker.
Returns:
No explicit returns.
"""
if task_group in self.task_map.keys():
task_map = self.task_map[task_group]
if task in list(task_map.keys()):
assert isinstance(args, list), (
"The arguments must be given as a list to be passed "
"to the docker. Got a {} instead".format(
type(args)
)
)
docker_logged_in(required_registry=task_map[task].docker_registry)
docker_handler = self.handler_map[
f"{task_map[task].docker_image}:{task_map[task].docker_tag}"
]
logger.info(
"Running command in container: {}".format(docker_handler.docker_image)
)
if args:
command = ""
if args[0] == "run":
args.pop(0)
else:
command = "{} ".format(task)
command += " ".join(args)
else:
logger.info(
"No commands provided to the launcher\n"
"Kicking off an interactive docker session.\n"
"NOTE: This container instance will be terminated "
"when you exit."
)
command = "/bin/bash"
# Running command in the container.
if os.getenv("CI_PROJECT_DIR", None) is not None:
docker_handler.run_container_on_ci(command)
else:
docker_handler.run_container(command)
else:
assert task_group in self.instance_handler_tasks, (
"The tasks provided must be in instance handlers tasks or supported DL tasks."
)
assert isinstance(args, argparse.Namespace), {
"The arguments passed to the instance tasks must be argpase.Namespace to a dictionary."
"Type got here is: {}".format(type(args))
}
if task_group == "list":
self.list_running_jobs()
elif task_group == "stop":
self.kill_containers(args.container_id, args.all)
elif task_group == "info":
self.print_information(verbose=args.verbose)
else:
raise NotImplementedError(
"Task asked for wasn't implemented. {}".format(task))
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/local_instance.py |
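As a quick orientation for the LocalInstance flow above, the sketch below builds an instance from the resolved launcher config, inspects its handler map and launches a task. It is illustrative only: the task group ("model"), task ("dino") and arguments are assumptions and must match whatever the installed launcher config actually defines.
# Illustrative sketch only; the task group/task names below are assumptions.
from nvidia_tao_cli.components.instance_handler.local_instance import LocalInstance
from nvidia_tao_cli.components.instance_handler.utils import get_config_file

config_file = get_config_file(entrypoint_type="tao")   # override config or packaged default.
instance = LocalInstance.from_config(config_file)      # builds the task -> docker map.
print(list(instance.handler_map.keys()))               # one DockerHandler per "image:tag".
# Roughly what `tao model dino run --help` would do on the CLI.
instance.launch_command("model", "dino", ["run", "--help"])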
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A TLT instance class containing all the requisite information to launch a TAO Toolkit command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Returns Docker or WHL instance with their supported tasks."""
import os
from nvidia_tao_cli.components.instance_handler.base_instance import INSTANCE_HANDLER_TASKS as CLI_TASKS
if os.environ.get('TAO_DOCKER_DISABLE', '0') != '0':
from nvidia_tao_cli.components.instance_handler.whl_instance import WHLInstance
else:
from nvidia_tao_cli.components.instance_handler.local_instance import LocalInstance
def get_launcher(launcher_config_file):
"""Choose between WHL and Docker based instance.
    Args:
        launcher_config_file (str): Path to the launcher config file.
    Returns:
        instance: Instantiated launcher instance (WHLInstance or LocalInstance).
        supported_tasks (list): List of tasks supported by this instance.
"""
assert os.environ.get('TAO_DOCKER_DISABLE', '0') in ['0', '1'], 'Invalid value for TAO_DOCKER_DISABLE'
if os.environ.get('TAO_DOCKER_DISABLE', '0') == '1':
instance = WHLInstance.from_config(launcher_config_file)
supported_tasks = [*instance.dl_tasks]
else:
instance = LocalInstance.from_config(launcher_config_file)
supported_tasks = [*CLI_TASKS, *instance.dl_tasks]
return instance, supported_tasks
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/builder.py |
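A minimal sketch of exercising the selector above: TAO_DOCKER_DISABLE decides whether the wheel-based or docker-based instance is returned. The environment variable must be set before the builder module is imported, since the import itself is gated on it.
# Minimal sketch: choose the docker-based launcher explicitly ("1" selects WHLInstance).
import os
os.environ["TAO_DOCKER_DISABLE"] = "0"
from nvidia_tao_cli.components.instance_handler.builder import get_launcher
from nvidia_tao_cli.components.instance_handler.utils import get_config_file

instance, supported_tasks = get_launcher(get_config_file())
print(type(instance).__name__, supported_tasks)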
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the TAO Toolkit instance handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import shutil
logger = logging.getLogger(__name__)
# Setup default paths.
DOCKER_CONFIG = os.path.expanduser("~/.docker/config.json")
# Launcher config and drive maps.
OVERRIDE_CONFIG = os.path.expanduser("~/.tao/config.json")
DEPLOY_OVERRIDE_CONFIG = os.path.expanduser("~/.tao/config_deploy.json")
# Docker registries supported.
INTERNAL = os.getenv("LAUNCHER_MODE_INTERNAL", "0") == "1"
REQUIRED_REGISTRIES = ["nvcr.io"]
if INTERNAL:
REQUIRED_REGISTRIES.append("stg.nvcr.io")
def up_directories(path, n):
"""Recursively travel up the directory tree."""
if n == 0:
return os.path.dirname(path)
return up_directories(os.path.dirname(path), n - 1)
def get_config_file(entrypoint_type='tao'):
"""Download a config file to the config_dir.
Args:
entrypoint_type (str): Which type of entrypoint to use. (Choices: [tao, tao-deploy]).
Returns:
config_file (str): Path to the config file.
"""
assert entrypoint_type in ['tao', 'tao-deploy'], f"Incorrect entrypoint type named {entrypoint_type}"
if entrypoint_type == "tao-deploy":
if os.path.exists(DEPLOY_OVERRIDE_CONFIG) and os.path.isfile(DEPLOY_OVERRIDE_CONFIG):
logger.info("Initializing configuration from: {}".format(DEPLOY_OVERRIDE_CONFIG))
return DEPLOY_OVERRIDE_CONFIG
config_dir = os.path.join(up_directories(__file__, 2), "config")
config_file = os.path.join(config_dir, "config_deploy.json")
else:
if os.path.exists(OVERRIDE_CONFIG) and os.path.isfile(OVERRIDE_CONFIG):
logger.info("Initializing configuration from: {}".format(OVERRIDE_CONFIG))
return OVERRIDE_CONFIG
config_dir = os.path.join(up_directories(__file__, 2), "config")
config_file = os.path.join(config_dir, "config.json")
logger.debug("Loading default config file from: {}".format(config_file))
return config_file
def load_config_file(config_path):
"""Load a config file and return it's data.
Args:
config_path(str): Unix style path to the config file.
Returns:
data(dict): Parsed config file.
"""
assert os.path.exists(config_path), (
"Config path must be a valid unix path. "
"No file found at: {}. Did you run docker login?".format(config_path)
)
# Read the config file and load the data.
with open(config_path, 'r') as cfile:
data = json.load(cfile)
return data
def validate_config_file(config_path):
"""Validate a TAO Toolkit config file.
Args:
config_file(str): Unix style path to store the config file.
Returns:
True/False: Boolean of whether the downloaded file was valid.
"""
data = load_config_file(config_path)
# TODO @vpraveen: This needs to change to the mdf5 based validation
# once the config file has been formatted.
return isinstance(data, dict)
def update_config_file(tmpdir_path, config_file_path):
"""Update the current config file and move the previous ones to a new location.
    This function takes the latest config file downloaded to a tmpdir, backs up
    the previous config file (versioned by its toolkit_version) and places the
    new config at config_file_path, where the local instance expects a valid
    config file.
    **This function has been deprecated**
    Args:
        tmpdir_path(str): Unix style path to the tmpdir where the instance
            config is downloaded.
        config_file_path(str): Unix style path to where the new config
            file should be placed.
Returns:
True/False: Status of a successful or failed update.
"""
target_config_dir = os.path.dirname(config_file_path)
# version the previous config files.
logger.info("Backing up older configs.")
# Move current config to config_1.json
toolkit_version = load_config_file(config_file_path)["toolkit_version"]
shutil.move(config_file_path, os.path.join(
target_config_dir, "config_{}.json".format(toolkit_version))
)
# Move downloaded directory to config.json
shutil.move(
os.path.join(tmpdir_path, "config.json"),
config_file_path
)
return True
def docker_logged_in(docker_config=DOCKER_CONFIG, required_registry=REQUIRED_REGISTRIES):
"""Simple function to warn the user the docker registry required hasn't been logged in."""
override_registry = os.getenv("OVERRIDE_REGISTRY", None)
if override_registry is None:
data = load_config_file(docker_config)
if "auths" not in list(data.keys()):
raise ValueError(
"Docker CLI hasn't been logged in to a registry."
"Please run `docker login nvcr.io`"
)
if not isinstance(required_registry, list):
required_registry = [required_registry]
logging.info("Registry: {}".format(required_registry))
registry_status = [registry in list(data["auths"].keys()) for registry in required_registry]
def error_msg(registry_status):
emsg = ""
for idx, status in enumerate(registry_status):
if not status:
emsg += "\nDocker not logged in to {}. Please run docker login {}".format(
required_registry[idx], required_registry[idx]
)
return emsg
        assert all(registry_status), error_msg(registry_status)
else:
logger.info("Skipping docker login check.")
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/utils.py |
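Since docker_logged_in above only inspects the "auths" section of ~/.docker/config.json, a hypothetical minimal file that satisfies the nvcr.io check looks like the dict below; the token value is a placeholder, not a real credential.
# Hypothetical ~/.docker/config.json shape that passes docker_logged_in for nvcr.io.
minimal_docker_config = {
    "auths": {
        "nvcr.io": {
            "auth": "PLACEHOLDER_BASE64_TOKEN"   # placeholder written by `docker login nvcr.io`
        }
    }
}
# With such a file on disk, docker_logged_in(required_registry=["nvcr.io"]) passes its assertions.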
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TAO Toolkit instance handler for launching jobs on Whl based non-docker instances."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import sys
import subprocess
from nvidia_tao_cli.components.instance_handler.base_instance import TAOInstance
from nvidia_tao_cli.components.instance_handler.utils import (
load_config_file,
)
from nvidia_tao_cli.components.types.task import Task
logger = logging.getLogger(__name__)
class WHLInstance(TAOInstance):
"""Instance handler class to define a TAO Toolkit instance."""
def __init__(self, task_map, config_path):
"""Initialize a Wheel based TAO Toolkit instance.
Args:
task_map(dict): Dictionary of task name to Task data structure.
"""
super(WHLInstance, self).__init__(
task_map=task_map,
)
self.current_config_path = config_path
logger.debug("Current config file imported from: {}".format(
self.current_config_path
))
@staticmethod
def load_config(config_path):
"""Function to load the json config file.
Args:
config_path(str): Unix style path to the config file.
Returns:
config_data(dict): Parsed config data.
"""
data = load_config_file(config_path)
return data
@staticmethod
def parse_launcher_config(config_data):
"""Parse launcher configuration data based on format version.
Args:
data(dict): Data containing configuration parameters for the launcher instance
Returns:
task_map(dict): Dictionary of tasks mapped to the respective dockers.
"""
if "format_version" not in config_data.keys():
raise KeyError("format is a required key in the launcher config.")
task_map = {}
docker_images = set()
if config_data["format_version"] == 1.0:
local_map = {}
for image in list(config_data["dockers"].keys()):
logger.debug("Processing {}".format(image))
docker_data = config_data["dockers"][image]
if "tasks" not in list(docker_data.keys()):
raise NotImplementedError(
"The config data must contain tasks associated with the "
"respective docker."
)
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=docker_data["docker_tag"],
docker_registry=docker_data["docker_registry"],
docker_digest=docker_data["docker_digest"] if "docker_digest" in docker_data.keys() else None
) for task in docker_data["tasks"]
})
docker_images.add(image)
task_map["container_actions"] = local_map
elif config_data["format_version"] == 2.0:
local_map = {}
for image in list(config_data["dockers"].keys()):
logger.debug("Processing {}".format(image))
docker_data = config_data["dockers"][image]
if not isinstance(docker_data, dict):
raise ValueError("Invalid format.")
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=tag,
docker_registry=docker_data[tag]["docker_registry"],
docker_digest=docker_data[tag]["docker_digest"] if "docker_digest" in docker_data[tag].keys() else None
) for tag in docker_data.keys() for task in docker_data[tag]["tasks"]
})
docker_images.add(image)
task_map["container_actions"] = local_map
elif config_data["format_version"] == 3.0:
for task_group, group_data in config_data["task_group"].items():
logger.debug("Configuring task group {task_group}".format(
task_group=task_group
))
local_map = {}
for image, image_data in group_data["dockers"].items():
logger.debug(
"Extracting tasks from docker {image}".format(
image=image
)
)
if not isinstance(image_data, dict):
raise ValueError(f"Invalid data format for images {type(image_data)} encountered.")
local_map.update({
task: Task(
name=task,
docker_image=image,
docker_tag=tag,
docker_registry=image_data[tag]["docker_registry"],
docker_digest=image_data[tag].get("docker_digest", None)
) for tag in image_data.keys() for task in image_data[tag]["tasks"]
})
docker_images.add(image)
task_map[task_group] = local_map
else:
raise NotImplementedError("Invalid format type: {}".format(config_data["format_version"]))
return task_map, docker_images
@classmethod
def from_config(cls, config_path):
"""Instantiate a TAO Toolkit instance from a config file.
Args:
config_path(str): Path to the launcher config file.
Returns:
Initialized WHLInstance object.
"""
config_data = cls.load_config(config_path)
task_map, _ = cls.parse_launcher_config(config_data)
debug_string = ""
for task_name, task in task_map.items():
debug_string += f"{task_name}: {str(task)}\n"
logger.debug(debug_string)
return WHLInstance(
task_map,
config_path
)
def launch_command(self, task_group, task, args):
"""Launch command for tasks.
Args:
            task_group(str): Name of the task group that the task belongs to.
            task(str): Name of the task from the entrypoint.
args(list): List of args to the task.
Returns:
No explicit returns.
"""
if task_group in self.task_map.keys():
task_map = self.task_map[task_group]
if task in list(task_map.keys()):
assert isinstance(args, list), (
"The arguments must be given as a list to be passed. "
"Got a {} instead".format(
type(args)
)
)
if args:
command = ""
if args[0] == "run":
args.pop(0)
else:
command = "{} ".format(task)
command += " ".join(args)
else:
logger.info(
"No commands provided to the launcher\n"
"Listing the help options "
"when you exit."
)
command += " -h"
try:
subprocess.check_call(
command,
shell=True,
stdout=sys.stdout
)
except subprocess.CalledProcessError as e:
if e.output is not None:
print("TAO Toolkit command run failed with error: {}".format(e.output))
sys.exit(-1)
else:
raise NotImplementedError(
"Task asked for wasn't implemented to run on WHL instance. {}".format(task))
else:
raise NotImplementedError(
f"Task group asked for wasn't implemented to run on WHL instance: {task_group}"
)
| tao_launcher-main | nvidia_tao_cli/components/instance_handler/whl_instance.py |
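The format_version 3.0 branch of parse_launcher_config above implies the config shape sketched here. The task group, image, tag and task names are made up for illustration; only the key structure mirrors what the parser reads.
# Hypothetical format_version 3.0 launcher config; names are illustrative only.
from nvidia_tao_cli.components.instance_handler.whl_instance import WHLInstance

sample_config = {
    "format_version": 3.0,
    "task_group": {
        "model": {
            "dockers": {
                "nvidia/tao/tao-toolkit": {
                    "x.y.z-pyt": {
                        "docker_registry": "nvcr.io",
                        "tasks": ["classification", "detection"]
                    }
                }
            }
        }
    }
}
task_map, images = WHLInstance.parse_launcher_config(sample_config)
print(list(task_map["model"].keys()))   # -> ['classification', 'detection']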
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A docker handler to interface with the docker.
This component is responsible for:
1. Interacting with the docker registry
2. Pulling the docker from the registry
3. Instantiating a docker locally.
4. Executing the command locally.
"""
import json
import logging
import os
import sys
import subprocess
import docker
from tabulate import tabulate
DOCKER_COMMAND = "docker"
DEFAULT_DOCKER_PATH = "unix://var/run/docker.sock"
VALID_PORT_PROTOCOLS = ["tcp", "udp", "sctp"]
VALID_DOCKER_ARGS = ["user", "ports", "shm_size", "ulimits", "privileged", "network", "tty"]
logger = logging.getLogger(__name__)
def get_default_mountsfile():
"""Get the default mounts file."""
default_mounts = "~/.tao_mounts.json"
if not os.path.exists(os.path.expanduser(default_mounts)):
print(
"~/.tao_mounts.json wasn't found. Falling back to obtain "
"mount points and docker configs from ~/.tao_mounts.json.\n"
"Please note that this will be deprecated going forward."
)
default_mounts = "~/.tao_mounts.json"
return default_mounts
DOCKER_MOUNT_FILE = get_default_mountsfile()
class DockerHandler(object):
"""Handler to control docker interactions.
This is an object to encapsulate the interactions of a docker container. It contains routines to
1. Start a container.
2. Launch a command
3. Inspect a container's processes
4. Stop a container.
"""
def __init__(self,
docker_registry=None,
image_name=None,
docker_tag=None,
docker_digest=None,
docker_mount_file=DOCKER_MOUNT_FILE,
docker_env_path=DEFAULT_DOCKER_PATH):
"""Initialize the docker handler object."""
self._docker_client = docker.from_env()
self._api_client = docker.APIClient(base_url=docker_env_path)
self._docker_registry = docker_registry
self._image_name = image_name
self._docker_mount_file = os.path.expanduser(docker_mount_file)
self._docker_tag = docker_tag
self._docker_digest = docker_digest
self.docker_exec_command = "docker exec"
self.initialized = True
self._container = None
@staticmethod
def _load_mounts_file(docker_mount_file):
"""Simple function to load the mount file."""
with open(docker_mount_file, "r") as mfile:
data = json.load(mfile)
return data
def _get_mount_env_data(self):
"""Get the mounts from the tao_mount.json file."""
mount_points = []
env_vars = []
docker_options = dict()
if not os.path.exists(self._docker_mount_file):
            logger.info(
                "Mounts file {} wasn't found. No mount points, env variables "
                "or docker options will be set.".format(self._docker_mount_file)
            )
return mount_points, env_vars, docker_options
# Load mounts file.
data = self._load_mounts_file(self._docker_mount_file)
# Extract mounts and environment variables.
assert "Mounts" in list(data.keys()), (
"Invalid json file. Requires Mounts key."
)
for key, value in data.items():
if key == "Mounts":
for mount in value:
assert 'source' in list(mount.keys()) and 'destination' in list(mount.keys()), (
"Mounts are not formatted correctly."
)
mount["source"] = os.path.realpath(
os.path.expanduser(mount["source"])
)
mount["destination"] = os.path.realpath(
os.path.expanduser(mount["destination"])
)
logger.debug("Source path: {}, Destination path: {}".format(mount["source"], mount["destination"]))
if not os.path.exists(mount['source']):
raise ValueError("Mount point source path doesn't exist. {}".format(mount['source']))
mount_points.append(mount)
elif key == "Envs":
for env_var in value:
assert "variable" in list(env_var.keys()) and "value" in list(env_var.keys()), (
"Env variables aren't formatter correctly."
)
env_vars.append(env_var)
elif key == "DockerOptions":
docker_options = value
else:
raise KeyError("Invalid field {} found in {} file.".format(key, self._docker_mount_file))
# Extract env variables.
return mount_points, env_vars, docker_options
def _check_image_exists(self):
"""Check if the image exists locally."""
image_list = self._docker_client.images.list()
assert isinstance(image_list, list), (
"image_list should be a list."
)
for image in image_list:
image_inspection_content = self._api_client.inspect_image(image.attrs["Id"])
if image_inspection_content["RepoTags"]:
if self.docker_image in image_inspection_content["RepoTags"]:
return True
return False
def pull(self):
"""Pull the base docker."""
logger.info(
"Pulling the required container. This may take several minutes if you're doing this for the first time. "
"Please wait here.\n...")
try:
repository = "{}/{}".format(self._docker_registry, self._image_name)
print("Pulling from repository: {}".format(repository))
self._api_client.pull(repository=repository, tag=self._docker_tag)
except docker.errors.APIError as e:
print("Docker pull failed. {}".format(e))
sys.exit(1)
logger.info("Container pull complete.")
@property
def docker_image(self):
"""Get the docker image name."""
if not self.initialized:
raise ValueError("Docker instance wasn't initialized")
return "{}/{}:{}".format(self._docker_registry,
self._image_name,
self._docker_tag)
@staticmethod
def formatted_mounts(mountpoints_list):
"""Get formatted mounts for the docker command."""
assert isinstance(mountpoints_list, list), (
"Mount points provided to format must be a list"
)
volumes = {}
for mount in mountpoints_list:
logger.debug("formatting the mounts")
source_path = os.path.realpath(mount["source"])
destination_path = os.path.realpath(mount["destination"])
volumes[source_path] = {
"bind": destination_path,
"mode": "rw"
}
return volumes
@staticmethod
def formatted_envs(env_var_list):
"""Get a formatted list of env vars for the docker."""
assert isinstance(env_var_list, list), (
"Env variables provided must be a list"
)
env_vars = [
"{}={}".format(
item["variable"],
item["value"]) for item in env_var_list
]
return env_vars
@staticmethod
def get_device_requests():
"""Create device requests for the docker."""
device_requests = [docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])]
return device_requests
@staticmethod
def get_docker_ulimits(name, value):
"""Get ulimits for the host config.
Args:
name (str): Name of the ulimit property.
value (str): Value of the property. This is the same value
set as soft and hard limit.
Return:
            docker_limit (docker.types.Ulimit): Ulimit object to be used
                with docker start.
"""
return docker.types.Ulimit(name=name, soft=value, hard=value)
def get_docker_option_args(self, docker_options):
"""Setting options for docker args.
Args:
docker_options (dict): Dictionary of docker config params.
Returns:
docker_args (dict): Keyword args for docker options to be
defined for docker start.
"""
docker_args = {}
if docker_options is not None:
for key, value in docker_options.items():
assert key in VALID_DOCKER_ARGS, (
"The parameter \"{}\" mentioned in the config file isn't a valid option."
"\nPlease choose one of the following: {}".format(key, VALID_DOCKER_ARGS)
)
if key == "ulimits":
assert isinstance(value, dict), (
"Ulimits should be a dictionary of params"
)
docker_args[key] = [
self.get_docker_ulimits(p_name, p_value)
for p_name, p_value in value.items()
]
else:
docker_args[key] = value
return docker_args
def start_container(self, volumes, env_vars, docker_options=None):
"""This will create a docker container."""
# Getting user limits for the docker.
docker_args = self.get_docker_option_args(docker_options)
if "user" not in list(docker_args.keys()):
logger.warning(
"\nDocker will run the commands as root. If you would like to retain your"
"\nlocal host permissions, please add the \"user\":\"UID:GID\" in the"
"\nDockerOptions portion of the \"{}\" file. You can obtain your"
"\nusers UID and GID by using the \"id -u\" and \"id -g\" commands on the"
"\nterminal.".format(self._docker_mount_file)
)
# Try instantiating a container and return error.
try:
logger.debug("Starting the TAO Toolkit Container: {}".format(self.docker_image))
tty = docker_args.get("tty", True)
if "tty" in docker_args.keys():
docker_args.pop("tty")
logger.info("Printing tty value {tty}".format(tty=tty))
self._container = self._docker_client.containers.run(
"{}".format(self.docker_image),
command=None,
device_requests=self.get_device_requests(),
tty=tty,
stderr=True,
stdout=True,
detach=True,
volumes=volumes,
environment=env_vars,
remove=True,
**docker_args
)
except docker.errors.APIError as e:
print("Docker instantiation failed with error: {}".format(e))
sys.exit(1)
def run_container(self, task_command):
"""Instantiating an instance of the TAO Toolkit docker."""
if not self._check_image_exists():
logger.info(
"The required docker doesn't exist locally/the manifest has changed. "
"Pulling a new docker.")
self.pull()
mount_data, env_vars, docker_options = self._get_mount_env_data()
volumes = self.formatted_mounts(mount_data)
env_variables = self.formatted_envs(env_vars)
        # Start the container if it isn't already running.
tty = True
if "tty" in docker_options.keys():
tty = docker_options["tty"]
self.start_container(volumes, env_variables, docker_options)
interactive_option = "-i"
if tty:
interactive_option = "-it"
formatted_command = "bash -c \'{} {} {} {}\'".format(
self.docker_exec_command,
interactive_option,
self._container.id,
task_command
)
logger.debug("volumes: {}".format(volumes))
logger.debug("formatted_command: {}\nExecuting the command.".format(formatted_command))
try:
subprocess.check_call(
formatted_command,
shell=True,
stdout=sys.stdout
)
except subprocess.CalledProcessError as e:
if e.output is not None:
print("TAO command run failed with error: {}".format(e.output))
if self._container:
logger.info("Stopping container post instantiation")
self.stop_container()
sys.exit(-1)
finally:
if self._container:
logger.info("Stopping container.")
self.stop_container()
def run_container_on_ci(self, task_command):
"""Simple function to run command on gitlab-ci."""
# TODO: @vpraveen: This is a temporary WAR to make sure that the
# gitlab tty issue doesn't block CI automation of notebooks.
# will need to revisit this asap before the code freeze.
if not self._check_image_exists():
logger.info(
"The required docker doesn't exist locally/the manifest has changed. "
"Pulling a new docker.")
self.pull()
mount_data, env_vars, docker_options = self._get_mount_env_data()
volumes = self.formatted_mounts(mount_data)
env_variables = self.formatted_envs(env_vars)
volume_args = " ".join(
[f"-v {source}:{value['bind']}:{value['mode']}" for source, value in volumes.items()]
)
env_args = " ".join(
[f"-e {env_var}" for env_var in env_variables]
)
        # Build the docker run command directly, since no container is started via the API on CI.
interactive_option = "-i"
if docker_options.get("tty", True):
interactive_option += "t"
docker_command = [
"docker run",
f"{interactive_option}",
"--rm",
"--gpus all",
f"{volume_args}",
f"{env_args}",
]
options = []
for option, value in docker_options.items():
if option == "privileged" and value:
options.append("--privileged")
if option == "shm_size":
options.append(f"--shm-size {value}")
docker_command.extend(options)
formatted_command = "{} {} {}".format(
" ".join(docker_command),
self.docker_image,
task_command
)
logger.debug("volumes: {}".format(volumes))
logger.debug("formatted_command: {}\nExecuting the command.".format(formatted_command))
try:
subprocess.check_call(
formatted_command,
shell=True,
stdout=sys.stdout
)
except subprocess.CalledProcessError as e:
if e.output is not None:
print("TAO command run failed with error: {}".format(e.output))
if self._container:
logger.info("Stopping container post instantiation")
self.stop_container()
sys.exit(-1)
def stop_container(self):
"""Stop an instantiated container."""
logger.debug("Stopping the container: {}".format(
self.container_id
))
self._container.stop()
@property
def container_id(self):
"""Get container id of the current handler."""
assert isinstance(self._container, docker.models.containers.Container), (
"The container object should be a docker.models.container.Container instance"
)
return self._container.short_id
def get_processes(self):
"""Get the list of processes running in the container."""
procs_list = self._container.top()
processes = procs_list["Processes"]
title = procs_list["Titles"]
logger.info(tabulate(processes, headers=title))
return processes, title
| tao_launcher-main | nvidia_tao_cli/components/docker_handler/docker_handler.py |
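_get_mount_env_data above accepts exactly three top-level keys in the mounts file: Mounts, Envs and DockerOptions. A hypothetical ~/.tao_mounts.json with that shape is sketched below; every path, variable and option value is a placeholder to adapt to your host.
# Hypothetical ~/.tao_mounts.json contents matching the keys DockerHandler parses.
sample_tao_mounts = {
    "Mounts": [
        {"source": "/home/user/tao-experiments",           # must exist on the host
         "destination": "/workspace/tao-experiments"}      # path inside the container
    ],
    "Envs": [
        {"variable": "CUDA_DEVICE_ORDER", "value": "PCI_BUS_ID"}
    ],
    "DockerOptions": {                                     # keys must be in VALID_DOCKER_ARGS
        "user": "1000:1000",
        "shm_size": "16G",
        "ulimits": {"memlock": -1, "stack": 67108864}
    }
}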
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker handler to encapsulate docker operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | nvidia_tao_cli/components/docker_handler/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entrypoint to the TAO Toolkit package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| tao_launcher-main | nvidia_tao_cli/entrypoint/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to launch TAO Toolkit commands."""
import argparse
import logging
import os
import sys
from nvidia_tao_cli.components.instance_handler.base_instance import INSTANCE_HANDLER_TASKS as CLI_TASKS
from nvidia_tao_cli.components.instance_handler.builder import get_launcher
from nvidia_tao_cli.components.instance_handler.utils import (
get_config_file
)
logger = logging.getLogger(__name__)
PYTHON_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def build_command_line_parser(parser=None, supported_tasks=None, launcher_instance=None):
"""Build command line parser for the TAO Toolkit launcher."""
if parser is None:
parser = argparse.ArgumentParser(
prog="tao", description="Launcher for TAO Toolkit.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=True
)
module_subparser = parser.add_subparsers(title="task_groups")
# Parser for tao subtasks.
for task_group in supported_tasks:
taskgroup_subparser = module_subparser.add_parser(
task_group,
parents=[parser],
add_help=False,
)
task_subparsers = taskgroup_subparser.add_subparsers(title="task")
if task_group not in CLI_TASKS:
for task in launcher_instance.task_map[task_group].keys():
logger.debug(
"Task group: {task_group} task {task}".format(
task_group=task_group,
task=task
)
)
task_parser = task_subparsers.add_parser(
task,
parents=[taskgroup_subparser],
add_help=False
)
task_parser.add_argument(
"script_args",
nargs=argparse.REMAINDER,
type=str,
default=None,
)
else:
if task_group == "stop":
# List of container id's to be closed.
taskgroup_subparser.add_argument(
"--container_id",
type=str,
nargs="+",
required=False,
default=None,
help="Ids of the containers to be stopped."
)
# Force shutdown all containers.
taskgroup_subparser.add_argument(
"--all",
action="store_true",
default=False,
help="Kill all running TAO Toolkit containers.",
required=False
)
elif task_group == "info":
taskgroup_subparser.add_argument(
"--verbose",
action="store_true",
default=False,
help="Print information about the TAO Toolkit instance."
)
else:
pass
return parser
def main(args=sys.argv[1:]):
"""TLT entrypoint script to the TAO Toolkit Launcher."""
verbosity = logging.INFO
if os.getenv("TAO_LAUNCHER_DEBUG", "0") == "1":
verbosity = logging.DEBUG
# Configuring the logger.
logging.basicConfig(
format='%(asctime)s [TAO Toolkit] [%(levelname)s] %(name)s %(lineno)d: %(message)s',
level=verbosity
)
# Get the default list of tasks to be supported.
launcher_config_file = get_config_file()
instance, supported_tasks = get_launcher(launcher_config_file)
# Build cascaded command line parser.
parser = build_command_line_parser(parser=None, supported_tasks=supported_tasks, launcher_instance=instance)
if not args:
args = ["--help"]
task_group = args[0]
task = None
if len(args) < 2:
if task_group not in instance.instance_handler_tasks:
args += ["--help"]
else:
if args[1] != "--help":
task = args[1]
# Run tasks in container only if the task group and tasks are supported.
if task_group in instance.task_map.keys() and task in instance.task_map[task_group].keys():
instance.launch_command(
task_group,
task,
args[2:]
)
else:
logger.debug("Running command.")
parsed_args, unknown_args = parser.parse_known_args(args) # noqa pylint: disable=W0612
# TODO: CLI related actions to be implemented
# --> init (to download the config, validate and place it at ~/.tao/config.json)
# --> update (to download the latest config and update the config in the default path.)
# --> list (to list active TAO Toolkit container instances.)
# run_cli_instruction(task, parsed_args)
logger.debug(parsed_args)
instance.launch_command(
task_group,
task,
parsed_args
)
if __name__ == "__main__":
main(sys.argv[1:])
| tao_launcher-main | nvidia_tao_cli/entrypoint/tao_launcher.py |
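For reference, main() above routes the list/stop/info task groups through argparse while forwarding everything else raw to the container. A hedged sketch of driving it programmatically, assuming a valid launcher config and docker setup are already in place:
# Illustrative invocations only; they assume a working launcher config and docker setup.
from nvidia_tao_cli.entrypoint.tao_launcher import main

main(["info", "--verbose"])     # CLI task: parsed with argparse, handled by print_information().
main(["list"])                  # CLI task: tabulates running TAO Toolkit containers.
# Container tasks bypass argparse and pass raw args through to the docker, e.g.:
# main(["model", "dino", "run", "--help"])   # task group/task names are assumptions.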
#!/usr/bin/python
import sys
import re
from submodules.rules import rules
def main():
with open(sys.argv[1], "r") as fp:
lines = fp.readlines()
for idx, line in enumerate(lines):
if line.strip() == "# ------------------------ >8 ------------------------":
break
if line[0] == "#":
continue
if not line_valid(idx, line):
print(f"line# {idx} failed")
show_rules()
sys.exit(1)
sys.exit(0)
def line_valid(idx, line):
if idx == 0:
#return re.match("^[A-Z].{,48}[0-9A-z \t]$", line)
return re.match("^\[((?!\s*$).{0,15})\][ \t].*?[A-Z].{0,48}[0-9A-z \t]$", line)
else:
return len(line.strip()) <= 72
def show_rules():
print(rules)
if __name__ == "__main__":
main() | tao_launcher-main | scripts/git-hooks/commit-msg.py |
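The subject-line rule enforced in line_valid above can be sanity-checked directly against the regex; both example subjects below are made up.
# Quick check of the subject-line rule from commit-msg.py; example subjects are made up.
import re

SUBJECT_RE = r"^\[((?!\s*$).{0,15})\][ \t].*?[A-Z].{0,48}[0-9A-z \t]$"

print(bool(re.match(SUBJECT_RE, "[launcher] Add WHL instance support")))   # True: "[MODULE] Subject"
print(bool(re.match(SUBJECT_RE, "add whl instance support")))              # False: missing [MODULE] prefix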
tao_launcher-main | scripts/git-hooks/submodules/__init__.py |
|
rules = """
# Failing to add message in the mentioned format will
# cause your local commit fail.
#
# Please follow these rules for commit messages:
# ==============================================
# 1. Commit message format - first line is mandatory
# [YOUR_MODULE_NAME] Subject line here not exceeding 50 characters
# * Optional line entry with detail not exceeding 72 characters
# * Optional line entry with detail not exceeding 72 characters
# * Optional line entry with detail not exceeding 72 characters
# 2. Limit the module name (YOUR_MODULE_NAME) to 15 characters length
# 3. Limit the subject(Text part after [YOUR_MODULE_NAME]) line to max
# 50 characters
# 4. Start subject (Text part after [YOUR_MODULE_NAME]) with a Capital
# letter and don't end with a period '.'
# 5. Wrap the body lines (if any) at 72 characters
""" | tao_launcher-main | scripts/git-hooks/submodules/rules.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
from datetime import date
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
year = date.today().year
project = u'NVIDIA Cloud Native Technologies'
copyright = u'2018-%s, NVIDIA Corporation' % year
author = u'NVIDIA Corporation'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
copybutton_prompt_text = "$ "
copybutton_only_copy_prompt_lines = False
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'sphinx_copybutton',
'sphinx.ext.autosectionlabel',
'sphinx_tabs.tabs',
'sphinxcontrib.blockdiag',
'sphinx.ext.intersphinx',
]
blockdiag_fontpath = '/usr/share/fonts/truetype/Roboto-Regular.ttf'
blockdiag_antialias = True
# http://blockdiag.com/en/blockdiag/examples.html
# color picker: https://www.rapidtables.com/web/color/RGB_Color.html
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
html_additional_pages = {
"index": "cnt-landing-page.html",
"contents": "cnt-landing-page.html"
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'contents'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'https://docs.nvidia.com/datacenter/cloud-native/',
'collapse_navigation': True,
'sticky_navigation': True,
'display_version': False,
'logo_only': True,
'navigation_depth': 2,
'includehidden': True,
'prev_next_buttons_location': "bottom",
'style_external_links': False,
}
html_logo = "assets/NVLogo_H_B&W.png"
# Download favicon and set it (the variable `html_favicon`) for this project.
# It must be relative path.
html_favicon = "_static/nvidia.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = ['css/custom.css']
html_js_files = [
'js/google-analytics/google-analytics-tracker.js',
'js/google-analytics/google-analytics-write.js',
'//assets.adobedtm.com/b92787824f2e0e9b68dc2e993f9bd995339fe417/satelliteLib-7ba51e58dc61bcb0e9311aadd02a0108ab24cc6c.js',
]
# Add timestamp to each page
html_last_updated_fmt = '%Y-%m-%d'
intersphinx_mapping = {
'dcgm': ('https://docs.nvidia.com/datacenter/dcgm/latest/', None),
}
source_parsers = {
'.md': 'myst_parser.sphinx_',
}
def setup(app):
count_unique_visitor_script = os.getenv("ADD_NVIDIA_VISITS_COUNTING_SCRIPT")
if count_unique_visitor_script:
app.add_js_file(count_unique_visitor_script)
| cloud-native-docs-master | conf.py |
# Use this file to bootstrap packman into your Python environment (3.7.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 7:
raise RuntimeError(
f"This version of packman requires Python 3.7.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
root = os.path.join(os.path.expanduser("~"), "/Library/Application Support/packman-cache")
elif platform_name == "Linux":
try:
cache_root = os.environ["XDG_HOME_CACHE"]
except KeyError:
cache_root = os.path.join(os.path.expanduser("~"), ".cache")
return os.path.join(cache_root, "packman")
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
| cloud-native-docs-master | tools/packman/packmanconf.py |
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
    # This function renames the temp staging folder to folder_name; the parent path must already exist.
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
install_package(sys.argv[1], sys.argv[2])
| cloud-native-docs-master | tools/packman/bootstrap/install_package.py |
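call_with_retry above wraps any filesystem operation in a bounded retry loop. A small usage sketch with a deliberately flaky callable; it assumes call_with_retry is in scope (for example, pasted into this module).
# Usage sketch for call_with_retry; assumes call_with_retry from the module above is in scope.
attempts = {"n": 0}

def flaky_remove():
    """Fail twice with a transient OSError, then succeed."""
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise OSError("simulated transient failure")
    return "ok"

result = call_with_retry("flaky_remove", flaky_remove, retry_count=5, retry_delay=0.01)
print(result, "after", attempts["n"], "attempts")   # -> ok after 3 attempts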
import os
import sys
import io
import contextlib
import packmanapi
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml")
def bootstrap():
"""
Bootstrap all omni.repo modules.
Pull with packman from repo.packman.xml and add them all to python sys.path to enable importing.
"""
with contextlib.redirect_stdout(io.StringIO()):
deps = packmanapi.pull(REPO_DEPS_FILE)
for dep_path in deps.values():
if dep_path not in sys.path:
sys.path.append(dep_path)
if __name__ == "__main__":
bootstrap()
import omni.repo.man
omni.repo.man.main(REPO_ROOT)
| cloud-native-docs-master | tools/repoman/repoman.py |
# Use this file to bootstrap packman into your Python environment (3.7.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 7:
raise RuntimeError(
f"This version of packman requires Python 3.7.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
root = "/Library/Caches/packman"
elif platform_name == "Linux":
root = "/var/tmp/packman"
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
| cub-master | docs/tools/packman/packmanconf.py |
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
    # This function renames the temp staging folder to folder_name; the parent path must already exist.
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# If the rename failed because the folder now exists, we can assume that another packman
# process managed to update the package before us; in all other cases we re-raise the exception.
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
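# Usage sketch (illustrative only): call_with_retry() re-runs the wrapped callable on
# OSError/IOError, sleeping retry_delay seconds between attempts and re-raising once the
# retries are exhausted. The path below is a placeholder.
#
#   call_with_retry(
#       "remove staging dir",
#       lambda: remove_directory_item("/packages/chk/ver-abc123"),
#       retry_count=3,
#       retry_delay=1.0,
#   )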
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
if __name__ == "__main__":
install_package(sys.argv[1], sys.argv[2])
| cub-master | docs/tools/packman/bootstrap/install_package.py |
import os
import sys
import io
import contextlib
import packmanapi
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml")
def bootstrap():
"""
Bootstrap all omni.repo modules.
Pull with packman from repo.packman.xml and add them all to python sys.path to enable importing.
"""
#with contextlib.redirect_stdout(io.StringIO()):
deps = packmanapi.pull(REPO_DEPS_FILE)
for dep_path in deps.values():
if dep_path not in sys.path:
sys.path.append(dep_path)
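# Note (inferred from the code above, not an official API description): packmanapi.pull()
# is expected to return a mapping whose values are the local paths of the pulled
# dependencies; each path is appended to sys.path so the omni.repo.* modules imported in
# the __main__ block below become available.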
if __name__ == "__main__":
bootstrap()
import omni.repo.man
omni.repo.man.main(REPO_ROOT)
| cub-master | docs/tools/repoman/repoman.py |
#!/usr/bin/env python
import hpccm
hpccm.config.set_container_format('docker')
Stage0 += hpccm.primitives.baseimage(image='nvidia/cuda:12.1.0-devel-ubuntu22.04')
Stage0 += hpccm.building_blocks.apt_get(ospackages=['git', 'tmux', 'gcc', 'g++', 'vim', 'python3', 'python-is-python3', 'ninja-build'])
Stage0 += hpccm.building_blocks.llvm(version='15', extra_tools=True, toolset=True)
Stage0 += hpccm.building_blocks.cmake(eula=True, version='3.26.3')
Stage0 += hpccm.building_blocks.nsight_compute(eula=True, version='2023.1.1')
Stage0 += hpccm.building_blocks.pip(packages=['fpzip', 'numpy', 'pandas'], pip='pip3')
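# Usage sketch (assumption: the recipe is rendered with the standard hpccm CLI, which
# provides the Stage0 object; this is not stated in the file itself):
#
#   hpccm --recipe recipe.py > Dockerfile
#   docker build -t cub-benchmark-env .   # image tag is a placeholder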
| cub-master | benchmarks/docker/recipe.py |
#!/usr/bin/env python3
import cub.bench as bench
# TODO:
# - driver version
# - host compiler + version
# - gpu clocks / pm
# - ecc
def main():
center_estimator = bench.MedianCenterEstimator()
bench.search(bench.BruteForceSeeker(center_estimator, center_estimator))
if __name__ == "__main__":
main()
| cub-master | benchmarks/scripts/search.py |
#!/usr/bin/env python3
import sys
import argparse
import cub.bench
def parse_arguments():
parser = argparse.ArgumentParser(description='Verify tuning variant')
parser.add_argument('--variant', type=str, help='Variant to verify', default=None, required=True)
variant = parser.parse_known_args()[0].variant
sys.argv.remove('--variant={}'.format(variant))
return variant
def workload_header(ct_workload_space, rt_workload_space):
for ct_workload in ct_workload_space:
for rt_workload in rt_workload_space:
workload_point = ct_workload + rt_workload
return ", ".join([x.split('=')[0] for x in workload_point])
def workload_entry(ct_workload, rt_workload):
workload_point = ct_workload + rt_workload
return ", ".join([x.split('=')[1] for x in workload_point])
class VerifySeeker:
def __init__(self, variant_label):
self.label = variant_label
self.estimator = cub.bench.MedianCenterEstimator()
def __call__(self, algname, ct_workload_space, rt_workload_space):
variant_point = cub.bench.Config().label_to_variant_point(algname, self.label)
print("{}, MinS, MedianS, MaxS".format(workload_header(ct_workload_space, rt_workload_space)))
for ct_workload in ct_workload_space:
bench = cub.bench.Bench(algname, variant_point, list(ct_workload))
if bench.build():
base = bench.get_base()
for rt_workload in rt_workload_space:
workload_point = ct_workload + rt_workload
base_samples, base_elapsed = base.do_run(workload_point, None)
variant_samples, _ = bench.do_run(workload_point, base_elapsed * 10)
min_speedup = min(base_samples) / min(variant_samples)
median_speedup = self.estimator(base_samples) / self.estimator(variant_samples)
max_speedup = max(base_samples) / max(variant_samples)
point_str = workload_entry(ct_workload, rt_workload)
print("{}, {}, {}, {}".format(point_str, min_speedup, median_speedup, max_speedup))
def main():
cub.bench.search(VerifySeeker(parse_arguments()))
if __name__ == "__main__":
main()
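# Command-line usage sketch (--variant is required by parse_arguments() above; the label
# below is a placeholder, real labels come from cub.bench.Config()):
#
#   ./verify.py --variant='<variant-label>'
#
# For every compile-time workload that builds, the script prints the min/median/max speedup
# of the variant over the baseline across the runtime workload space.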
| cub-master | benchmarks/scripts/verify.py |
#!/usr/bin/env python3
import os
import re
import cub
import math
import argparse
import itertools
import functools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu
from scipy.stats.mstats import hdquantiles
pd.options.display.max_colwidth = 100
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
color_cycle = itertools.cycle(default_colors)
color_map = {}
precision = 0.01
sensitivity = 0.5
def get_bench_columns():
return ['variant', 'elapsed', 'center', 'samples']
def get_extended_bench_columns():
return get_bench_columns() + ['speedup', 'base_samples']
def compute_speedup(df):
bench_columns = get_bench_columns()
workload_columns = [col for col in df.columns if col not in bench_columns]
base_df = df[df['variant'] == 'base'].drop(columns=['variant']).rename(
columns={'center': 'base_center', 'samples': 'base_samples'})
base_df.drop(columns=['elapsed'], inplace=True)
merged_df = df.merge(
base_df, on=[col for col in df.columns if col in workload_columns])
merged_df['speedup'] = merged_df['base_center'] / merged_df['center']
merged_df = merged_df.drop(columns=['base_center'])
return merged_df
def get_ct_axes(df):
ct_axes = []
for col in df.columns:
if '{ct}' in col:
ct_axes.append(col)
return ct_axes
def get_rt_axes(df):
rt_axes = []
excluded_columns = get_ct_axes(df) + get_extended_bench_columns()
for col in df.columns:
if col not in excluded_columns:
rt_axes.append(col)
return rt_axes
def ct_space(df):
ct_axes = get_ct_axes(df)
unique_ct_combinations = []
for _, row in df[ct_axes].drop_duplicates().iterrows():
unique_ct_combinations.append({})
for col in ct_axes:
unique_ct_combinations[-1][col] = row[col]
return unique_ct_combinations
def extract_case(df, ct_point):
tuning_df_loc = None
for ct_axis in ct_point:
if tuning_df_loc is None:
tuning_df_loc = (df[ct_axis] == ct_point[ct_axis])
else:
tuning_df_loc = tuning_df_loc & (df[ct_axis] == ct_point[ct_axis])
tuning_df = df.loc[tuning_df_loc].copy()
for ct_axis in ct_point:
tuning_df.drop(columns=[ct_axis], inplace=True)
return tuning_df
def extract_rt_axes_values(df):
rt_axes = get_rt_axes(df)
rt_axes_values = {}
for rt_axis in rt_axes:
rt_axes_values[rt_axis] = list(df[rt_axis].unique())
return rt_axes_values
def extract_rt_space(df):
rt_axes = get_rt_axes(df)
rt_axes_values = []
for rt_axis in rt_axes:
values = df[rt_axis].unique()
rt_axes_values.append(["{}={}".format(rt_axis, v) for v in values])
return list(itertools.product(*rt_axes_values))
def filter_variants(df, group):
rt_axes = get_rt_axes(df)
unique_combinations = set(
df[rt_axes].drop_duplicates().itertuples(index=False))
group_combinations = set(
group[rt_axes].drop_duplicates().itertuples(index=False))
has_all_combinations = group_combinations == unique_combinations
return has_all_combinations
def extract_complete_variants(df):
return df.groupby('variant').filter(functools.partial(filter_variants, df))
def compute_workload_score(rt_axes_values, rt_axes_ids, weight_matrix, row):
rt_workload = []
for rt_axis in rt_axes_values:
rt_workload.append("{}={}".format(rt_axis, row[rt_axis]))
weight = cub.bench.get_workload_weight(rt_workload, rt_axes_values, rt_axes_ids, weight_matrix)
return row['speedup'] * weight
def compute_variant_score(rt_axes_values, rt_axes_ids, weight_matrix, group):
workload_score_closure = functools.partial(compute_workload_score, rt_axes_values, rt_axes_ids, weight_matrix)
score_sum = group.apply(workload_score_closure, axis=1).sum()
return score_sum
def extract_scores(df):
rt_axes_values = extract_rt_axes_values(df)
rt_axes_ids = cub.bench.compute_axes_ids(rt_axes_values)
weight_matrix = cub.bench.compute_weight_matrix(rt_axes_values, rt_axes_ids)
score_closure = functools.partial(compute_variant_score, rt_axes_values, rt_axes_ids, weight_matrix)
grouped = df.groupby('variant')
scores = grouped.apply(score_closure).reset_index()
scores.columns = ['variant', 'score']
stat = grouped.agg(mins = ('speedup', 'min'),
means = ('speedup', 'mean'),
maxs = ('speedup', 'max'))
result = pd.merge(scores, stat, on='variant')
return result.sort_values(by=['score'], ascending=False)
def distributions_are_different(alpha, row):
ref_samples = row['base_samples']
cmp_samples = row['samples']
# H0: the distributions are not different
# H1: the distributions are different
_, p = mannwhitneyu(ref_samples, cmp_samples)
# Reject H0
return p < alpha
def remove_matching_distributions(alpha, df):
closure = functools.partial(distributions_are_different, alpha)
return df[df.apply(closure, axis=1)]
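# Note (inferred from the two helpers above and their use in case_top below): when --alpha
# is set below 1.0, rows whose variant samples cannot be distinguished from the baseline
# samples by the Mann-Whitney U test (p >= alpha) are dropped, so only statistically
# significant speedups or slowdowns reach the scoring step.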
def get_filenames_map(arr):
if not arr:
return []
prefix = arr[0]
for string in arr:
while not string.startswith(prefix):
prefix = prefix[:-1]
if not prefix:
break
return {string: string[len(prefix):] for string in arr}
def iterate_case_dfs(args, callable):
storages = {}
algnames = set()
filenames_map = get_filenames_map(args.files)
for file in args.files:
storage = cub.bench.StorageBase(file)
algnames.update(storage.algnames())
storages[filenames_map[file]] = storage
pattern = re.compile(args.R)
for algname in algnames:
if not pattern.match(algname):
continue
case_dfs = {}
for file in storages:
storage = storages[file]
df = storage.alg_to_df(algname)
with pd.option_context('mode.use_inf_as_na', True):
df = df.dropna(subset=['center'], how='all')
for _, row in df[['ctk', 'cub']].drop_duplicates().iterrows():
ctk_version = row['ctk']
cub_version = row['cub']
ctk_cub_df = df[(df['ctk'] == ctk_version) &
(df['cub'] == cub_version)]
for gpu in ctk_cub_df['gpu'].unique():
target_df = ctk_cub_df[ctk_cub_df['gpu'] == gpu]
target_df = target_df.drop(columns=['ctk', 'cub', 'gpu'])
target_df = compute_speedup(target_df)
for ct_point in ct_space(target_df):
point_str = ", ".join(["{}={}".format(k, ct_point[k]) for k in ct_point])
case_df = extract_complete_variants(extract_case(target_df, ct_point))
case_df['variant'] = case_df['variant'].astype(str) + " ({})".format(file)
if point_str not in case_dfs:
case_dfs[point_str] = case_df
else:
case_dfs[point_str] = pd.concat([case_dfs[point_str], case_df])
for point_str in case_dfs:
callable(algname, point_str, case_dfs[point_str])
def case_top(alpha, N, algname, ct_point_name, case_df):
print("{}[{}]:".format(algname, ct_point_name))
if alpha < 1.0:
case_df = remove_matching_distributions(alpha, case_df)
case_df = extract_complete_variants(case_df)
print(extract_scores(case_df).head(N))
def top(args):
iterate_case_dfs(args, functools.partial(case_top, args.alpha, args.top))
def case_coverage(algname, ct_point_name, case_df):
num_variants = cub.bench.Config().variant_space_size(algname)
num_covered_variants = len(case_df['variant'].unique())
coverage = (num_covered_variants / num_variants) * 100
case_str = "{}[{}]".format(algname, ct_point_name)
print("{} coverage: {} / {} ({:.4f}%)".format(
case_str, num_covered_variants, num_variants, coverage))
def coverage(args):
iterate_case_dfs(args, case_coverage)
def qrde_hd(samples):
"""
Computes quantile-respectful density estimation based on the Harrell-Davis
quantile estimator. The implementation is based on the following post:
https://aakinshin.net/posts/qrde-hd by Andrey Akinshin
"""
min_sample, max_sample = min(samples), max(samples)
num_quantiles = math.ceil(1.0 / precision)
quantiles = np.linspace(precision, 1 - precision, num_quantiles - 1)
hd_quantiles = [min_sample] + list(hdquantiles(samples, quantiles)) + [max_sample]
width = [hd_quantiles[idx + 1] - hd_quantiles[idx] for idx in range(num_quantiles)]
p = 1.0 / precision
height = [1.0 / (p * w) for w in width]
return width, height
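# Note (derived from the code above): qrde_hd() produces one bin per `precision` step of
# probability mass, so width[i] * height[i] == precision for every bin and the bins
# integrate to 1, which is what makes the result usable as a density estimate in
# hd_displot() below.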
def extract_peaks(pdf):
peaks = []
for i in range(1, len(pdf) - 1):
if pdf[i - 1] < pdf[i] > pdf[i + 1]:
peaks.append(i)
return peaks
def extract_modes(samples):
"""
Extract modes from the given samples based on the lowland algorithm:
https://aakinshin.net/posts/lowland-multimodality-detection/ by Andrey Akinshin
The implementation is based on the LowlandModalityDetector class from
https://github.com/AndreyAkinshin/perfolizer.
"""
mode_ids = []
widths, heights = qrde_hd(samples)
peak_ids = extract_peaks(heights)
bin_area = 1.0 / len(heights)
x = min(samples)
peak_xs = []
peak_ys = []
bin_lower = [x]
for idx in range(len(heights)):
if idx in peak_ids:
peak_ys.append(heights[idx])
peak_xs.append(x + widths[idx] / 2)
x += widths[idx]
bin_lower.append(x)
def lowland_between(mode_candidate, left_peak, right_peak):
left, right = left_peak, right_peak
min_height = min(heights[left_peak], heights[right_peak])
while left < right and heights[left] > min_height:
left += 1
while left < right and heights[right] > min_height:
right -= 1
width = bin_lower[right + 1] - bin_lower[left]
total_area = width * min_height
total_bin_area = (right - left + 1) * bin_area
if total_bin_area / total_area < sensitivity:
mode_ids.append(mode_candidate)
return True
return False
previousPeaks = [peak_ids[0]]
for i in range(1, len(peak_ids)):
currentPeak = peak_ids[i]
while previousPeaks and heights[previousPeaks[-1]] < heights[currentPeak]:
if lowland_between(previousPeaks[0], previousPeaks[-1], currentPeak):
previousPeaks = []
else:
previousPeaks.pop()
if previousPeaks and heights[previousPeaks[-1]] > heights[currentPeak]:
if lowland_between(previousPeaks[0], previousPeaks[-1], currentPeak):
previousPeaks = []
previousPeaks.append(currentPeak)
mode_ids.append(previousPeaks[0])
return mode_ids
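# Note (rough summary of lowland_between() above, following Akinshin's description): the
# probability mass between two neighbouring peaks is compared against the rectangle spanned
# by the lower of the two peaks; when the mass fills less than `sensitivity` (0.5) of that
# rectangle, the valley counts as a "lowland" and the candidate peak is recorded as a
# separate mode.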
def hd_displot(samples, label, ax):
if label not in color_map:
color_map[label] = next(color_cycle)
color = color_map[label]
widths, heights = qrde_hd(samples)
mode_ids = extract_modes(samples)
min_sample, max_sample = min(samples), max(samples)
xs = [min_sample]
ys = [0]
peak_xs = []
peak_ys = []
x = min(samples)
for idx in range(len(widths)):
xs.append(x + widths[idx] / 2)
ys.append(heights[idx])
if idx in mode_ids:
peak_ys.append(heights[idx])
peak_xs.append(x + widths[idx] / 2)
x += widths[idx]
xs = xs + [max_sample]
ys = ys + [0]
ax.fill_between(xs, ys, 0, alpha=0.4, color=color)
quartiles_of_interest = [0.25, 0.5, 0.75]
for quartile in quartiles_of_interest:
bin = int(quartile / precision) + 1
ax.plot([xs[bin], xs[bin]], [0, ys[bin]], color=color)
ax.plot(xs, ys, label=label, color=color)
ax.plot(peak_xs, peak_ys, 'o', color=color)
ax.legend()
def displot(data, ax):
for variant in data:
hd_displot(data[variant], variant, ax)
def variant_ratio(data, variant, ax):
if variant not in color_map:
color_map[variant] = next(color_cycle)
color = color_map[variant]
variant_samples = data[variant]
base_samples = data['base']
variant_widths, variant_heights = qrde_hd(variant_samples)
base_widths, base_heights = qrde_hd(base_samples)
quantiles = []
ratios = []
base_x = min(base_samples)
variant_x = min(variant_samples)
for i in range(1, len(variant_heights) - 1):
base_x += base_widths[i] / 2
variant_x += variant_widths[i] / 2
quantiles.append(i * precision)
ratios.append(base_x / variant_x)
ax.plot(quantiles, ratios, label=variant, color=color)
ax.axhline(1, color='red', alpha=0.7)
ax.legend()
ax.tick_params(axis='both', direction='in', pad=-22)
def ratio(data, ax):
for variant in data:
if variant != 'base':
variant_ratio(data, variant, ax)
def case_variants(pattern, mode, algname, ct_point_name, case_df):
title = "{}[{}]:".format(algname, ct_point_name)
df = case_df[case_df['variant'].str.contains(pattern, regex=True)].reset_index(drop=True)
rt_axes = get_rt_axes(df)
rt_axes_values = extract_rt_axes_values(df)
vertical_axis_name = rt_axes[0]
if 'Elements{io}[pow2]' in rt_axes:
vertical_axis_name = 'Elements{io}[pow2]'
horizontal_axes = rt_axes
horizontal_axes.remove(vertical_axis_name)
vertical_axis_values = rt_axes_values[vertical_axis_name]
vertical_axis_ids = {}
for idx, val in enumerate(vertical_axis_values):
vertical_axis_ids[val] = idx
def extract_horizontal_space(df):
values = []
for rt_axis in horizontal_axes:
values.append(["{}={}".format(rt_axis, v) for v in df[rt_axis].unique()])
return list(itertools.product(*values))
if len(horizontal_axes) > 0:
idx = 0
horizontal_axis_ids = {}
for point in extract_horizontal_space(df):
horizontal_axis_ids[" / ".join(point)] = idx
idx = idx + 1
num_rows = len(vertical_axis_ids)
num_cols = max(1, len(extract_horizontal_space(df)))
if num_rows == 0:
return
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, gridspec_kw = {'wspace': 0, 'hspace': 0})
for _, vertical_row_description in df[[vertical_axis_name]].drop_duplicates().iterrows():
vertical_val = vertical_row_description[vertical_axis_name]
vertical_id = vertical_axis_ids[vertical_val]
vertical_name = "{}={}".format(vertical_axis_name, vertical_val)
vertical_df = df[df[vertical_axis_name] == vertical_val]
for _, horizontal_row_description in vertical_df[horizontal_axes].drop_duplicates().iterrows():
horizontal_df = vertical_df
for axis in horizontal_axes:
horizontal_df = horizontal_df[horizontal_df[axis] == horizontal_row_description[axis]]
horizontal_id = 0
if len(horizontal_axes) > 0:
horizontal_point = []
for rt_axis in horizontal_axes:
horizontal_point.append("{}={}".format(rt_axis, horizontal_row_description[rt_axis]))
horizontal_name = " / ".join(horizontal_point)
horizontal_id = horizontal_axis_ids[horizontal_name]
ax=axes[vertical_id, horizontal_id]
else:
ax=axes[vertical_id]
ax.set_ylabel(vertical_name)
data = {}
for _, variant in horizontal_df[['variant']].drop_duplicates().iterrows():
variant_name = variant['variant']
if 'base' not in data:
data['base'] = horizontal_df[horizontal_df['variant'] == variant_name].iloc[0]['base_samples']
data[variant_name] = horizontal_df[horizontal_df['variant'] == variant_name].iloc[0]['samples']
if mode == 'pdf':
# sns.histplot(data=data, ax=ax, kde=True)
displot(data, ax)
else:
ratio(data, ax)
if len(horizontal_axes) > 0:
ax=axes[vertical_id, horizontal_id]
if vertical_id == (num_rows - 1):
ax.set_xlabel(horizontal_name)
if horizontal_id == 0:
ax.set_ylabel(vertical_name)
else:
ax.set_ylabel('')
for ax in axes.flat:
ax.set_xticklabels([])
fig.suptitle(title)
plt.tight_layout()
plt.show()
def variants(args, mode):
pattern = re.compile(args.variants_pdf) if mode == 'pdf' else re.compile(args.variants_ratio)
iterate_case_dfs(args, functools.partial(case_variants, pattern, mode))
def file_exists(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError(f"The file '{value}' does not exist.")
return value
def parse_arguments():
parser = argparse.ArgumentParser(description="Analyze benchmark results.")
parser.add_argument(
'-R', type=str, default='.*', help="Regex for benchmarks selection.")
parser.add_argument(
'--list-benches', action=argparse.BooleanOptionalAction, help="Show available benchmarks.")
parser.add_argument(
'--coverage', action=argparse.BooleanOptionalAction, help="Show variant space coverage.")
parser.add_argument(
'--top', default=7, type=int, action='store', nargs='?', help="Show top N variants with highest score.")
parser.add_argument(
'files', type=file_exists, nargs='+', help='At least one file is required.')
parser.add_argument(
'--alpha', default=1.0, type=float)
parser.add_argument(
'--variants-pdf', type=str, help="Show matching variants data.")
parser.add_argument(
'--variants-ratio', type=str, help="Show matching variants data.")
return parser.parse_args()
def main():
args = parse_arguments()
if args.list_benches:
cub.bench.list_benches()
return
if args.coverage:
coverage(args)
return
if args.variants_pdf:
variants(args, 'pdf')
return
if args.variants_ratio:
variants(args, 'ratio')
return
top(args)
if __name__ == "__main__":
main()
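# Command-line usage sketches (flags grounded in parse_arguments() above; the file names and
# the variant regex are placeholders for result storages produced by the benchmark scripts):
#
#   ./analyze.py results_a.db results_b.db                 # top-N variants per case
#   ./analyze.py -R '.*scan.*' --coverage results_a.db     # variant-space coverage
#   ./analyze.py --variants-ratio='<regex>' results_a.db   # quantile-ratio plots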
| cub-master | benchmarks/scripts/analyze.py |