# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
paulis = [[cusv.Pauli.I], [cusv.Pauli.X, cusv.Pauli.Y]]
basisBits = [[1], [1, 2]]
nBasisBits = [len(arr) for arr in basisBits]
exp_values = np.empty(len(paulis), dtype=np.float64)
expected = np.asarray([1.0, -0.14], dtype=np.float64)
d_sv = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# compute expectation values on the Pauli basis
cusv.compute_expectations_on_pauli_basis(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, exp_values.ctypes.data,
paulis, len(paulis), basisBits, nBasisBits)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected, exp_values):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/expectation_pauli.py |
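# A minimal NumPy cross-check of the expectation values computed above. This is
# a sketch, not part of the sample: it assumes cuStateVec's little-endian index
# bit ordering (bit 0 is the least significant) and builds the dense operator
# with Kronecker products, which is only feasible for a few qubits.
import numpy as np
I = np.eye(2, dtype=np.complex64)
X = np.array([[0, 1], [1, 0]], dtype=np.complex64)
Y = np.array([[0, -1j], [1j, 0]], dtype=np.complex64)
def pauli_expectation(sv, paulis, basis_bits, n_index_bits):
    """Compute <psi|P|psi> / <psi|psi> for a Pauli string on the given bits."""
    ops = [I] * n_index_bits
    for p, b in zip(paulis, basis_bits):
        ops[b] = p
    full = ops[-1]
    for op in reversed(ops[:-1]):   # kron from the most significant bit down
        full = np.kron(full, op)
    return (np.vdot(sv, full @ sv) / np.vdot(sv, sv)).real
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
print(pauli_expectation(sv, [I], [1], 3))        # ~1.0
print(pauli_expectation(sv, [X, Y], [1, 2], 3))  # ~-0.14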
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import time
import cupy as cp
import numpy as np
from mpi4py import MPI
import cuquantum
from cuquantum import cudaDataType
from cuquantum import custatevec as cusv
N_LOCAL_INDEX_BITS = 26
def run_distributed_index_bit_swaps(
rank, size, n_global_index_bits, n_local_index_bits,
index_bit_swaps, mask_bit_string, mask_ordering):
if rank == 0:
print(f"index bit swaps: {index_bit_swaps}")
print(f"mask bit string: {mask_bit_string}")
print(f"mask ordering: {mask_ordering}")
# data type of the state vector, acceptable values are CUDA_C_32F and CUDA_C_64F.
sv_data_type = cudaDataType.CUDA_C_64F
sv_dtype = cp.complex128 if sv_data_type == cudaDataType.CUDA_C_64F else cp.complex64
# the number of index bits corresponding to sub state vectors accessible via
# GPUDirect P2P; it should be set based on the number of GPUs per node,
# N = 2^n_p2p_device_bits, that participate in the distributed state vector
# and support P2P data transfer
n_p2p_device_bits = 0
n_sub_svs_p2p = 1 << n_p2p_device_bits
# use rank and size to map sub state vectors
# this sample assigns one device to one rank and allocates one sub state vector on the assigned device
# use the rank as the index of the sub state vector locally allocated in this process
org_sub_sv_index = rank
# the number of sub state vectors is identical to the number of processes
n_sub_svs = size
# transfer workspace size
transfer_workspace_size = 1 << N_LOCAL_INDEX_BITS
# bind the device to the process
# this is based on the assumption of the global rank placement that the
# processes are mapped to nodes in contiguous chunks (see the comment below)
num_devices = cp.cuda.runtime.getDeviceCount()
assert num_devices > 0
if n_p2p_device_bits > 0:
assert num_devices >= n_sub_svs_p2p
cp.cuda.runtime.setDevice(rank % n_sub_svs_p2p)
else:
cp.cuda.runtime.setDevice(rank % num_devices)
# allocate local sub state vector, stream and event
d_org_sub_sv = cp.zeros((1 << n_local_index_bits,), dtype=sv_dtype)
local_stream = cp.cuda.Stream()
# event should be created with the cudaEventInterprocess flag
local_event = cp.cuda.Event(disable_timing=True, interprocess=True)
# create cuStateVec handle
handle = cusv.create()
# create communicator
#
# cuStateVec provides built-in communicators for Open MPI and MPICH.
#
# Built-in communicators dynamically resolve required MPI functions by using dlopen().
# This Python sample relies on mpi4py loading libmpi.so and initializing MPI for us.
# The difference in treatment between Open MPI and MPICH stems from the fact
# that the scope of the MPI symbols loaded by mpi4py differs due to a Python
# limitation (NVIDIA/cuQuantum#31), so we have to work around it.
#
# An external communicator can be used for MPI libraries that are not ABI compatible
# with Open MPI or MPICH. It uses a shared library that wraps the MPI library of choice.
# The soname should be set to the full path to the shared library. If you need this
# capability, please refer to the "mpicomm.c" file that comes with the C/C++ sample
# (which is a counterpart of this sample).
name, _ = MPI.get_vendor()
if name == "Open MPI":
# use built-in OpenMPI communicator
communicator_type = cusv.CommunicatorType.OPENMPI
soname = ""
elif name == "MPICH":
# use built-in MPICH communicator
communicator_type = cusv.CommunicatorType.MPICH
# work around a Python limitation as discussed in NVIDIA/cuQuantum#31
soname = "libmpi.so"
else:
# use external communicator
communicator_type = cusv.CommunicatorType.EXTERNAL
# please compile mpicomm.c to generate the shared library and place its path here
soname = ""
if not soname:
raise ValueError("please supply the soname to the shared library providing "
"an external communicator for cuStateVec")
# create communicator
communicator = cusv.communicator_create(handle, communicator_type, soname)
comm = MPI.COMM_WORLD
# create sv segment swap worker
sv_seg_swap_worker, extra_workspace_size, min_transfer_workspace_size = cusv.sv_swap_worker_create(
handle, communicator,
d_org_sub_sv.data.ptr, org_sub_sv_index, local_event.ptr, sv_data_type,
local_stream.ptr)
# set extra workspace
d_extra_workspace = cp.cuda.alloc(extra_workspace_size)
cusv.sv_swap_worker_set_extra_workspace(
handle, sv_seg_swap_worker, d_extra_workspace.ptr, extra_workspace_size)
# set transfer workspace
# The size should be equal to or larger than min_transfer_workspace_size.
# Depending on the system, a larger transfer workspace can improve performance.
transfer_workspace_size = max(min_transfer_workspace_size, transfer_workspace_size)
d_transfer_workspace = cp.cuda.alloc(transfer_workspace_size)
cusv.sv_swap_worker_set_transfer_workspace(
handle, sv_seg_swap_worker, d_transfer_workspace.ptr, transfer_workspace_size)
# set remote sub state vectors accessible via GPUDirect P2P
# events should also be set for synchronization
sub_sv_indices_p2p = []
d_sub_svs_p2p = []
remote_events = []
if n_p2p_device_bits > 0:
# distribute device memory handles
# under the hood the handle is stored as a Python bytes object
ipc_mem_handle = cp.cuda.runtime.ipcGetMemHandle(d_org_sub_sv.data.ptr)
ipc_mem_handles = comm.allgather(ipc_mem_handle)
# distribute event handles
ipc_event_handle = cp.cuda.runtime.ipcGetEventHandle(local_event.ptr)
ipc_event_handles = comm.allgather(ipc_event_handle)
# get remote device pointers and events
# this calculation assumes that the global rank placement is done in a round-robin fashion
# across nodes, so for example if n_p2p_device_bits=2 there are 2^2=4 processes/node (and
# 1 GPU/process) and we expect the global MPI ranks to be assigned as
# 0 1 2 3 -> node 0
# 4 5 6 7 -> node 1
# 8 9 10 11 -> node 2
# ...
# if the rank placement scheme is different, you will need to calculate based on local MPI
# rank/size, as CUDA IPC is only for intra-node, not inter-node, communication.
p2p_sub_sv_index_begin = (org_sub_sv_index // n_sub_svs_p2p) * n_sub_svs_p2p
p2p_sub_sv_index_end = p2p_sub_sv_index_begin + n_sub_svs_p2p
for p2p_sub_sv_index in range(p2p_sub_sv_index_begin, p2p_sub_sv_index_end):
if org_sub_sv_index == p2p_sub_sv_index:
continue # don't need local sub state vector pointer
sub_sv_indices_p2p.append(p2p_sub_sv_index)
dst_mem_handle = ipc_mem_handles[p2p_sub_sv_index]
# default is to use cudaIpcMemLazyEnablePeerAccess
d_sub_sv_p2p = cp.cuda.runtime.ipcOpenMemHandle(dst_mem_handle)
d_sub_svs_p2p.append(d_sub_sv_p2p)
event_p2p = cp.cuda.runtime.ipcOpenEventHandle(ipc_event_handles[p2p_sub_sv_index])
remote_events.append(event_p2p)
# set p2p sub state vectors
assert len(d_sub_svs_p2p) == len(sub_sv_indices_p2p) == len(remote_events)
cusv.sv_swap_worker_set_sub_svs_p2p(
handle, sv_seg_swap_worker,
d_sub_svs_p2p, sub_sv_indices_p2p, remote_events, len(d_sub_svs_p2p))
# create distributed index bit swap scheduler
scheduler = cusv.dist_index_bit_swap_scheduler_create(
handle, n_global_index_bits, n_local_index_bits)
# set the index bit swaps to the scheduler
# n_swap_batches is returned by the call; it specifies the number of swap batches to loop over
assert len(mask_bit_string) == len(mask_ordering)
n_swap_batches = cusv.dist_index_bit_swap_scheduler_set_index_bit_swaps(
handle, scheduler,
index_bit_swaps, len(index_bit_swaps),
mask_bit_string, mask_ordering, len(mask_bit_string))
# the main loop of index bit swaps
n_loops = 2
for loop in range(n_loops):
start = time.perf_counter()
for swap_batch_index in range(n_swap_batches):
# get parameters
parameters = cusv.dist_index_bit_swap_scheduler_get_parameters(
handle, scheduler, swap_batch_index, org_sub_sv_index)
# the rank of the communication endpoint is parameters.dst_sub_sv_index
# as "rank == sub_sv_index" is assumed in the present sample.
rank = parameters.dst_sub_sv_index
# set parameters to the worker
cusv.sv_swap_worker_set_parameters(
handle, sv_seg_swap_worker, parameters, rank)
# execute swap
cusv.sv_swap_worker_execute(
handle, sv_seg_swap_worker, 0, parameters.transfer_size)
# all internal CUDA calls are serialized on local_stream
# synchronize all operations on device
local_stream.synchronize()
# barrier here for time measurement
comm.barrier()
elapsed = time.perf_counter() - start
if (loop == n_loops - 1) and (org_sub_sv_index == 0):
# output benchmark result
elm_size = 16 if sv_data_type == cudaDataType.CUDA_C_64F else 8
fraction = 1. - 0.5 ** len(index_bit_swaps)
transferred = 2 ** n_local_index_bits * fraction * elm_size
bw = transferred / elapsed * 1E-9
print(f"BW {bw} [GB/s]")
# free all resources
cusv.dist_index_bit_swap_scheduler_destroy(handle, scheduler)
cusv.sv_swap_worker_destroy(handle, sv_seg_swap_worker)
cusv.communicator_destroy(handle, communicator)
cusv.destroy(handle)
# free IPC pointers and events
for d_sub_sv in d_sub_svs_p2p:
cp.cuda.runtime.ipcCloseMemHandle(d_sub_sv)
for event in remote_events:
cp.cuda.runtime.eventDestroy(event)
if __name__ == "__main__":
# get rank and size
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
# size should be a power of two
assert (size & (size - 1)) == 0
# compute n_global_index_bits from the size
# n_global_index_bits = log2(size)
n_global_index_bits = 0
while (1 << n_global_index_bits) < size:
n_global_index_bits += 1
# the size of local sub state vectors
n_local_index_bits = N_LOCAL_INDEX_BITS
# create index bit swap
index_bit_swaps = []
n_index_bit_swaps = 1
n_index_bits = n_local_index_bits + n_global_index_bits
for idx in range(n_index_bit_swaps):
index_bit_swaps.append((n_local_index_bits-1-idx, n_index_bits-idx-1))
# empty mask
mask_bit_string = mask_ordering = []
run_distributed_index_bit_swaps(
rank, size, n_global_index_bits, n_local_index_bits,
index_bit_swaps, mask_bit_string, mask_ordering)
| cuQuantum-main | python/samples/custatevec/distributed_index_bit_swap_mpi.py |
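# A single-process NumPy sketch of what one index bit swap does to a full state
# vector: the amplitude at index i moves to the index with the two bits
# exchanged. This is an illustration only; the distributed sample above
# performs the same permutation across sub state vectors and processes.
import numpy as np
def swap_index_bits(sv, bit1, bit2, n_bits):
    out = np.empty_like(sv)
    for i in range(1 << n_bits):
        x1 = (i >> bit1) & 1
        x2 = (i >> bit2) & 1
        j = i & ~((1 << bit1) | (1 << bit2))
        j |= (x1 << bit2) | (x2 << bit1)
        out[j] = sv[i]
    return out
# 4 index bits; swap the highest "local" bit with the highest "global" bit
sv = np.arange(16, dtype=np.complex128)
print(swap_index_bits(sv, 1, 3, 4).real.astype(int))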
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nTargets = 1
nControls = 1
targets = np.asarray([2], dtype=np.int32)
controls = np.asarray([1], dtype=np.int32)
controlBitValues = np.asarray([1], dtype=np.int32)
paulis = np.asarray([cusv.Pauli.Z], dtype=np.int32)
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
expected = np.asarray([0.0+0.0j, 0.0+0.1j,-0.1+0.1j,-0.2+0.1j,
0.2+0.2j, 0.3+0.3j, 0.4-0.3j, 0.5-0.4j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# apply Pauli operator
cusv.apply_pauli_rotation(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, np.pi/2, paulis.ctypes.data,
targets.ctypes.data, nTargets, controls.ctypes.data, controlBitValues.ctypes.data, nControls)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected, d_sv):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/exponential_pauli.py |
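# A NumPy cross-check of the rotation above. Judging from the sample's input
# and expected output, apply_pauli_rotation with angle theta acts as
# exp(i*theta*P) on the target (here controlled on qubit 1); for any Pauli P,
# exp(i*theta*P) = cos(theta)*I + i*sin(theta)*P. This sketch is an
# interpretation of the sample data, not an authoritative API statement.
import numpy as np
theta = np.pi / 2
Z = np.array([[1, 0], [0, -1]], dtype=np.complex64)
rot = np.cos(theta) * np.eye(2) + 1j * np.sin(theta) * Z   # = diag(i, -i)
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
out = sv.copy()
for i in range(8):
    if (i >> 1) & 1:                 # control: qubit 1 must be 1
        t = (i >> 2) & 1             # target: qubit 2
        out[i] = rot[t, t] * sv[i]   # a Z rotation is diagonal
print(out)   # matches the sample's `expected` array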
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType
nIndexBits = 3
svSize = (1 << nIndexBits)
# populate the device memory with junk values (for illustrative purposes only)
# (we create a real random array of twice the length and view it as a complex array)
d_sv = cp.random.random(2*svSize, dtype=cp.float32).view(cp.complex64)
d_sv_res = cp.asarray([[1.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j]], dtype=cp.complex64)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# initialize the state vector
cusv.initialize_state_vector(
handle, d_sv.data.ptr, cudaDataType.CUDA_C_32F, nIndexBits,
cusv.StateVectorType.ZERO)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(d_sv_res, d_sv):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/initialize_sv.py |
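# For reference, StateVectorType.ZERO corresponds to the |00...0> computational
# basis state; a trivial NumPy sketch of the same initialization:
import numpy as np
n_index_bits = 3
sv = np.zeros(1 << n_index_bits, dtype=np.complex64)
sv[0] = 1.0   # amplitude 1 at index 0, 0 elsewhere
print(sv)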
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# In this example, all the available devices (up to 4 devices) will be used by default:
# $ python mgpu_swap_index_bits.py
#
# When device ids are given as additional inputs, the specified devices will be used:
# $ python mgpu_swap_index_bits.py 0 1
import sys
import cupy as cp
import numpy as np
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
nGlobalIndexBits = 2
nLocalIndexBits = 1
nSubSvs = (1 << nGlobalIndexBits)
subSvSize = (1 << nLocalIndexBits)
nMaxDevices = nSubSvs
# specify the type of device network topology to optimize the data transfer sequence.
# SWITCH provides better performance for devices connected via NVLink with an NVSwitch
# or a PCIe device network with a single PCIe switch. FULLMESH provides better
# performance for devices connected with a full-mesh topology.
deviceNetworkType = cusv.DeviceNetworkType.SWITCH
# swap 0th and 2nd qubits
nBitSwaps = 1
bitSwaps = [(0, 2)]
# swap the state vector elements only if 1st qubit is 1
maskLen = 1
maskBitString = [1]
maskOrdering = [1]
# input: 0.2|001> + 0.4|011> - 0.4|101> - 0.8|111>
sv = np.asarray([0.0+0.0j, 0.2+0.0j, 0.0+0.0j, 0.4+0.0j,
0.0+0.0j, -0.4+0.0j, 0.0+0.0j, -0.8+0.0j],
dtype=np.complex128).reshape(nSubSvs, subSvSize)
# expected: 0.2|001> + 0.4|110> - 0.4|101> - 0.8|111>
sv_result = np.asarray([0.0+0.0j, 0.2+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, -0.4+0.0j, 0.4+0.0j, -0.8+0.0j],
dtype=np.complex128).reshape(nSubSvs, subSvSize)
# device allocation
if len(sys.argv) == 1:
nDevices = min(cp.cuda.runtime.getDeviceCount(), nMaxDevices)
devices = [i for i in range(nDevices)]
else:
nDevices = min(len(sys.argv) - 1, nMaxDevices)
devices = [int(sys.argv[i+1]) for i in range(nDevices)]
# check that no device id is duplicated
duplicatedDevices = [id for id in set(devices) if devices.count(id) > 1]
if len(duplicatedDevices) != 0:
raise ValueError(f"device id {duplicatedDevices[0]} is defined more than once")
# enable P2P access
for i in range(nDevices):
with cp.cuda.Device(devices[i]):
for j in range(nDevices):
if i == j: continue
if cp.cuda.runtime.deviceCanAccessPeer(devices[i], devices[j]) != 1:
raise RuntimeError(f"P2P access between device id {devices[i]} and {devices[j]} is unsupported")
cp.cuda.runtime.deviceEnablePeerAccess(devices[j])
# define which device stores each sub state vector
subSvLayout = [devices[iSv % nDevices] for iSv in range(nSubSvs)]
print("The following devices will be used in this sample:")
d_sv = []
for iSv in range(nSubSvs):
print(f" sub-SV #{iSv} : device id {subSvLayout[iSv]}")
with cp.cuda.Device(subSvLayout[iSv]):
d_sv.append(cp.asarray(sv[iSv]))
d_sv_ptrs = [arr.data.ptr for arr in d_sv]
# custatevec handle initialization
handles = []
for i in range(nDevices):
with cp.cuda.Device(devices[i]):
handles.append(cusv.create())
# bit swap
# Note: when this API is called, the current device must be one of the participating devices,
# see the documentation of custatevecMultiDeviceSwapIndexBits()
with cp.cuda.Device(devices[0]):
cusv.multi_device_swap_index_bits(
handles, nDevices, d_sv_ptrs, cudaDataType.CUDA_C_64F,
nGlobalIndexBits, nLocalIndexBits,
bitSwaps, nBitSwaps, maskBitString, maskOrdering, maskLen,
deviceNetworkType)
# destroy handles
for i in range(nDevices):
with cp.cuda.Device(devices[i]):
cusv.destroy(handles[i])
# check results
correct = True
for iSv in range(nSubSvs):
with cp.cuda.Device(subSvLayout[iSv]):
correct = correct and cp.allclose(sv_result[iSv], d_sv[iSv])
if correct:
print("mgpu_swap_index_bits example PASSED")
else:
raise RuntimeError("mgpu_swap_index_bits example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/mgpu_swap_index_bits.py |
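# A NumPy reference for the masked bit swap used above (a sketch inferred from
# the sample's input/expected data): index bits are swapped only for indices
# whose mask bits match the mask bit string.
import numpy as np
def masked_swap_index_bits(sv, bit_swaps, mask_ordering, mask_bit_string, n_bits):
    out = sv.copy()
    for i in range(1 << n_bits):
        if any(((i >> b) & 1) != v for b, v in zip(mask_ordering, mask_bit_string)):
            continue   # mask does not match; leave this element in place
        j = i
        for b1, b2 in bit_swaps:
            x1, x2 = (j >> b1) & 1, (j >> b2) & 1
            j = (j & ~((1 << b1) | (1 << b2))) | (x1 << b2) | (x2 << b1)
        out[j] = sv[i]
    return out
sv = np.asarray([0.0, 0.2, 0.0, 0.4, 0.0, -0.4, 0.0, -0.8], dtype=np.complex128)
# swap bits 0 and 2 only where bit 1 == 1, as in the sample
print(masked_swap_index_bits(sv, [(0, 2)], [1], [1], 3).real)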
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nBasisBits = 3
basisBits = np.asarray([0, 1, 2], dtype=np.int32)
# In a real application, a random number in the range [0, 1) would be used.
randnum = 0.2
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.3+0.4j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.1+0.1j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
expected_sv = np.asarray([0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.2+0.4j,
0.0+0.0j, 0.6+0.6j, 0.2+0.2j, 0.0+0.0j], dtype=np.complex64)
expected_parity = 0
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# measurement on z basis
parity = cusv.measure_on_z_basis(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
basisBits.ctypes.data, nBasisBits, randnum, cusv.Collapse.NORMALIZE_AND_ZERO)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected_sv, d_sv):
raise ValueError("results mismatch")
if expected_parity != parity:
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/measure_zbasis.py |
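# A NumPy reference for the measurement above (a sketch assuming the collapse
# semantics the sample demonstrates: keep the amplitudes with the measured
# parity, zero the rest, and renormalize -- NORMALIZE_AND_ZERO).
import numpy as np
def measure_z_parity(sv, basis_bits, randnum):
    mask = sum(1 << b for b in basis_bits)
    parity_of = np.array([bin(i & mask).count("1") & 1 for i in range(sv.size)])
    probs = np.abs(sv) ** 2
    p_even = probs[parity_of == 0].sum()
    parity = 0 if randnum < p_even else 1
    out = np.where(parity_of == parity, sv, 0)
    out = out / np.sqrt(probs[parity_of == parity].sum())   # renormalize
    return parity, out.astype(sv.dtype)
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.3+0.4j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.1+0.1j, 0.4+0.5j], dtype=np.complex64)
parity, collapsed = measure_z_parity(sv, [0, 1, 2], 0.2)
print(parity)      # 0
print(collapsed)   # matches the sample's expected_sv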
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
def run_test_matrix_type(
handle, matrixType, matrix, matrixDataType, layout, nTargets,
adjoint, computeType):
# check the size of external workspace
extraWorkspaceSizeInBytes = cusv.test_matrix_type_get_workspace_size(
handle, matrixType, matrix.ctypes.data, matrixDataType, layout,
nTargets, adjoint, computeType)
# allocate external workspace if necessary
if extraWorkspaceSizeInBytes > 0:
extraWorkspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)
extraWorkspacePtr = extraWorkspace.ptr
else:
extraWorkspacePtr = 0
# execute testing
residualNorm = cusv.test_matrix_type(
handle, matrixType, matrix.ctypes.data, matrixDataType, layout,
nTargets, adjoint, computeType, extraWorkspacePtr, extraWorkspaceSizeInBytes)
cp.cuda.Device().synchronize()
return residualNorm
if __name__ == '__main__':
nTargets = 1
adjoint = 0
# unitary and Hermitian matrix
matrix = np.asarray([0.5+0.0j, 1/np.sqrt(2)-0.5j,
1/np.sqrt(2)+0.5j, -0.5+0.0j], dtype=np.complex128)
# custatevec handle initialization
handle = cusv.create()
matrixDataType = cudaDataType.CUDA_C_64F
layout = cusv.MatrixLayout.ROW
computeType = ComputeType.COMPUTE_DEFAULT
unitaryResidualNorm = run_test_matrix_type(handle, cusv.MatrixType.UNITARY, matrix,
matrixDataType, layout, nTargets, adjoint,
computeType)
hermiteResidualNorm = run_test_matrix_type(handle, cusv.MatrixType.HERMITIAN, matrix,
matrixDataType, layout, nTargets, adjoint,
computeType)
# destroy handle
cusv.destroy(handle)
correct = np.allclose(unitaryResidualNorm, 0.)
correct &= np.allclose(hermiteResidualNorm, 0.)
if correct:
print("test_matrix_type example PASSED")
else:
raise RuntimeError("test_matrix_type example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/test_matrix_type.py |
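# The two properties being tested can be illustrated directly in NumPy; the
# residual definitions below are illustrative and may not match cuStateVec's
# exact norm convention:
import numpy as np
matrix = np.asarray([[0.5+0.0j,          1/np.sqrt(2)-0.5j],
                     [1/np.sqrt(2)+0.5j, -0.5+0.0j]])
unitary_residual = np.linalg.norm(matrix @ matrix.conj().T - np.eye(2))
hermitian_residual = np.linalg.norm(matrix - matrix.conj().T)
print(unitary_residual, hermitian_residual)   # both ~0 for this matrix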
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import sys
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType
nGlobalBits = 2
nLocalBits = 2
nSubSvs = (1 << nGlobalBits)
subSvSize = (1 << nLocalBits)
nMaxShots = 5
nShots = 5
bitStringLen = 4
bitOrdering = [0, 1, 2, 3]
bitStrings = np.empty(nShots, dtype=np.int64)
bitStrings_result = np.asarray([0b0011, 0b0011, 0b0111, 0b1011, 0b1110], dtype=np.int64)
# In a real application, random numbers in the range [0, 1) would be used.
randnums = np.asarray([0.1, 0.2, 0.4, 0.6, 0.8], dtype=np.float64)
h_sv = np.asarray([[ 0.000+0.000j, 0.000+0.125j, 0.000+0.250j, 0.000+0.375j],
[ 0.000+0.000j, 0.000-0.125j, 0.000-0.250j, 0.000-0.375j],
[ 0.125+0.000j, 0.125-0.125j, 0.125-0.250j, 0.125-0.375j],
[-0.125+0.000j, -0.125-0.125j, -0.125-0.250j, -0.125-0.375j]],
dtype=np.complex128)
# device allocation
if len(sys.argv) == 1:
numDevices = cp.cuda.runtime.getDeviceCount()
devices = [i % numDevices for i in range(nSubSvs)]
else:
numDevices = min(len(sys.argv) - 1, nSubSvs)
devices = [int(sys.argv[i+1]) for i in range(numDevices)]
for i in range(numDevices, nSubSvs):
devices.append(devices[i % numDevices])
print("The following devices will be used in this sample:")
for iSv in range(nSubSvs):
print(f" sub-SV {iSv} : device id {devices[iSv]}")
d_sv = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]):
d_sv.append(cp.asarray(h_sv[iSv]))
# custatevec handle initialization
handle = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]):
handle.append(cusv.create())
# create sampler and check the size of external workspace
sampler = []
extraWorkspaceSizeInBytes = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
s, size = cusv.sampler_create(
handle[iSv], d_sv[iSv].data.ptr, cudaDataType.CUDA_C_64F, nLocalBits,
nMaxShots)
sampler.append(s)
extraWorkspaceSizeInBytes.append(size)
# allocate external workspace if necessary
extraWorkspace = []
for iSv in range(nSubSvs):
if extraWorkspaceSizeInBytes[iSv] > 0:
with cp.cuda.Device(devices[iSv]) as dev:
extraWorkspace.append(cp.cuda.alloc(extraWorkspaceSizeInBytes[iSv]))
# sample preprocess
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
cusv.sampler_preprocess(
handle[iSv], sampler[iSv], extraWorkspace[iSv].ptr,
extraWorkspaceSizeInBytes[iSv])
# get norm of the sub state vectors
subNorms = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
subNorms.append(cusv.sampler_get_squared_norm(handle[iSv], sampler[iSv]))
dev.synchronize()
# get cumulative array & norm
cumulativeArray = np.zeros(nSubSvs + 1, dtype=np.float64)
cumulativeArray[1:] = np.cumsum(subNorms)
norm = cumulativeArray[nSubSvs]
# apply offset and norm
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
cusv.sampler_apply_sub_sv_offset(
handle[iSv], sampler[iSv], iSv, nSubSvs, cumulativeArray[iSv], norm)
# divide randnum array
shotOffsets = np.zeros(nSubSvs+1, dtype=np.int32)
pos = np.searchsorted(randnums, cumulativeArray[1:]/norm)
pos[nSubSvs-1] = nShots
shotOffsets[1:] = pos
# sample bit strings
for iSv in range(nSubSvs):
shotOffset = int(shotOffsets[iSv])
nSubShots = shotOffsets[iSv + 1] - shotOffsets[iSv]
if nSubShots > 0:
with cp.cuda.Device(devices[iSv]) as dev:
cusv.sampler_sample(
handle[iSv], sampler[iSv],
# when sliced into a 0D array, NumPy returns a scalar, so we can't do
# bitStrings[shotOffset].ctypes.data and need this workaround
bitStrings.ctypes.data + shotOffset * bitStrings.dtype.itemsize,
bitOrdering, bitStringLen,
randnums.ctypes.data + shotOffset * randnums.dtype.itemsize,
nSubShots, cusv.SamplerOutput.RANDNUM_ORDER)
# destroy sampler descriptor and custatevec handle
for iSv in range(nSubSvs):
cp.cuda.Device(devices[iSv]).synchronize()
cusv.sampler_destroy(sampler[iSv])
cusv.destroy(handle[iSv])
correct = np.allclose(bitStrings, bitStrings_result)
if correct:
print("mgpu_sampler example PASSED")
else:
raise RuntimeError("mgpu_sampler example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/mgpu_sampler.py |
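# The cumulative-array logic above in isolation: each shot's random number
# selects the sub state vector whose cumulative squared-norm window contains
# it. The norms below are hypothetical; the random numbers must be sorted.
import numpy as np
sub_norms = [0.1, 0.4, 0.3, 0.2]          # hypothetical squared norms
cumulative = np.zeros(len(sub_norms) + 1)
cumulative[1:] = np.cumsum(sub_norms)
norm = cumulative[-1]
randnums = np.asarray([0.05, 0.2, 0.45, 0.6, 0.95])
pos = np.searchsorted(randnums, cumulative[1:] / norm)
pos[-1] = len(randnums)                   # the last sub-SV takes the remainder
shot_offsets = np.concatenate(([0], pos))
for i in range(len(sub_norms)):
    print(f"sub-SV {i}: shots [{shot_offsets[i]}, {shot_offsets[i+1]})")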
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType
nSVs = 2
nIndexBits = 3
svStride = (1 << nIndexBits)
# the squared absolute values of the state vector elements are summed over
# bits 0 and 2; the resulting abs2sum array is indexed by bit 1.
# the bit ordering must live on the host.
bitOrdering = np.asarray([1], dtype=np.int32)
bitStringLen = bitOrdering.size
# 2 state vectors are allocated contiguously in a single memory chunk.
d_svs = cp.asarray([[0.0 + 0.0j, 0.0 + 0.1j, 0.1 + 0.1j, 0.1 + 0.2j,
0.2 + 0.2j, 0.3 + 0.3j, 0.3 + 0.4j, 0.4 + 0.5j],
[0.25 + 0.25j, 0.25 + 0.25j, 0.25 + 0.25j, 0.25 + 0.25j,
0.25 + 0.25j, 0.25 + 0.25j, 0.25 + 0.25j, 0.25 + 0.25j]], dtype=cp.complex64)
abs2sumStride = 2
batchedAbs2sumSize = nSVs * abs2sumStride
# abs2sum arrays are allocated contiguously in a single memory chunk
# Note: abs2sum can also live on the host.
abs2sum = cp.empty(batchedAbs2sumSize, dtype=cp.float64)
abs2sum_res = cp.asarray([0.27, 0.73, 0.5, 0.5], dtype=cp.float64)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# compute abs2sum arrays
cusv.abs2sum_array_batched(
handle, d_svs.data.ptr, cudaDataType.CUDA_C_32F, nIndexBits, nSVs, svStride,
abs2sum.data.ptr, abs2sumStride,
bitOrdering.ctypes.data, bitStringLen, 0, 0, 0)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(abs2sum_res, abs2sum):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/batched_abs2sum.py |
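# A NumPy reference for the abs2sum array of a single state vector (sketch
# only): the output is indexed by the bits listed in bitOrdering, and all
# other index bits are summed over.
import numpy as np
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
bit_ordering = [1]
out = np.zeros(1 << len(bit_ordering))
for i, p in enumerate(np.abs(sv) ** 2):
    key = sum(((i >> b) & 1) << k for k, b in enumerate(bit_ordering))
    out[key] += p
print(out)   # ~[0.27, 0.73], the first half of the sample's abs2sum_res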
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
n_targets = 2
n_controls = 1
adjoint = 0
targets = [0, 1]
controls = [2]
control_bit_values = [1]
permutation = np.asarray([0, 2, 1, 3], dtype=np.int64)
d_sv = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv_res = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, -0.4+0.3j, -0.3+0.3j, 0.4+0.5j], dtype=np.complex64)
diagonals = np.asarray([1.0+0.0j, 0.0+1.0j, 0.0+1.0j, 1.0+0.0j], dtype=np.complex64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
workspaceSize = cusv.apply_generalized_permutation_matrix_get_workspace_size(
handle, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, permutation.ctypes.data, diagonals.ctypes.data,
cuquantum.cudaDataType.CUDA_C_32F, targets, n_targets, n_controls)
if workspaceSize > 0:
workspace = cp.cuda.memory.alloc(workspaceSize)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply matrix
cusv.apply_generalized_permutation_matrix(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
permutation.ctypes.data, diagonals.ctypes.data, cuquantum.cudaDataType.CUDA_C_32F, adjoint,
targets, n_targets, controls, control_bit_values, n_controls,
workspace_ptr, workspaceSize)
# destroy handle
cusv.destroy(handle)
# check result
if not np.allclose(d_sv, d_sv_res):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/permutation_matrix.py |
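# How the permutation and diagonals combine, inferred from the sample's
# expected output (a sketch, not an authoritative definition): the matrix has
# a single nonzero per row, out[i] = diagonals[i] * in[permutation[i]].
import numpy as np
permutation = np.asarray([0, 2, 1, 3])
diagonals = np.asarray([1.0+0.0j, 0.0+1.0j, 0.0+1.0j, 1.0+0.0j], dtype=np.complex64)
M = np.zeros((4, 4), dtype=np.complex64)
for i in range(4):
    M[i, permutation[i]] = diagonals[i]
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
out = sv.copy()
out[4:8] = M @ sv[4:8]   # control (qubit 2) == 1 selects indices 4..7
print(out)               # matches the sample's d_sv_res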
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nTargets = 1
nControls = 2
adjoint = 0
targets = np.asarray([2], dtype=np.int32)
controls = np.asarray([0, 1], dtype=np.int32)
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
expected = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.4+0.5j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.1+0.2j], dtype=np.complex64)
# the gate matrix can live on either host (np) or device (cp)
matrix = cp.asarray([0.0+0.0j, 1.0+0.0j, 1.0+0.0j, 0.0+0.0j], dtype=np.complex64)
if isinstance(matrix, cp.ndarray):
matrix_ptr = matrix.data.ptr
elif isinstance(matrix, np.ndarray):
matrix_ptr = matrix.ctypes.data
else:
raise ValueError
d_sv = cp.asarray(h_sv)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
workspaceSize = cusv.apply_matrix_get_workspace_size(
handle, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F,
cusv.MatrixLayout.ROW, adjoint, nTargets, nControls, cuquantum.ComputeType.COMPUTE_32F)
# check the size of external workspace
if workspaceSize > 0:
workspace = cp.cuda.memory.alloc(workspaceSize)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply gate
cusv.apply_matrix(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F,
cusv.MatrixLayout.ROW, adjoint, targets.ctypes.data, nTargets, controls.ctypes.data, 0, nControls,
cuquantum.ComputeType.COMPUTE_32F, workspace_ptr, workspaceSize)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected, d_sv):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/gate_application.py |
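# A NumPy reference for the controlled gate application above (sketch only):
# for each index where every control bit is 1, mix the amplitude pair that
# differs in the target bit by the 2x2 matrix.
import numpy as np
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
m = np.asarray([[0, 1], [1, 0]], dtype=np.complex64)   # X gate
target, controls = 2, [0, 1]
out = sv.copy()
for i in range(sv.size):
    if all((i >> c) & 1 for c in controls) and not (i >> target) & 1:
        j = i | (1 << target)   # partner index with the target bit set
        out[i] = m[0, 0] * sv[i] + m[0, 1] * sv[j]
        out[j] = m[1, 0] * sv[i] + m[1, 1] * sv[j]
print(out)   # matches the sample's `expected` array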
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
bitStringLen = 3
bitOrdering = np.asarray([2, 1, 0], dtype=np.int32)
# In a real application, a random number in the range [0, 1) would be used.
randnum = 0.5
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
expected_sv = np.asarray([0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.6+0.8j, 0.0+0.0j], dtype=np.complex64)
expected_bitString = np.asarray([1, 1, 0], dtype=np.int32)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# allocate host memory to hold the result
bitString = np.empty((bitStringLen,), dtype=np.int32)
# batch measurement
cusv.batch_measure(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, bitString.ctypes.data,
bitOrdering.ctypes.data, bitStringLen, randnum, cusv.Collapse.NORMALIZE_AND_ZERO)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(expected_sv, d_sv):
raise ValueError("results mismatch")
if not np.allclose(expected_bitString, bitString):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/batch_measure.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
nIndexBits = 3
nSvSize = (1 << nIndexBits)
# swap 0th and 2nd qubits
nBitSwaps = 1
bitSwaps = [(0, 2)]
# swap the state vector elements only if 1st qubit is 1
maskLen = 1
maskBitString = [1]
maskOrdering = [1]
# 0.2|001> + 0.4|011> - 0.4|101> - 0.8|111>
sv = cp.asarray([0.0+0.0j, 0.2+0.0j, 0.0+0.0j, 0.4+0.0j,
0.0+0.0j, -0.4+0.0j, 0.0+0.0j, -0.8+0.0j],
dtype=cp.complex128)
# 0.2|001> + 0.4|110> - 0.4|101> - 0.8|111>
sv_result = cp.asarray([0.0+0.0j, 0.2+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, -0.4+0.0j, 0.4+0.0j, -0.8+0.0j],
dtype=cp.complex128)
# custatevec handle initialization
handle = cusv.create()
# bit swap
cusv.swap_index_bits(
handle, sv.data.ptr, cudaDataType.CUDA_C_64F, nIndexBits,
bitSwaps, nBitSwaps,
maskBitString, maskOrdering, maskLen)
# destroy handle
cusv.destroy(handle)
correct = cp.allclose(sv, sv_result)
if correct:
print("swap_index_bits example PASSED")
else:
raise RuntimeError("swap_index_bits example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/swap_index_bits.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType
nSVs = 2
nIndexBits = 3
svStride = (1 << nIndexBits)
# the bit ordering must live on the host.
bitOrdering = np.asarray([2, 1, 0], dtype=np.int32)
bitStringLen = bitOrdering.size
# 2 bitStrings are allocated contiguously in a single memory chunk.
# Note: bitStrings can also live on the host.
bitStrings = cp.empty(2, dtype=cp.int64)
bitStrings_res = cp.asarray([0b100, 0b011], dtype=cp.int64)
# In a real application, random numbers in the range [0, 1) would be used.
# Note: the random numbers can also live on the host.
randnums = cp.asarray([0.009, 0.5], dtype=cp.float64)
# 2 state vectors are allocated contiguously in a single memory chunk.
d_svs = cp.asarray([[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j],
[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j]], dtype=cp.complex64)
d_svs_res = cp.asarray([[0.0+0.0j, 0.0+1.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.6+0.8j, 0.0+0.0j]], dtype=cp.complex64)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# batched measurement
cusv.measure_batched(
handle, d_svs.data.ptr, cudaDataType.CUDA_C_32F, nIndexBits, nSVs, svStride,
bitStrings.data.ptr, bitOrdering.ctypes.data, bitStringLen,
randnums.data.ptr, cusv.Collapse.NORMALIZE_AND_ZERO)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(d_svs_res, d_svs):
raise ValueError("results mismatch")
if not cp.allclose(bitStrings_res, bitStrings):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/batched_measure.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
nSVs = 2
nIndexBits = 3
svSize = (1 << nIndexBits)
svStride = svSize
adjoint = 0
targets = [2]
nTargets = len(targets)
controls = [0, 1]
nControls = len(controls)
matrixIndices = [1, 0]
nMatrices = len(matrixIndices)
# 2 state vectors are allocated contiguously in a single memory chunk.
d_svs = cp.asarray([[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j],
[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j]], dtype=cp.complex64)
d_svs_res = cp.asarray([[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, -0.4-0.5j],
[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.4+0.5j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.1+0.2j]], dtype=cp.complex64)
# 2 gate matrices are allocated contiguously in a single memory chunk.
# Note: gate matrices can also live on the host.
d_matrices = cp.asarray([[0.0+0.0j, 1.0+0.0j,
1.0+0.0j, 0.0+0.0j],
[1.0+0.0j, 0.0+0.0j,
0.0+0.0j, -1.0+0.0j]], dtype=cp.complex64)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
extraWorkspaceSizeInBytes = cusv.apply_matrix_batched_get_workspace_size(
handle, cudaDataType.CUDA_C_32F, nIndexBits, nSVs, svStride,
cusv.MatrixMapType.MATRIX_INDEXED, matrixIndices, d_matrices.data.ptr,
cudaDataType.CUDA_C_32F, cusv.MatrixLayout.ROW, adjoint, nMatrices,
nTargets, nControls,
ComputeType.COMPUTE_32F)
# allocate external workspace if necessary
if extraWorkspaceSizeInBytes > 0:
workspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply gate
cusv.apply_matrix_batched(
handle, d_svs.data.ptr, cudaDataType.CUDA_C_32F, nIndexBits, nSVs, svStride,
cusv.MatrixMapType.MATRIX_INDEXED, matrixIndices, d_matrices.data.ptr,
cudaDataType.CUDA_C_32F, cusv.MatrixLayout.ROW, adjoint, nMatrices,
targets, nTargets, controls, 0, nControls,
ComputeType.COMPUTE_32F, workspace_ptr, extraWorkspaceSizeInBytes)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(d_svs_res, d_svs):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/batched_gate_application.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nMaxShots = 5
nShots = 5
bitStringLen = 2
bitOrdering = np.asarray([0, 1], dtype=np.int32)
bitStrings = np.empty((nShots,), dtype=np.int64)
bitStrings_expected = np.asarray([0b00, 0b01, 0b10, 0b11, 0b11], dtype=np.int64)
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
# In a real application, random numbers in the range [0, 1) would be used.
randnums = np.asarray([0.1, 0.8, 0.4, 0.6, 0.2], dtype=np.float64)
########################################################################
# cuStateVec handle initialization
handle = cusv.create()
# create sampler and check the size of external workspace
sampler, extraWorkspaceSizeInBytes = cusv.sampler_create(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, nMaxShots)
# allocate external workspace
extraWorkspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)
# sample preprocess
cusv.sampler_preprocess(
handle, sampler, extraWorkspace.ptr, extraWorkspaceSizeInBytes)
# sample bit strings
cusv.sampler_sample(
handle, sampler, bitStrings.ctypes.data, bitOrdering.ctypes.data, bitStringLen,
randnums.ctypes.data, nShots, cusv.SamplerOutput.ASCENDING_ORDER)
# destroy sampler
cusv.sampler_destroy(sampler)
# destroy handle
cusv.destroy(handle)
if not np.allclose(bitStrings, bitStrings_expected):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/sampler.py |
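# A NumPy sketch of the inverse-CDF sampling performed above (the exact
# tie-breaking inside cuStateVec may differ): each random number picks the
# first index whose cumulative probability exceeds it, and the requested bits
# are packed into the output bit string.
import numpy as np
sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
                 0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
bit_ordering = [0, 1]
randnums = np.asarray([0.1, 0.8, 0.4, 0.6, 0.2])
cdf = np.cumsum(np.abs(sv) ** 2)
indices = np.searchsorted(cdf, randnums, side="right")
bitstrings = [sum(((i >> b) & 1) << k for k, b in enumerate(bit_ordering))
              for i in indices]
print(sorted(bitstrings))   # [0b00, 0b01, 0b10, 0b11, 0b11], as in the sample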
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
if cp.cuda.runtime.runtimeGetVersion() < 11020:
raise RuntimeError("memory_handler example WAIVED : This example uses CUDA's "
"built-in stream-ordered memory allocator, which requires "
"CUDA 11.2+.")
nIndexBits = 3
nSvSize = (1 << nIndexBits)
sv = cp.asarray([0.48+1j*0.0, 0.36+1j*0.0, 0.64+1j*0.0, 0.48+1j*0.0,
0.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0],
dtype=cp.complex128)
# gates
adjoint = 0
layout = cusv.MatrixLayout.ROW
# Hadamard gate
hTargets = (2,)
hNTargets = 1
hGate = np.asarray([1/np.sqrt(2)+1j*0.0, 1/np.sqrt(2)+1j*0.0,
1/np.sqrt(2)+1j*0.0, -1/np.sqrt(2)+1j*0.0],
dtype=np.complex128)
# control-SWAP gate
swapTargets = (0, 1)
swapNTargets = 2
swapControls = (2,)
swapNControls = 1
swapGate = np.asarray([1.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0,
0.0+1j*0.0, 0.0+1j*0.0, 1.0+1j*0.0, 0.0+1j*0.0,
0.0+1j*0.0, 1.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0,
0.0+1j*0.0, 0.0+1j*0.0, 0.0+1j*0.0, 1.0+1j*0.0],
dtype=np.complex128)
# observable
basisBits = (2,)
nBasisBits = 1
observable = np.asarray([1.0+1j*0.0, 0.0+1j*0.0,
0.0+1j*0.0, 0.0+1j*0.0], dtype=np.complex128)
# check device config
dev = cp.cuda.Device()
if not dev.attributes['MemoryPoolsSupported']:
raise RuntimeError("memory handler example WAIVED: device does not support CUDA Memory pools")
# avoid shrinking the pool
mempool = cp.cuda.runtime.deviceGetDefaultMemPool(dev.id)
if int(cp.__version__.split('.')[0]) >= 10:
# this API is exposed since CuPy v10
cp.cuda.runtime.memPoolSetAttribute(
mempool, cp.cuda.runtime.cudaMemPoolAttrReleaseThreshold, 0xffffffffffffffff) # = UINT64_MAX
# custatevec handle initialization
handle = cusv.create()
stream = cp.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
# device memory handler
# In Python we support 3 kinds of calling conventions as of v22.03; this
# example uses Python callables. Please refer to the documentation of
# set_device_mem_handler() for further detail.
def malloc(size, stream):
return cp.cuda.runtime.mallocAsync(size, stream)
def free(ptr, size, stream):
cp.cuda.runtime.freeAsync(ptr, stream)
handler = (malloc, free, "memory_handler python example")
cusv.set_device_mem_handler(handle, handler)
# apply Hadamard gate
cusv.apply_matrix(
handle, sv.data.ptr, cudaDataType.CUDA_C_64F, nIndexBits,
hGate.ctypes.data, cudaDataType.CUDA_C_64F, layout, adjoint,
hTargets, hNTargets, 0, 0, 0, ComputeType.COMPUTE_DEFAULT,
0, 0) # last two 0s indicate we're using our own mempool
# apply controlled-SWAP gate
cusv.apply_matrix(
handle, sv.data.ptr, cudaDataType.CUDA_C_64F, nIndexBits,
swapGate.ctypes.data, cudaDataType.CUDA_C_64F, layout, adjoint,
swapTargets, swapNTargets, swapControls, 0, swapNControls, ComputeType.COMPUTE_DEFAULT,
0, 0) # last two 0s indicate we're using our own mempool
# apply Hadamard gate
cusv.apply_matrix(
handle, sv.data.ptr, cudaDataType.CUDA_C_64F, nIndexBits,
hGate.ctypes.data, cudaDataType.CUDA_C_64F, layout, adjoint,
hTargets, hNTargets, 0, 0, 0, ComputeType.COMPUTE_DEFAULT,
0, 0) # last two 0s indicate we're using our own mempool
# compute expectation
expect = np.empty((1,), dtype=np.float64)
cusv.compute_expectation(
handle, sv.data.ptr, cudaDataType.CUDA_C_64F, nIndexBits,
expect.ctypes.data, cudaDataType.CUDA_R_64F,
observable.ctypes.data, cudaDataType.CUDA_C_64F, layout,
basisBits, nBasisBits, ComputeType.COMPUTE_DEFAULT,
0, 0) # last two 0s indicate we're using our own mempool
stream.synchronize()
# destroy handle
cusv.destroy(handle)
expectationValueResult = 0.9608
if np.isclose(expect, expectationValueResult):
print("memory_handler example PASSED")
else:
raise RuntimeError("memory_handler example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/memory_handler.py |
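# The handler above wraps CUDA's stream-ordered allocator, which the sample's
# malloc/free callables invoke via cp.cuda.runtime; a minimal stand-alone
# round-trip sketch (requires CUDA 11.2+ and memory-pool support, as checked
# by the sample):
import cupy as cp
stream = cp.cuda.Stream()
ptr = cp.cuda.runtime.mallocAsync(1024, stream.ptr)   # allocate on the stream
cp.cuda.runtime.freeAsync(ptr, stream.ptr)            # free on the same stream
stream.synchronize()
print("stream-ordered alloc/free round-trip OK")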
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# Note: cuQuantum Python follows the cuQuantum SDK version, which is now
# switched to YY.MM and is different from individual libraries' (semantic)
# versioning scheme.
__version__ = '23.06.0'
| cuQuantum-main | python/cuquantum/_version.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cuquantum import custatevec
from cuquantum import cutensornet
from cuquantum.cutensornet import (
contract, contract_path, einsum, einsum_path, tensor, tensor_qualifiers_dtype, Network, BaseCUDAMemoryManager, MemoryPointer,
NetworkOptions, OptimizerInfo, OptimizerOptions, PathFinderOptions, ReconfigOptions, SlicerOptions, CircuitToEinsum)
from cuquantum.utils import ComputeType, cudaDataType, libraryPropertyType
from cuquantum._version import __version__
# We patch all enum values so that they have the correct docstrings
for enum in (
custatevec.Pauli,
custatevec.MatrixLayout,
custatevec.MatrixType,
custatevec.MatrixMapType,
custatevec.Collapse,
custatevec.SamplerOutput,
custatevec.DeviceNetworkType,
cutensornet.NetworkAttribute,
custatevec.CommunicatorType,
custatevec.DataTransferType,
custatevec.StateVectorType,
cutensornet.ContractionOptimizerInfoAttribute,
cutensornet.ContractionOptimizerConfigAttribute,
cutensornet.ContractionAutotunePreferenceAttribute,
cutensornet.WorksizePref,
cutensornet.Memspace,
cutensornet.GraphAlgo,
cutensornet.MemoryModel,
cutensornet.OptimizerCost,
cutensornet.TensorSVDConfigAttribute,
cutensornet.TensorSVDNormalization,
cutensornet.TensorSVDPartition,
cutensornet.TensorSVDInfoAttribute,
cutensornet.GateSplitAlgo,
cutensornet.StatePurity,
cutensornet.MarginalAttribute,
cutensornet.SamplerAttribute,
):
cutensornet._internal.enum_utils.add_enum_class_doc(enum, chomp="_ATTRIBUTE|_PREFERENCE_ATTRIBUTE")
del enum, utils
| cuQuantum-main | python/cuquantum/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import os
import site
import sys
import cuquantum # get the shared libraries loaded
def get_lib_path(name):
"""Get the loaded shared library path."""
# Ideally we should call dl_iterate_phdr or dladdr to do the job, but this
# is simpler and not bad; the former two are not strictly portable anyway
# (not part of POSIX). Obviously this only works on Linux!
try:
with open('/proc/self/maps') as f:
lib_map = f.read()
except FileNotFoundError as e:
raise NotImplementedError("This utility is available only on Linux.") from e
lib = set()
for line in lib_map.split('\n'):
if name in line:
fields = line.split()
lib.add(fields[-1]) # pathname is the last field, check "man proc"
if len(lib) == 0:
raise ValueError(f"library {name} is not loaded")
elif len(lib) > 1:
# This could happen when, e.g., a library exists in both the user env
# and LD_LIBRARY_PATH, and somehow both copies get loaded. This is a
# messy problem, but let's work around it by assuming the one in the
# user env is preferred.
lib2 = set()
for s in [site.getusersitepackages()] + site.getsitepackages():
for path in lib:
if path.startswith(s):
lib2.add(path)
if len(lib2) != 1:
raise RuntimeError(f"cannot find the unique copy of {name}: {lib}")
else:
lib = lib2
return lib.pop()
def _get_cuquantum_libs():
paths = set()
for lib in ('custatevec', 'cutensornet', 'cutensor'):
path = os.path.normpath(get_lib_path(f"lib{lib}.so"))
paths.add(path)
return tuple(paths)
def _get_cuquantum_includes():
paths = set()
for path in _get_cuquantum_libs():
path = os.path.normpath(os.path.join(os.path.dirname(path), '..'))
if not os.path.isdir(os.path.join(path, 'include')):
path = os.path.normpath(os.path.join(path, '../include'))
else:
path = os.path.join(path, 'include')
assert os.path.isdir(path), f"path={path} is invalid"
paths.add(path)
return tuple(paths)
def _get_cuquantum_target(target):
target = f"lib{target}.so"
libs = [os.path.basename(lib) for lib in _get_cuquantum_libs()]
for lib in libs:
if target in lib:
lib = '.'.join(lib.split('.')[:3]) # keep SONAME
flag = f"-l:{lib} "
break
else:
assert False
return flag
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--includes', action='store_true',
help='get cuQuantum include flags')
parser.add_argument('--libs', action='store_true',
help='get cuQuantum linker flags')
parser.add_argument('--target', action='append', default=[],
choices=('custatevec', 'cutensornet'),
help='get the linker flag for the target cuQuantum component')
args = parser.parse_args()
if not sys.argv[1:]:
parser.print_help()
sys.exit(1)
if args.includes:
out = ' '.join(f"-I{path}" for path in _get_cuquantum_includes())
print(out, end=' ')
if args.libs:
paths = set([os.path.dirname(path) for path in _get_cuquantum_libs()])
out = ' '.join(f"-L{path}" for path in paths)
print(out, end=' ')
flag = ''
for target in args.target:
flag += _get_cuquantum_target(target)
if target == 'cutensornet':
flag += _get_cuquantum_target('cutensor')
print(flag)
| cuQuantum-main | python/cuquantum/__main__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of types for defining options to cutensornet.
"""
__all__ = ['NetworkOptions', 'OptimizerInfo', 'OptimizerOptions', 'PathFinderOptions', 'ReconfigOptions', 'SlicerOptions']
import collections
from dataclasses import dataclass
from logging import Logger
from typing import Dict, Hashable, Iterable, Literal, Mapping, Optional, Tuple, Union
import cupy as cp
import cuquantum
from cuquantum import cutensornet as cutn
from ._internal import enum_utils
from ._internal import formatters
from ._internal.mem_limit import MEM_LIMIT_RE_PCT, MEM_LIMIT_RE_VAL, MEM_LIMIT_DOC
from .memory import BaseCUDAMemoryManager
@dataclass
class NetworkOptions(object):
"""A data class for providing options to the :class:`cuquantum.Network` object.
Attributes:
compute_type (cuquantum.ComputeType): CUDA compute type. A suitable compute type will be selected if not specified.
device_id: CUDA device ordinal (used if the tensor network resides on the CPU). Device 0 will be used if not specified.
handle: cuTensorNet library handle. A handle will be created if one is not provided.
logger (logging.Logger): Python Logger object. The root logger will be used if a logger object is not provided.
memory_limit: Maximum memory available to cuTensorNet. It can be specified as a value (with optional suffix like
K[iB], M[iB], G[iB]) or as a percentage. The default is 80%.
blocking: A flag specifying the behavior of the execution functions and methods,
such as :meth:`Network.autotune` and :meth:`Network.contract`.
When ``blocking`` is ``True``, these methods do not return until the operation is complete. When blocking is ``"auto"``,
the methods return immediately when the input tensors are on the GPU. The execution methods always block when the
input tensors are on the CPU. The default is ``True``.
allocator: An object that supports the :class:`BaseCUDAMemoryManager` protocol, used to draw device memory. If an
allocator is not provided, a memory allocator from the library package will be used
(:func:`torch.cuda.caching_allocator_alloc` for PyTorch operands, :func:`cupy.cuda.alloc` otherwise).
"""
compute_type : Optional[int] = None
device_id : Optional[int] = None
handle : Optional[int] = None
logger : Optional[Logger] = None
memory_limit : Optional[Union[int, str]] = r'80%'
blocking : Literal[True, "auto"] = True
allocator : Optional[BaseCUDAMemoryManager] = None
def __post_init__(self):
# Defer creating handle as well as computing the memory limit till we know the device the network is on.
if self.compute_type is not None:
self.compute_type = cuquantum.ComputeType(self.compute_type)
if self.device_id is None:
self.device_id = 0
if not isinstance(self.memory_limit, (int, float)):
m1 = MEM_LIMIT_RE_PCT.match(self.memory_limit)
if m1:
factor = float(m1.group('value'))
if factor <= 0 or factor > 100:
raise ValueError("The memory limit percentage must be in the range (0, 100].")
m2 = MEM_LIMIT_RE_VAL.match(self.memory_limit)
if not (m1 or m2):
raise ValueError(MEM_LIMIT_DOC % self.memory_limit)
if self.blocking != True and self.blocking != "auto":
raise ValueError("The value specified for blocking must be either True or 'auto'.")
if self.allocator is not None and not isinstance(self.allocator, BaseCUDAMemoryManager):
raise TypeError("The allocator must be an object of type that fulfils the BaseCUDAMemoryManager protocol.")
# Generate the options dataclasses from ContractionOptimizerConfigAttributes.
_create_options = enum_utils.create_options_class_from_enum
_opt_conf_enum = cutn.ContractionOptimizerConfigAttribute
_get_dtype = cutn.contraction_optimizer_config_get_attribute_dtype
PathFinderOptions = _create_options('PathFinderOptions', _opt_conf_enum, _get_dtype, "path finder", 'GRAPH_(?P<option_name>.*)')
SlicerOptions = _create_options('SlicerOptions', _opt_conf_enum, _get_dtype, 'slicer', 'SLICER_(?P<option_name>.*)')
ReconfigOptions = _create_options('ReconfigOptions', _opt_conf_enum, _get_dtype, 'reconfiguration', 'RECONFIG_(?P<option_name>.*)')
del _create_options, _opt_conf_enum, _get_dtype
PathType = Iterable[Tuple[int, int]]
ModeSequenceType = Iterable[Hashable]
ModesSequenceType = Iterable[ModeSequenceType]
ModeExtentSequenceType = Iterable[Tuple[Hashable, int]]
KeywordArgType = Dict
@dataclass
class OptimizerOptions(object):
"""A data class for providing options to the cuTensorNet optimizer.
Attributes:
samples: Number of samples for hyperoptimization. See `CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_SAMPLES`.
threads: Number of threads for the hyperoptimizer. See `CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_HYPER_NUM_THREADS`.
path: Options for the path finder (:class:`~cuquantum.PathFinderOptions` object or dict containing the ``(parameter, value)``
items for ``PathFinderOptions``). Alternatively, the path can be provided as a sequence of pairs in the
:func:`numpy.einsum_path` format.
slicing: Options for the slicer (:class:`~cuquantum.SlicerOptions` object or dict containing the ``(parameter, value)`` items for
``SlicerOptions``). Alternatively, a sequence of sliced modes or sequence of ``(sliced mode, sliced extent)`` pairs
can be directly provided.
reconfiguration: Options for the reconfiguration algorithm as a :class:`~cuquantum.ReconfigOptions` object or dict containing the
``(parameter, value)`` items for ``ReconfigOptions``.
seed: Optional seed for the random number generator. See `CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_SEED`.
cost_function: The objective function to use for finding the optimal contraction path.
See `CUTENSORNET_CONTRACTION_OPTIMIZER_CONFIG_COST_FUNCTION_OBJECTIVE`.
"""
samples : Optional[int] = None
threads : Optional[int] = None
path : Optional[Union[PathFinderOptions, PathType]] = None
slicing : Optional[Union[SlicerOptions, ModeSequenceType, ModeExtentSequenceType]] = None
reconfiguration : Optional[ReconfigOptions] = None
seed : Optional[int] = None
cost_function: Optional[int] = None
def _check_option(self, option, option_class, checker=None):
if isinstance(option, option_class):
return option
if option is None:
option = option_class()
elif isinstance(option, KeywordArgType):
option = option_class(**option)
elif checker is not None:
checker()
return option
def _check_specified_path(self):
if not isinstance(self.path, collections.abc.Sequence):
raise TypeError("The path must be a sequence of pairs in the linear format accepted by numpy.einsum_path.")
for pair in self.path:
if not isinstance(pair, collections.abc.Sequence) or len(pair) != 2:
raise TypeError("The path must be a sequence of pairs in the linear format accepted by numpy.einsum_path.")
def _check_specified_slices(self):
if not isinstance(self.slicing, collections.abc.Sequence):
raise TypeError("Slicing must be specified as a sequence of modes or as a sequence of (mode, extent) pairs.")
pair = False
for slc in self.slicing:
if isinstance(slc, collections.abc.Sequence) and not isinstance(slc, str):
pair = True
break
for s in self.slicing:
if pair and (isinstance(s, str) or not isinstance(s, collections.abc.Sequence) or len(s) != 2):
raise TypeError("Slicing must be specified as a sequence of modes or as a sequence of (mode, extent) pairs.")
def _check_int(self, attribute, name):
message = f"Invalid value ({attribute}) for '{name}'. Expect positive integer or None."
if not isinstance(attribute, (type(None), int)):
raise ValueError(message)
if isinstance(attribute, int) and attribute < 0:
raise ValueError(message)
def __post_init__(self):
self._check_int(self.samples, "samples")
self.path = self._check_option(self.path, PathFinderOptions, self._check_specified_path)
self.slicing = self._check_option(self.slicing, SlicerOptions, self._check_specified_slices)
self.reconfiguration = self._check_option(self.reconfiguration, ReconfigOptions, None)
self._check_int(self.seed, "seed")
if self.cost_function is not None:
self.cost_function = cuquantum.cutensornet.OptimizerCost(self.cost_function)
@dataclass
class OptimizerInfo(object):
"""A data class for capturing optimizer information.
Attributes:
largest_intermediate: The number of elements in the largest intermediate tensor. See `CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_LARGEST_TENSOR`.
opt_cost: The FLOP count of the optimized contraction path. See `CUTENSORNET_CONTRACTION_OPTIMIZER_INFO_FLOP_COUNT`.
path: The contraction path as a sequence of pairs in the :func:`numpy.einsum_path` format.
slices: A sequence of ``(sliced mode, sliced extent)`` pairs.
num_slices : The number of slices.
intermediate_modes: A sequence of mode labels for all intermediate tensors
"""
largest_intermediate : float
opt_cost : float
path : PathType
slices : ModeExtentSequenceType
num_slices : int
intermediate_modes : ModesSequenceType
def __str__(self):
path = [str(p) for p in self.path]
slices = [str(s) for s in self.slices]
intermediate_modes = [str(m) for m in self.intermediate_modes]
s = f"""Optimizer Information:
Largest intermediate = {formatters.MemoryStr(self.largest_intermediate, base_unit='Elements')}
Optimized cost = {self.opt_cost:.3e} FLOPs
Path = {formatters.array2string(path)}"""
if len(slices):
s += f"""
Number of slices = {self.num_slices}
Slices = {formatters.array2string(slices)}"""
else:
s += """
Slicing not needed."""
s += f"""
Intermediate tensor mode labels = {formatters.array2string(intermediate_modes)}"""
return s
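# Illustrative (a sketch): an OptimizerInfo instance can be inspected programmatically
# in addition to being printed, e.g. given ``path, info = contract_path(expr, *operands)``:
#   print(info.num_slices, info.opt_cost, info.path)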
| cuQuantum-main | python/cuquantum/cutensornet/configuration.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Tensor network contraction with the standard einsum interface using cutensornet.
"""
__all__ = ['contract', 'contract_path', 'einsum', 'einsum_path', 'Network']
import collections
import dataclasses
import logging
import cupy as cp
import numpy as np
from cuquantum import cutensornet as cutn
from . import configuration
from . import memory
from ._internal import einsum_parser
from ._internal import formatters
from ._internal import optimizer_ifc
from ._internal import tensor_wrapper
from ._internal import typemaps
from ._internal import utils
class InvalidNetworkState(Exception):
pass
class Network:
"""
    Network(subscripts, *operands, qualifiers=None, options=None)
Create a tensor network object specified as an Einstein summation expression.
The Einstein summation convention provides an elegant way of representing many tensor network operations. This object
allows the user to invest considerable effort into computing the best contraction path as well as autotuning the contraction
upfront for repeated contractions over the same network *topology* (different input tensors, or "operands", with the same
Einstein summation expression). Also see :meth:`~Network.contract_path` and :meth:`autotune`.
For the Einstein summation expression, both the explicit and implicit forms are supported.
In the implicit form, the output mode labels are inferred from the summation expression and *reordered lexicographically*.
An example is the expression ``'ij,jh'``, for which the output mode labels are ``'hi'``. (This corresponds to a matrix
multiplication followed by a transpose.)
In the explicit form, output mode labels can be directly stated following the identifier ``'->'`` in the summation expression.
An example is the expression ``'ij,jh->ih'`` (which corresponds to a matrix multiplication).
To specify an Einstein summation expression, both the subscript format (as shown above) and the interleaved format
are supported.
The interleaved format is an alternative way for specifying the operands and their mode labels as
``Network(op0, modes0, op1, modes1, ..., [modes_out])``, where ``opN``
is the N-th operand and ``modesN`` is a sequence of hashable and comparable objects (strings, integers, etc) representing the
N-th operand's mode labels.
Ellipsis broadcasting is supported.
Additional information on various operations on the network can be obtained by passing in a :class:`logging.Logger` object
to :class:`NetworkOptions` or by setting the appropriate options in the root logger object, which is used by default:
>>> import logging
>>> logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
Args:
subscripts: The mode labels (subscripts) defining the Einstein summation expression as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified using the Einstein summation convention.
operands: A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
qualifiers: Specify the tensor qualifiers as a :class:`numpy.ndarray` of :class:`~cuquantum.tensor_qualifiers_dtype` objects
of length equal to the number of operands.
options: Specify options for the tensor network as a :class:`~cuquantum.NetworkOptions` object. Alternatively, a `dict`
containing the parameters for the ``NetworkOptions`` constructor can also be provided. If not specified,
the value will be set to the default-constructed ``NetworkOptions`` object.
See Also:
:meth:`~Network.contract_path`, :meth:`autotune`, :meth:`~Network.contract`, :meth:`reset_operands`
Examples:
>>> from cuquantum import Network
>>> import numpy as np
Define the parameters of the tensor network:
>>> expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
>>> shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
Create the input tensors using NumPy:
>>> operands = [np.random.rand(*shape) for shape in shapes]
Create a :class:`Network` object:
>>> n = Network(expr, *operands)
Find the best contraction order:
>>> path, info = n.contract_path({'samples': 500})
Autotune the network:
>>> n.autotune(iterations=5)
Perform the contraction. The result is of the same type and on the same device as the operands:
>>> r1 = n.contract()
Reset operands to new values:
>>> operands = [i*operand for i, operand in enumerate(operands, start=1)]
>>> n.reset_operands(*operands)
Get the result of the new contraction:
>>> r2 = n.contract()
>>> from math import factorial
>>> np.allclose(r2, factorial(len(operands))*r1)
True
Finally, free network resources. If this call isn't made, it may hinder further operations (especially if the
network is large) since the memory will be released only when the object goes out of scope. (*To avoid having
to explicitly make this call, it is recommended to use the* :class:`Network` *object as a context manager*.)
>>> n.free()
If the operands are on the GPU, they can also be updated using in-place operations. In this case, the call
to :meth:`reset_operands` can be skipped -- subsequent :meth:`~Network.contract` calls will use the same
operands (with updated contents). The following example illustrates this using CuPy operands and also demonstrates
the usage of a :class:`Network` context (so as to skip calling :meth:`free`):
>>> import cupy as cp
>>> expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
>>> shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
>>> operands = [cp.random.rand(*shape) for shape in shapes]
>>>
>>> with Network(expr, *operands) as n:
... path, info = n.contract_path({'samples': 500})
... n.autotune(iterations=5)
...
... # Perform the contraction
... r1 = n.contract()
...
... # Update the operands in place
... for i, operand in enumerate(operands, start=1):
... operand *= i
...
... # Perform the contraction with the updated operand values
... r2 = n.contract()
...
... # The resources used by the network are automatically released when the context ends.
>>>
>>> from math import factorial
>>> cp.allclose(r2, factorial(len(operands))*r1)
array(True)
PyTorch CPU and GPU tensors can be passed as input operands in the same fashion.
See :func:`contract` for more examples on specifying the Einstein summation expression as well
as specifying options for the tensor network and the optimizer.
"""
def __init__(self, *operands, qualifiers=None, options=None):
"""
        __init__(subscripts, *operands, qualifiers=None, options=None)
"""
options = utils.check_or_create_options(configuration.NetworkOptions, options, "network options")
self.options = options
# Logger.
self.logger = options.logger if options.logger is not None else logging.getLogger()
self.logger.info(f"CUDA runtime version = {cutn.get_cudart_version()}")
self.logger.info(f"cuTensorNet version = {cutn.MAJOR_VER}.{cutn.MINOR_VER}.{cutn.PATCH_VER}")
self.logger.info("Beginning network creation...")
# Parse Einsum expression.
self.operands, self.inputs, self.output, self.size_dict, self.mode_map_user_to_ord, self.mode_map_ord_to_user, self.is_interleaved = einsum_parser.parse_einsum(*operands)
# Copy operands to device if needed.
self.network_location = 'cuda'
self.device_id = utils.get_network_device_id(self.operands)
if self.device_id is None:
self.network_location = 'cpu'
self.device_id = options.device_id
self.operands = tensor_wrapper.to(self.operands, self.device_id)
# Set blocking or non-blocking behavior.
self.blocking = self.options.blocking is True or self.network_location == 'cpu'
if self.blocking:
self.call_prologue = "This call is blocking and will return only after the operation is complete."
else:
self.call_prologue = "This call is non-blocking and will return immediately after the operation is launched on the device."
# Infer the library package the operands belong to.
self.package = utils.get_operands_package(self.operands)
# The output class is that of the first wrapped device operand.
self.output_class = self.operands[0].__class__
self.device = cp.cuda.Device(self.device_id)
# Set memory allocator.
self.allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[self.package](self.device_id, self.logger)
# Set memory limit.
self.memory_limit = utils.get_memory_limit(self.options.memory_limit, self.device)
self.logger.info(f"The memory limit is {formatters.MemoryStr(self.memory_limit)}.")
# Define data types.
self.data_type = utils.get_operands_dtype(self.operands)
if self.data_type not in typemaps.NAME_TO_COMPUTE_TYPE:
message = f"""Unsupported data type.
The data type '{self.data_type}' is currently not supported.
"""
raise ValueError(message)
self.compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[self.data_type]
# Prepare data for cutensornet.
num_inputs = len(self.inputs)
num_modes_out = len(self.output)
extents_in = tuple(o.shape for o in self.operands)
strides_in = tuple(o.strides for o in self.operands)
self.operands_data = utils.get_operands_data(self.operands)
modes_in = tuple(tuple(m for m in _input) for _input in self.inputs)
num_modes_in = tuple(len(m) for m in modes_in)
self.qualifiers_in = utils.check_tensor_qualifiers(qualifiers, cutn.tensor_qualifiers_dtype, num_inputs)
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
stream = None
self.logger.debug("Beginning output tensor creation...")
self.contraction, self.contraction_output_event, modes_out, extents_out, strides_out = utils.create_output_tensor(
self.output_class, self.package, self.output, self.size_dict, self.device_id, stream, self.data_type)
self.logger.debug("The output tensor has been created.")
# Create/set handle.
if options.handle is not None:
self.own_handle = False
self.handle = options.handle
else:
self.own_handle = True
with utils.device_ctx(self.device_id):
self.handle = cutn.create()
# Network definition.
self.network = cutn.create_network_descriptor(self.handle, num_inputs,
num_modes_in, extents_in, strides_in, modes_in, self.qualifiers_in, # inputs
num_modes_out, extents_out, strides_out, modes_out, # output
typemaps.NAME_TO_DATA_TYPE[self.data_type], self.compute_type)
# Keep output extents for creating new tensors, if needed.
self.extents_out = extents_out
# Path optimization attributes.
self.optimizer_config_ptr, self.optimizer_info_ptr = None, None
self.optimized = False
# Workspace attributes.
self.workspace_desc = cutn.create_workspace_descriptor(self.handle)
self.workspace_ptr, self.workspace_size = None, None
# Contraction plan attributes.
self.plan = None
self.planned = False
# Autotuning attributes.
self.autotune_pref_ptr = None
self.autotuned = False
# Attributes to establish stream ordering.
self.workspace_stream = None
self.last_compute_event = None
self.valid_state = True
self.logger.info("The network has been created.")
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.free()
def _check_valid_network(self, *args, **kwargs):
"""
"""
if not self.valid_state:
raise InvalidNetworkState("The network cannot be used after resources are free'd")
def _check_optimized(self, *args, **kwargs):
"""
"""
what = kwargs['what']
if not self.optimized:
raise RuntimeError(f"{what} cannot be performed before contract_path() has been called.")
def _check_planned(self, *args, **kwargs):
"""
"""
what = kwargs['what']
if not self.planned:
raise RuntimeError(f"Internal Error: {what} cannot be performed before planning has been done.")
def _free_plan_resources(self, exception=None):
"""
Free resources allocated in network contraction planning.
"""
if self.plan is not None:
cutn.destroy_contraction_plan(self.plan)
self.plan = None
return True
def _free_workspace_memory(self, exception=None):
"""
Free workspace by releasing the MemoryPointer object.
"""
self.workspace_ptr = None
return True
def _free_path_resources(self, exception=None):
"""
Free resources allocated in path computation.
"""
if self.optimizer_config_ptr is not None:
cutn.destroy_contraction_optimizer_config(self.optimizer_config_ptr)
self.optimizer_config_ptr = None
if self.optimizer_info_ptr is not None:
cutn.destroy_contraction_optimizer_info(self.optimizer_info_ptr)
self.optimizer_info_ptr = None
self._free_workspace_memory()
self.workspace_size = None
self._free_plan_resources()
return True
@utils.precondition(_check_valid_network)
@utils.precondition(_check_optimized, "Workspace memory allocation")
@utils.atomic(_free_workspace_memory, method=True)
def _allocate_workspace_memory_perhaps(self, stream, stream_ctx):
if self.workspace_ptr is not None:
return
assert self.workspace_size is not None, "Internal Error."
self.logger.debug("Allocating memory for contracting the tensor network...")
with utils.device_ctx(self.device_id), stream_ctx:
try:
self.workspace_ptr = self.allocator.memalloc(self.workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
self.workspace_stream = stream
self.logger.debug(f"Finished allocating memory of size {formatters.MemoryStr(self.workspace_size)} for contraction in the context of stream {self.workspace_stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(self.workspace_ptr)
cutn.workspace_set_memory(self.handle, self.workspace_desc, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH, device_ptr, self.workspace_size)
self.logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
@utils.precondition(_check_valid_network)
@utils.precondition(_check_optimized, "Workspace size calculation")
def _calculate_workspace_size(self):
"""
Allocate workspace for cutensornet.
"""
# Release workspace already allocated, if any, because the new requirements are likely different.
self.workspace_ptr = None
cutn.workspace_compute_contraction_sizes(self.handle, self.network, self.optimizer_info_ptr, self.workspace_desc)
min_size = cutn.workspace_get_memory_size(self.handle, self.workspace_desc, cutn.WorksizePref.MIN, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
max_size = cutn.workspace_get_memory_size(self.handle, self.workspace_desc, cutn.WorksizePref.MAX, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if self.memory_limit < min_size:
message = f"""Insufficient memory.
The memory limit specified is {self.memory_limit}, while the minimum workspace size needed is {min_size}.
"""
raise RuntimeError(message)
self.workspace_size = max_size if max_size < self.memory_limit else self.memory_limit
self.logger.info(f"The workspace size requirements range from {formatters.MemoryStr(min_size)} to "\
f"{formatters.MemoryStr(max_size)}.")
self.logger.info(f"The workspace size has been set to {formatters.MemoryStr(self.workspace_size)}.")
# Set workspace size to enable contraction planning. The device pointer will be set later during allocation.
cutn.workspace_set_memory(self.handle, self.workspace_desc, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH, 0, self.workspace_size)
@utils.precondition(_check_valid_network)
@utils.precondition(_check_optimized, "Planning")
@utils.atomic(_free_plan_resources, method=True)
def _create_plan(self):
"""
Create network plan.
"""
self.logger.debug("Creating contraction plan...")
if self.plan:
cutn.destroy_contraction_plan(self.plan)
self.plan = cutn.create_contraction_plan(self.handle, self.network, self.optimizer_info_ptr, self.workspace_desc)
self.logger.debug("Finished creating contraction plan.")
def _set_opt_config_options(self, options):
"""
Set ContractionOptimizerConfig options if the value is not None.
Args:
options: A PathFinderOptions, SlicerOptions, or ReconfigOptions object.
"""
for field in dataclasses.fields(options):
name, value = field.name, getattr(options, field.name)
if value is None:
continue
enum = options.option_to_enum[name]
self._set_opt_config_option(name, enum, value)
def _set_opt_config_option(self, name, enum, value):
"""
Set a single ContractionOptimizerConfig option if the value is not None.
Args:
name: The name of the attribute.
enum: A ContractionOptimizerConfigAttribute to set.
            value: The value to which the attribute is set.
"""
if value is None:
return
dtype = cutn.contraction_optimizer_config_get_attribute_dtype(enum)
value = np.array((value,), dtype=dtype)
cutn.contraction_optimizer_config_set_attribute(self.handle, self.optimizer_config_ptr, enum, value.ctypes.data, value.dtype.itemsize)
self.logger.info(f"The optimizer config attribute '{name}' has been set to {value[0]}.")
@utils.precondition(_check_valid_network)
def _set_optimizer_options(self, optimize):
"""
"""
# Loop over the options and set if not None.
assert isinstance(optimize.path, configuration.PathFinderOptions), "Internal error."
# PathFinder options.
self._set_opt_config_options(optimize.path)
# Slicer options.
if isinstance(optimize.slicing, configuration.SlicerOptions):
self._set_opt_config_options(optimize.slicing)
# Reconfiguration options.
self._set_opt_config_options(optimize.reconfiguration)
# The "global" options.
ConfEnum = cutn.ContractionOptimizerConfigAttribute
enum = ConfEnum.HYPER_NUM_SAMPLES
self._set_opt_config_option('samples', enum, optimize.samples)
enum = ConfEnum.HYPER_NUM_THREADS
self._set_opt_config_option('threads', enum, optimize.threads)
enum = ConfEnum.SEED
self._set_opt_config_option('seed', enum, optimize.seed)
enum = ConfEnum.COST_FUNCTION_OBJECTIVE
self._set_opt_config_option('cost_function', enum, optimize.cost_function)
@utils.precondition(_check_valid_network)
@utils.atomic(_free_path_resources, method=True)
def contract_path(self, optimize=None, **kwargs):
"""
contract_path(optimize=None)
Compute the best contraction path together with any slicing that is needed to ensure that the contraction can be
performed within the specified memory limit.
Args:
optimize : This parameter specifies options for path optimization as an :class:`OptimizerOptions` object. Alternatively, a
dictionary containing the parameters for the ``OptimizerOptions`` constructor can also be provided. If not
specified, the value will be set to the default-constructed ``OptimizerOptions`` object.
Returns:
tuple: A 2-tuple (``path``, ``opt_info``):
- ``path`` : A sequence of pairs of operand ordinals representing the best contraction order in the
:func:`numpy.einsum_path` format.
- ``opt_info`` : An object of type :class:`OptimizerInfo` containing information about the best contraction order.
Notes:
- If the path is provided, the user has to set the sliced modes too if slicing is desired.
"""
binary_contraction_optimization = len(self.operands) == 2 and optimize is None
optimize = utils.check_or_create_options(configuration.OptimizerOptions, optimize, "path optimizer options")
internal_options = dict()
internal_options['create_plan'] = utils.Value(True, validator=lambda v: isinstance(v, bool))
utils.check_and_set_options(internal_options, kwargs)
if self.optimizer_config_ptr is None:
self.optimizer_config_ptr = cutn.create_contraction_optimizer_config(self.handle)
if self.optimizer_info_ptr is None:
self.optimizer_info_ptr = cutn.create_contraction_optimizer_info(self.handle, self.network)
opt_info_ifc = optimizer_ifc.OptimizerInfoInterface(self)
# Special case worth optimizing (when the "optimize" option is not specified), as it's an extremely common use case with a trivial path.
if binary_contraction_optimization:
optimize.path = [(0, 1)]
# Compute path (or set provided path).
if isinstance(optimize.path, configuration.PathFinderOptions):
# Set optimizer options.
self._set_optimizer_options(optimize)
# Find "optimal" path.
self.logger.info("Finding optimal path as well as sliced modes...")
try:
cutn.contraction_optimize(
self.handle, self.network, self.optimizer_config_ptr, self.memory_limit, self.optimizer_info_ptr)
except cutn.cuTensorNetError as e:
if 'INTERRUPTED' in str(e):
raise KeyboardInterrupt from e
raise
self.logger.info("Finished finding optimal path as well as sliced modes.")
else:
self.logger.info("Setting user-provided path...")
opt_info_ifc.path = optimize.path
self.logger.info("Finished setting user-provided path.")
# Set slicing if provided.
if not isinstance(optimize.slicing, configuration.SlicerOptions):
self.logger.info("Setting user-provided sliced modes...")
opt_info_ifc.sliced_mode_extent = optimize.slicing
self.logger.info("Finished setting user-provided sliced modes.")
self.num_slices = opt_info_ifc.num_slices
assert self.num_slices > 0
# Create OptimizerInfo object.
largest_intermediate = opt_info_ifc.largest_intermediate
opt_cost = opt_info_ifc.flop_count
path = opt_info_ifc.path
slices = opt_info_ifc.sliced_mode_extent
aux_modes = opt_info_ifc.intermediate_modes
opt_info = configuration.OptimizerInfo(
largest_intermediate, opt_cost, path, slices, self.num_slices, aux_modes)
# If we are not logging, avoid the overhead of creating the string representation of opt_info.
if self.logger.handlers:
self.logger.info(f"{opt_info}")
self.optimized = True
if internal_options['create_plan']:
# Calculate workspace size required.
self._calculate_workspace_size()
# Create plan.
self._create_plan()
self.planned = True
else:
self.planned = False
return opt_info.path, opt_info
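    # Illustrative (a sketch): a previously found path can be fed back in on a later
    # call to skip the path finder, e.g. for a Network instance ``n``:
    #   path, info = n.contract_path({'samples': 100})
    #   n.contract_path({'path': path})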
def _set_autotune_options(self, options):
"""
Set ContractionAutotunePreference options if the value is not None.
Args:
options: dict of name : (enum, value) AutotunePreference parameters.
"""
for name in options:
enum, value = options[name]
if value is None:
continue
self._set_autotune_option(name, enum, value)
def _set_autotune_option(self, name, enum, value):
"""
Set a single ContractionAutotunePreference option if the value is not None.
Args:
name: The name of the attribute.
enum: A ContractionAutotunePreferenceAttribute to set.
            value: The value to which the attribute is set.
"""
if value is None:
return
dtype = cutn.contraction_autotune_preference_get_attribute_dtype(enum)
value = np.array((value,), dtype=dtype)
cutn.contraction_autotune_preference_set_attribute(self.handle, self.autotune_pref_ptr, enum, value.ctypes.data, value.dtype.itemsize)
self.logger.info(f"The autotune preference '{name}' has been set to {value[0]}.")
@utils.precondition(_check_valid_network)
@utils.precondition(_check_optimized, "Autotuning")
@utils.precondition(_check_planned, "Autotuning")
def autotune(self, *, iterations=3, stream=None):
"""Autotune the network to reduce the contraction cost.
This is an optional step that is recommended if the :class:`Network` object is used to perform multiple contractions.
Args:
iterations: The number of iterations for autotuning. See `CUTENSORNET_CONTRACTION_AUTOTUNE_MAX_ITERATIONS`.
stream: Provide the CUDA stream to use for the autotuning operation. Acceptable inputs include ``cudaStream_t``
(as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
the current stream will be used.
"""
message = utils.check_autotune_params(iterations)
self.logger.info(message)
if self.autotune_pref_ptr is None:
self.autotune_pref_ptr = cutn.create_contraction_autotune_preference(self.handle)
AutoEnum = cutn.ContractionAutotunePreferenceAttribute
options = {'iterations': (AutoEnum.MAX_ITERATIONS, iterations)}
self._set_autotune_options(options)
# Allocate device memory (in stream context) if needed.
stream, stream_ctx, stream_ptr = utils.get_or_create_stream(self.device_id, stream, self.package)
self._allocate_workspace_memory_perhaps(stream, stream_ctx)
# Check if we still hold an output tensor; if not, create a new one.
if self.contraction is None:
self.logger.debug("Beginning output (empty) tensor creation...")
self.contraction = utils.create_empty_tensor(self.output_class, self.extents_out, self.data_type, self.device_id, stream_ctx)
self.logger.debug("The output (empty) tensor has been created.")
elif self.contraction_output_event is not None:
stream.wait_event(self.contraction_output_event)
self.contraction_output_event = None
self.logger.debug("Established ordering with output tensor creation event.")
timing = bool(self.logger and self.logger.handlers)
self.logger.info(f"Starting autotuning...")
self.logger.info(f"{self.call_prologue}")
with utils.device_ctx(self.device_id), utils.cuda_call_ctx(stream, self.blocking, timing) as (self.last_compute_event, elapsed):
cutn.contraction_autotune(self.handle, self.plan, self.operands_data, self.contraction.data_ptr,
self.workspace_desc, self.autotune_pref_ptr, stream_ptr)
if elapsed.data is not None:
self.logger.info(f"The autotuning took {elapsed.data:.3f} ms to complete.")
self.autotuned = True
@utils.precondition(_check_valid_network)
def reset_operands(self, *operands):
"""Reset the operands held by this :class:`Network` instance.
This method is not needed when the operands
reside on the GPU and in-place operations are used to update the operand values.
This method will perform various checks on the new operands to make sure:
- The shapes, strides, datatypes match those of the old ones.
- The packages that the operands belong to match those of the old ones.
- If input tensors are on GPU, the library package and device must match.
Args:
operands: See :class:`Network`'s documentation.
"""
if len(operands) != len(self.operands):
message = f"Mismatch in the number of operands ({len(operands)} provided, need {len(self.operands)})."
raise ValueError(message)
self.logger.info("Resetting operands...")
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
utils.check_operands_match(self.operands, operands, 'dtype', "data type")
utils.check_operands_match(self.operands, operands, 'shape', 'shape')
device_id = utils.get_network_device_id(operands)
if device_id is None:
# Copy to existing device pointers because the new operands are on the CPU.
tensor_wrapper.copy_(operands, self.operands)
else:
utils.check_operands_match(self.operands, operands, 'strides', 'strides')
package = utils.get_operands_package(operands)
if self.package != package:
message = f"Library package mismatch: '{self.package}' => '{package}'"
raise TypeError(message)
if self.device_id != device_id:
raise ValueError(f"The new operands must be on the same device ({device_id}) as the original operands "
f"({self.device_id}).")
# Finally, replace the original data pointers by the new ones.
self.operands_data = utils.get_operands_data(operands)
self.logger.info("The operands have been reset.")
@utils.precondition(_check_valid_network)
@utils.precondition(_check_optimized, "Contraction")
@utils.precondition(_check_planned, "Contraction")
def contract(self, *, slices=None, stream=None):
"""Contract the network and return the result.
Args:
slices: Specify the slices to be contracted as Python :class:`range` for contiguous slice IDs or as a Python sequence
object for arbitrary slice IDs. If not specified, all slices will be contracted.
stream: Provide the CUDA stream to use for the contraction operation. Acceptable inputs include ``cudaStream_t``
(as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
the current stream will be used.
Returns:
The result is of the same type and on the same device as the operands.
"""
# Allocate device memory (in stream context) if needed.
stream, stream_ctx, stream_ptr = utils.get_or_create_stream(self.device_id, stream, self.package)
self._allocate_workspace_memory_perhaps(stream, stream_ctx)
# Check if we still hold an output tensor; if not, create a new one.
if self.contraction is None:
self.logger.debug("Beginning output (empty) tensor creation...")
self.contraction = utils.create_empty_tensor(self.output_class, self.extents_out, self.data_type, self.device_id, stream_ctx)
self.logger.debug("The output (empty) tensor has been created.")
elif self.contraction_output_event is not None:
stream.wait_event(self.contraction_output_event)
self.contraction_output_event = None
self.logger.debug("Established ordering with output tensor creation event.")
# Create a slice group for contraction.
slice_group = None
if slices is None:
slice_group = 0
self.logger.info(f"All the available slices ({self.num_slices}) will be contracted.")
elif isinstance(slices, range):
slice_group = cutn.create_slice_group_from_id_range(self.handle, slices.start, slices.stop, slices.step)
self.logger.info(f"A slice group has been created with start={slices.start}, stop={slices.stop}, and step={slices.step}.")
elif isinstance(slices, collections.abc.Sequence):
slice_group = cutn.create_slice_group_from_ids(self.handle, slices, len(slices))
self.logger.info(f"A slice group has been created from the specified sequence: {formatters.array2string([str(s) for s in slices])}")
else:
message = f"The provided 'slices' must be a range object or a sequence object. The object type is {type(slices)}."
raise TypeError(message)
timing = bool(self.logger and self.logger.handlers)
self.logger.info("Starting network contraction...")
self.logger.info(f"{self.call_prologue}")
with utils.device_ctx(self.device_id), utils.cuda_call_ctx(stream, self.blocking, timing) as (self.last_compute_event, elapsed):
cutn.contract_slices(self.handle, self.plan, self.operands_data, self.contraction.data_ptr, False,
self.workspace_desc, slice_group, stream_ptr)
if elapsed.data is not None:
self.logger.info(f"The contraction took {elapsed.data:.3f} ms to complete.")
# Destroy slice group, if created.
if slice_group != 0:
cutn.destroy_slice_group(slice_group)
self.logger.debug(f"Slice group ({slice_group}) has been destroyed.")
if self.network_location == 'cpu':
out = self.contraction.to('cpu')
else:
out = self.contraction.tensor
self.contraction = None # We cannot overwrite what we've already handed to users.
return out
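    # Illustrative (a sketch): when the path was sliced, a Network instance ``n``
    # can contract a subset of slices per call; the full result is the sum of the
    # contributions from all slices:
    #   partial = n.contract(slices=range(0, 4))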
def free(self):
"""Free network resources.
It is recommended that the :class:`Network` object be used within a context, but if it is not possible then this
method must be called explicitly to ensure that the network resources are properly cleaned up.
"""
if not self.valid_state:
return
try:
# Future operations on the workspace stream should be ordered after the computation.
if self.last_compute_event is not None:
self.workspace_stream.wait_event(self.last_compute_event)
self._free_path_resources()
if self.autotune_pref_ptr is not None:
cutn.destroy_contraction_autotune_preference(self.autotune_pref_ptr)
self.autotune_pref_ptr = None
if self.workspace_desc is not None:
cutn.destroy_workspace_descriptor(self.workspace_desc)
self.workspace_desc = None
if self.network is not None:
cutn.destroy_network_descriptor(self.network)
self.network = None
if self.handle is not None and self.own_handle:
cutn.destroy(self.handle)
self.handle = None
self.own_handle = False
except Exception as e:
self.logger.critical("Internal error: only part of the network resources have been released.")
self.logger.critical(str(e))
raise e
finally:
self.valid_state = False
self.logger.info("The network resources have been released.")
def contract(*operands, qualifiers=None, options=None, optimize=None, stream=None, return_info=False):
r"""
    contract(subscripts, *operands, qualifiers=None, options=None, optimize=None, stream=None, return_info=False)
Evaluate the Einstein summation convention on the operands.
Explicit as well as implicit form is supported for the Einstein summation expression. In addition to the subscript format,
the interleaved format is also supported as a means of specifying the operands and their mode labels. See :class:`Network`
for more detail on the types of operands as well as for examples.
Args:
subscripts : The mode labels (subscripts) defining the Einstein summation expression as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified using the Einstein summation convention.
operands : A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
qualifiers: Specify the tensor qualifiers as a :class:`numpy.ndarray` of :class:`~cuquantum.tensor_qualifiers_dtype` objects
of length equal to the number of operands.
options : Specify options for the tensor network as a :class:`~cuquantum.NetworkOptions` object. Alternatively, a `dict`
containing the parameters for the ``NetworkOptions`` constructor can also be provided. If not specified,
the value will be set to the default-constructed ``NetworkOptions`` object.
optimize : This parameter specifies options for path optimization as an :class:`OptimizerOptions` object. Alternatively, a
dictionary containing the parameters for the ``OptimizerOptions`` constructor can also be provided. If not
specified, the value will be set to the default-constructed ``OptimizerOptions`` object.
        stream: Provide the CUDA stream to use for the contraction operation. Acceptable inputs include ``cudaStream_t``
(as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
the current stream will be used.
return_info : If true, information about the best contraction order will also be returned.
Returns:
If ``return_info`` is `False`, the output tensor (ndarray-like object) of the same type and on the same device
as the operands containing the result of the contraction; otherwise, a 2-tuple consisting of the output tensor and an
:class:`OptimizerInfo` object that contains information about the best contraction order etc.
.. note::
It is encouraged for users to maintain the library handle themselves so as to reduce the context initialization time:
.. code-block:: python
from cuquantum import cutensornet as cutn
from cuquantum import contract, NetworkOptions
handle = cutn.create()
network_opts = NetworkOptions(handle=handle, ...)
out = contract(..., options=network_opts, ...)
# ... the same handle can be reused for further calls ...
# when it's done, remember to destroy the handle
cutn.destroy(handle)
Examples:
Use NumPy operands:
>>> from cuquantum import contract
>>> import numpy as np
>>> a = np.arange(6.).reshape(3, 2)
>>> b = np.arange(6.).reshape(2, 3)
Perform matrix multiplication using the explicit form. The result ``r`` is a NumPy ndarray (with the computation
performed on the GPU):
>>> r = contract('ij,jk->ik', a, b)
Implicit form:
>>> r = contract('ij,jk', a, b)
Interleaved format using characters for mode labels:
>>> r = contract(a, ['i', 'j'], b, ['j', 'k'], ['i', 'k'], return_info=True)
Interleaved format using string labels for mode labels and implicit form:
>>> r = contract(a, ['first', 'second'], b, ['second', 'third'])
Interleaved format using integer mode labels and explicit form:
>>> r = contract(a, [1, 2], b, [2, 3], [1, 3])
Obtain information ``i`` on the best contraction path along with the result ``r``:
>>> r, i = contract('ij,jk', a, b, return_info=True)
Provide options for the tensor network:
>>> from cuquantum import NetworkOptions
>>> n = NetworkOptions(device_id=1)
>>> r = contract('ij,jk->ik', a, b, options=n)
Alternatively, the options can be provided as a dict instead of a :class:`NetworkOptions` object:
>>> r = contract('ij,jk->ik', a, b, options={'device_id': 1})
Specify options for the optimizer:
>>> from cuquantum import OptimizerOptions, PathFinderOptions
>>> p = PathFinderOptions(imbalance_factor=230, cutoff_size=8)
>>> o = OptimizerOptions(path=p, seed=123)
>>> r = contract('ij,jk,kl', a, b, a, optimize=o)
Alternatively, the options above can be provided as a dict:
>>> r = contract('ij,jk,kl', a, b, a, optimize={'path': {'imbalance_factor': 230, 'cutoff_size': 8}, 'seed': 123})
Specify the path directly:
>>> o = OptimizerOptions(path = [(0,2), (0,1)])
>>> r = contract('ij,jk,kl', a, b, a, optimize=o)
Perform elementwise multiplication :math:`a \odot b^T` using the ellipsis shorthand notation:
>>> r = contract('...,...', a, b.T)
Obtain the double inner product :math:`a : b^T` (Frobenius inner product for real-valued tensors) using the
ellipsis shorthand notation:
>>> r = contract('...,...->', a, b.T)
Use CuPy operands. The result ``r`` is a CuPy ndarray on the same device as the operands, and ``dev`` is any valid
device ID on your system that you wish to use to store the tensors and compute the contraction:
>>> import cupy
>>> dev = 0
>>> with cupy.cuda.Device(dev):
... a = cupy.arange(6.).reshape(3, 2)
... b = cupy.arange(6.).reshape(2, 3)
>>> r = contract('ij,jk', a, b)
Use PyTorch operands. The result ``r`` is a PyTorch tensor on the same device (``dev``) as the operands:
.. doctest::
:skipif: torch is None
>>> import torch
>>> dev = 0
>>> a = torch.arange(6., device=f'cuda:{dev}').reshape(3, 2)
>>> b = torch.arange(6., device=f'cuda:{dev}').reshape(2, 3)
>>> r = contract('ij,jk', a, b)
"""
# Create network.
with Network(*operands, qualifiers=qualifiers, options=options) as network:
# Compute path.
        _, opt_info = network.contract_path(optimize=optimize)
# Skip autotuning since the network is contracted only once.
# Contraction.
output = network.contract(stream=stream)
if return_info:
return output, opt_info
return output
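# Illustrative (a sketch): an explicit CUDA stream may be supplied to contract()
# (cupy shown here; torch.cuda.Stream works the same way):
#   s = cp.cuda.Stream()
#   r = contract('ij,jk', a, b, stream=s)
#   s.synchronize()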
def contract_path(*operands, qualifiers=None, options=None, optimize=None):
"""
    contract_path(subscripts, *operands, qualifiers=None, options=None, optimize=None)
Evaluate the "best" contraction order by allowing the creation of intermediate tensors.
Explicit as well as implicit form is supported for the Einstein summation expression. In addition to the subscript format,
the interleaved format is also supported as a means of specifying the operands and their mode labels. See :class:`Network`
for more detail on the types of operands as well as for examples.
Args:
subscripts : The mode labels (subscripts) defining the Einstein summation expression as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified using the Einstein summation convention.
operands : A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
qualifiers: Specify the tensor qualifiers as a :class:`numpy.ndarray` of :class:`~cuquantum.tensor_qualifiers_dtype` objects
of length equal to the number of operands.
options : Specify options for the tensor network as a :class:`~cuquantum.NetworkOptions` object. Alternatively, a `dict`
containing the parameters for the ``NetworkOptions`` constructor can also be provided. If not specified,
the value will be set to the default-constructed ``NetworkOptions`` object.
optimize : This parameter specifies options for path optimization as an :class:`OptimizerOptions` object. Alternatively, a
dictionary containing the parameters for the ``OptimizerOptions`` constructor can also be provided. If not
specified, the value will be set to the default-constructed ``OptimizerOptions`` object.
Returns:
tuple: A 2-tuple (``path``, ``opt_info``):
- ``path`` : A sequence of pairs of operand ordinals representing the best contraction order in the
:func:`numpy.einsum_path` format.
- ``opt_info`` : An object of type :class:`OptimizerInfo` containing information about the best contraction order.
.. note::
It is encouraged for users to maintain the library handle themselves so as to reduce the context initialization time:
.. code-block:: python
from cuquantum import cutensornet as cutn
from cuquantum import contract, NetworkOptions
handle = cutn.create()
network_opts = NetworkOptions(handle=handle, ...)
path, info = contract_path(..., options=network_opts, ...)
# ... the same handle can be reused for further calls ...
# when it's done, remember to destroy the handle
cutn.destroy(handle)
"""
# Create network.
with Network(*operands, qualifiers=qualifiers, options=options) as network:
# Compute path.
path, opt_info = network.contract_path(optimize=optimize, create_plan=False)
return path, opt_info
def _check_einsum_options(out, dtype, order, casting, optimize):
"""
Check whether the options provided to the einsum function interface are supported.
"""
if out is not None:
message = f"value '{out}' for parameter 'out'."
raise NotImplementedError(message)
if dtype is not None:
message = f"value '{dtype}' for parameter 'dtype'."
raise NotImplementedError(message)
if order != 'K':
message = f"value '{order}' for parameter 'order'."
raise NotImplementedError(message)
if casting.lower() != 'safe':
message = f"value '{casting}' for parameter 'casting'."
raise NotImplementedError(message)
if optimize not in (True, False) and not isinstance(optimize, collections.abc.Sequence):
message = f"""value '{optimize}' for parameter 'optimize'.
Only True or False values are allowed. Alternatively an explicit contraction list from einsum_path
can be provided."""
raise NotImplementedError(message)
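# Illustrative (a sketch): unsupported NumPy options are rejected up front, e.g.
#   einsum('ij,jk', a, b, dtype='float64')  # raises NotImplementedError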
def einsum(*operands, out=None, dtype=None, order='K', casting='safe', optimize=True):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K', casting='safe', optimize=True)
A drop-in replacement of :func:`numpy.einsum` for computing the specified tensor contraction using cuTensorNet.
Not all NumPy options are supported or even used. The :func:`contract` function provides an extensive set of options
specific to cuTensorNet and is recommended over this function.
Explicit as well as implicit form is supported for the Einstein summation expression. In addition to the subscript format,
the interleaved format is also supported as a means of specifying the operands and their mode labels. See :class:`Network`
for more detail on the types of operands as well as for examples.
Args:
subscripts : The mode labels (subscripts) defining the Einstein summation expression as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified using the Einstein summation convention.
operands : A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
out : Not supported in this release.
dtype : Not supported in this release.
order : Not supported in this release.
casting : Not supported in this release.
optimize : This parameter specifies options for path optimization. The only values accepted by this interface are `True`,
`False`, or the contraction path specified in the :func:`numpy.einsum_path` format.
Returns:
output:
A tensor (ndarray-like object) of the same type and on the same device as the operands containing the result of
the contraction.
"""
_check_einsum_options(out, dtype, order, casting, optimize)
# Create network.
with Network(*operands) as network:
if optimize is True:
# Compute path.
network.contract_path()
else:
if optimize is False:
# Use canonical path.
path = [(0, 1)] * (len(network.inputs) - 1)
else:
# Use specified path.
path = optimize
# Set path (path validation is done when setting OptimizerOptions).
optimize = configuration.OptimizerOptions(path=path)
network.contract_path(optimize=optimize)
# Skip autotuning since the network is contracted only once.
# Contraction.
output = network.contract()
return output
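# Illustrative (a sketch): einsum() also accepts an explicit contraction path in
# the numpy.einsum_path format:
#   r = einsum('ij,jk,kl', a, b, a, optimize=[(0, 2), (0, 1)])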
def einsum_path(*operands, optimize=True):
"""
einsum_path(subscripts, *operands, optimize=True)
A drop-in replacement of :func:`numpy.einsum_path` for evaluating the "best" contraction order using cuTensorNet.
Only a subset of the NumPy options is supported using this interface. The :func:`contract_path` function provides an
extensive set of options specific to cuTensorNet and is recommended over this function.
Explicit as well as implicit form is supported for the Einstein summation expression. In addition to the subscript format,
the interleaved format is also supported as a means of specifying the operands and their mode labels. See :class:`Network`
for more detail on the types of operands as well as for examples.
Args:
subscripts : The mode labels (subscripts) defining the Einstein summation expression as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified using the Einstein summation convention.
operands : A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
optimize : This parameter specifies options for path optimization. The only value allowed with this interface is `True`.
Returns:
tuple: A 2-tuple (``path``, ``opt_info``):
- ``path`` : A list starting with the string 'einsum_path' and followed by a sequence of pairs of operand ordinals
representing the best contraction order in the :func:`numpy.einsum_path` format.
- ``opt_info`` : String representation of an object of type :class:`OptimizerInfo` containing information about
the best contraction order.
"""
if optimize is not True:
message = f"""Invalid value for parameter 'optimize'.
The only allowed value for 'optimize' is True."""
raise NotImplementedError(message)
# Create network.
with Network(*operands) as network:
# Compute path.
path, opt_info = network.contract_path(create_plan=False)
return ['einsum_path', *path], str(opt_info)
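# Illustrative (a sketch): the returned path (minus the leading 'einsum_path'
# string) can be passed back to einsum():
#   path, info_str = einsum_path('ij,jk,kl', a, b, a)
#   r = einsum('ij,jk,kl', a, b, a, optimize=path[1:])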
| cuQuantum-main | python/cuquantum/cutensornet/tensor_network.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
""" Interface for pluggable memory handlers.
"""
__all__ = ['BaseCUDAMemoryManager', 'MemoryPointer']
from abc import abstractmethod
from typing import Protocol, runtime_checkable
import weakref
import cupy as cp
from ._internal import utils
class MemoryPointer:
"""
An RAII class for a device memory buffer.
Args:
device_ptr: The address of the device memory buffer.
size: The size of the memory buffer in bytes.
finalizer: A nullary callable that will be called when the buffer is to be freed.
.. seealso:: :class:`numba.cuda.MemoryPointer`
"""
def __init__(self, device_ptr, size, finalizer):
self.device_ptr = device_ptr
self.size = size
        if finalizer is not None:
            self._finalizer = weakref.finalize(self, finalizer)
        else:
            self._finalizer = None
def free(self):
"""
"Frees" the memory buffer by calling the finalizer.
"""
        if self._finalizer is None:
return
if not self._finalizer.alive:
raise RuntimeError("The buffer has already been freed.")
self._finalizer()
@runtime_checkable
class BaseCUDAMemoryManager(Protocol):
"""
Protocol for memory manager plugins.
.. seealso:: :class:`numba.cuda.BaseCUDAMemoryManager`
"""
@abstractmethod
def memalloc(self, size):
"""
Allocate device memory.
Args:
size: The size of the memory buffer in bytes.
Returns:
An object that owns the allocated memory and is responsible for releasing it (to the OS or a pool). The object must
have an attribute named ``device_ptr``, ``device_pointer``, or ``ptr`` specifying the pointer to the allocated memory
buffer. See :class:`MemoryPointer` for an example interface.
Note:
Objects of type :class:`numba.cuda.MemoryPointer` as well as :class:`cupy.cuda.MemoryPointer` meet the requirements
listed above for the device memory pointer object.
"""
raise NotImplementedError
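# Illustrative (a sketch, with a hypothetical class name): a minimal user-defined
# plugin conforming to the protocol above; cupy's MemoryPointer already exposes
# the required ``ptr`` attribute:
#   class MyMemoryManager(BaseCUDAMemoryManager):
#       def memalloc(self, size):
#           return cp.cuda.alloc(size)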
class _RawCUDAMemoryManager(BaseCUDAMemoryManager):
"""
Raw device memory allocator.
Args:
device_id: The ID (int) of the device on which memory is to be allocated.
logger (logging.Logger): Python Logger object.
"""
def __init__(self, device_id, logger):
"""
__init__(device_id)
"""
self.device_id = device_id
self.logger = logger
def memalloc(self, size):
with utils.device_ctx(self.device_id):
device_ptr = cp.cuda.runtime.malloc(size)
self.logger.debug(f"_RawCUDAMemoryManager (allocate memory): size = {size}, ptr = {device_ptr}, "
f"device = {self.device_id}, stream={cp.cuda.get_current_stream()}")
def create_finalizer():
def finalizer():
# Note: With UVA there is no need to switch context to the device the memory belongs to before calling free().
cp.cuda.runtime.free(device_ptr)
self.logger.debug(f"_RawCUDAMemoryManager (release memory): ptr = {device_ptr}")
return finalizer
return MemoryPointer(device_ptr, size, finalizer=create_finalizer())
class _CupyCUDAMemoryManager(BaseCUDAMemoryManager):
"""
CuPy device memory allocator.
Args:
device_id: The ID (int) of the device on which memory is to be allocated.
logger (logging.Logger): Python Logger object.
"""
def __init__(self, device_id, logger):
"""
__init__(device_id)
"""
self.device_id = device_id
self.logger = logger
def memalloc(self, size):
with utils.device_ctx(self.device_id):
cp_mem_ptr = cp.cuda.alloc(size)
device_ptr = cp_mem_ptr.ptr
self.logger.debug(f"_CupyCUDAMemoryManager (allocate memory): size = {size}, ptr = {device_ptr}, "
f"device = {self.device_id}, stream={cp.cuda.get_current_stream()}")
return cp_mem_ptr
class _TorchCUDAMemoryManager(BaseCUDAMemoryManager):
"""
Torch caching memory allocator.
Args:
device_id: The ID (int) of the device on which memory is to be allocated.
logger (logging.Logger): Python Logger object.
"""
def __init__(self, device_id, logger):
"""
__init__(device_id)
"""
self.device_id = device_id
self.logger = logger
def memalloc(self, size):
from torch.cuda import caching_allocator_alloc, caching_allocator_delete, current_stream
device_ptr = caching_allocator_alloc(size, device=self.device_id)
self.logger.debug(f"_TorchCUDAMemoryManager (allocate memory): size = {size}, ptr = {device_ptr}, "
f"device_id = {self.device_id}, stream={current_stream()}")
def create_finalizer():
def finalizer():
caching_allocator_delete(device_ptr)
self.logger.debug(f"_TorchCUDAMemoryManager (release memory): ptr = {device_ptr}")
return finalizer
return MemoryPointer(device_ptr, size, finalizer=create_finalizer())
_MEMORY_MANAGER = {'_raw' : _RawCUDAMemoryManager, 'cupy' : _CupyCUDAMemoryManager, 'torch' : _TorchCUDAMemoryManager}
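# Illustrative (a sketch): a custom allocator can be plugged into a computation
# via the ``allocator`` option, e.g.:
#   import logging
#   from cuquantum import NetworkOptions
#   opts = NetworkOptions(allocator=_RawCUDAMemoryManager(0, logging.getLogger()))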
| cuQuantum-main | python/cuquantum/cutensornet/memory.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cuquantum.cutensornet.cutensornet import *
from cuquantum.cutensornet.configuration import *
from cuquantum.cutensornet.memory import *
from cuquantum.cutensornet.tensor_network import *
from cuquantum.cutensornet.circuit_converter import *
from cuquantum.cutensornet._internal.utils import get_mpi_comm_pointer
from . import experimental
from . import tensor
| cuQuantum-main | python/cuquantum/cutensornet/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Computational primitives for tensors
"""
__all__ = ['decompose', 'DecompositionOptions', 'QRMethod', 'SVDInfo', 'SVDMethod']
import dataclasses
import logging
import re
from typing import Optional
import numpy
from . import cutensornet as cutn
from .configuration import NetworkOptions
from ._internal import decomposition_utils
from ._internal import utils
DecompositionOptions = dataclasses.make_dataclass("DecompositionOptions", fields=[(field.name, field.type, field) for field in dataclasses.fields(NetworkOptions)], bases=(NetworkOptions,))
DecompositionOptions.__doc__ = re.sub(":class:`cuquantum.Network` object", ":func:`cuquantum.cutensornet.tensor.decompose` and :func:`cuquantum.cutensornet.experimental.contract_decompose` functions", NetworkOptions.__doc__)
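# Illustrative (a sketch): DecompositionOptions shares all fields with NetworkOptions,
# so, e.g., the device and memory limit can be set the same way:
#   opts = DecompositionOptions(device_id=0, memory_limit="80%")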
def decompose(
subscripts,
operand,
*,
method=None,
options=None,
stream=None,
return_info=False
):
r"""
Perform the tensor decomposition of the operand based on the expression described by ``subscripts``.
    The expression adopts a notation similar to that of an Einstein summation (einsum) expression, but in reversed order:
    the input is a single operand while the output contains two or three tensors.
    Unlike an einsum expression, the mode labels for all input and output operands must be specified *explicitly*.
See the notes and examples for clarification. See also :ref:`Tensor Decomposition<approximatedTN>`.
Args:
subscripts : The mode labels (subscripts) defining the decomposition as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the rank (dimensionality) of the tensor that
can be specified.
operand : A ndarray-like tensor object. The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
method : Specify decomposition method as a :class:`cuquantum.cutensornet.tensor.QRMethod` or a :class:`cuquantum.cutensornet.tensor.SVDMethod` object.
Alternatively, a `dict` containing the parameters for the ``QRMethod`` or ``SVDMethod`` constructor can also be provided.
If not specified, the value will be set to the default-constructed ``QRMethod``.
            Note that both the SVD and the QR method operate in reduced fashion, similar to ``full_matrices=False`` for ``numpy.linalg.svd`` and ``reduced=True`` for ``numpy.linalg.qr``.
options : Specify the computational options for the decomposition as a :class:`cuquantum.cutensornet.tensor.DecompositionOptions` object.
Alternatively, a `dict` containing the parameters for the ``DecompositionOptions`` constructor can also be provided.
If not specified, the value will be set to the default-constructed ``DecompositionOptions``.
stream: Provide the CUDA stream to use for the decomposition. Acceptable inputs include ``cudaStream_t``
(as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
the current stream will be used.
return_info : If true, information about the decomposition will be returned via a :class:`cuquantum.cutensornet.tensor.SVDInfo` object.
Currently this option is only supported for SVD decomposition (which is specified via ``method``).
Returns:
Depending on the decomposition method specified in ``method``, the results returned may vary:
- For QR decomposition (default), the output tensors Q and R (ndarray-like objects) of the same type
and on the same device as the input operand are returned as the result of the decomposition.
- For SVD decomposition, if ``return_info`` is `False`, a 3-tuple of output tensors U, S and V (ndarray-like objects)
of the same type as the input operand are returned as the result of the decomposition. If ``return_info`` is `True`,
a 4-tuple of output tensors U, S, V and a `dict` object that contains information about the decomposition will be returned.
Note, depending on the choice of :attr:`cuquantum.cutensornet.tensor.SVDMethod.partition`, the returned S operand may be `None`.
Also see :attr:`~SVDMethod.partition`.
    The decomposition expression adopts a notation similar to that of an einsum expression.
    The ``subscripts`` string is a list of subscript labels where each label refers to a mode of the corresponding operand.
    The subscript labels are separated by either a comma or the identifier ``->``.
    The subscript labels before the identifier ``->`` are viewed as the input, and the ones after are viewed as the outputs, respectively.
The requirements on the subscripts for SVD and QR decomposition are summarized below:
- For SVD and QR decomposition, the subscripts string is expected to contain exactly one input and two outputs (the modes for ``s`` is not needed in the case of SVD).
- One and only one identical mode is expected to exist in the two output mode labels.
- When inverted, the decomposition subscript yields a valid einsum subscript that can specify the contraction of the outputs to reproduce the input (modes for ``s`` excluded for SVD).
Examples:
>>> # equivalent:
>>> # q, r = numpy.linalg.qr(a)
>>> q, r = tensor.decompose('ij->ik,kj', a)
>>> # equivalent:
>>> # u, s, v = numpy.linalg.svd(a, full_matrices=False)
>>> u, s, v = tensor.decompose('ij->ik,kj', a, method=tensor.SVDMethod())
For generalization to multi-dimensional tensors (here ``a`` is a rank-4 tensor):
>>> u, s, v = tensor.decompose('ijab->ixb,jax', a, method=tensor.SVDMethod())
>>> # u is unitary
>>> identity = cuquantum.contract('ixb,iyb->xy', u, u.conj())
>>> # re-construct the tensor a by inverting the expression
>>> a_reconstructed = cuquantum.contract('ixb,x,jax->ijab', u, s, v)
**Broadcasting** is supported for certain cases via ellipsis notation.
One may add an ellipsis in the input to represent all the modes that are not explicitly specified in the labels.
The ellipsis *must* also appear in one of the outputs to indicate which output the represented modes will all be partitioned onto.
Example:
Given a rank-6 operand ``a``:
>>> # equivalent:
>>> # q, r = tensor.decompose('ijabcd->ixj,abcdx', a)
>>> q, r = tensor.decompose('ij...->ixj,...x', a)
.. note::
    Users are encouraged to maintain the library handle themselves so as to reduce the context initialization time:
.. code-block:: python
from cuquantum import cutensornet as cutn
from cuquantum.cutensornet.tensor import decompose, QRMethod
handle = cutn.create()
q, r = decompose(..., method=QRMethod(), options={"handle": handle}, ...)
# ... the same handle can be reused for further calls ...
# when it's done, remember to destroy the handle
cutn.destroy(handle)
Below we give more pedagogical examples.
Examples:
Use NumPy operands:
>>> from cuquantum.cutensornet.tensor import decompose, SVDMethod
>>> import numpy as np
>>> T = np.random.random((4,4,6,6))
Perform tensor QR decomposition such that T[i,j,a,b] = Q[i,k,a] R[k,j,b].
The results ``q`` and ``r`` are NumPy ndarrays (with the computation performed on the GPU):
>>> q, r = decompose('ijab->ika,kjb', T)
Perform exact tensor SVD decomposition such that T[i,j,a,b] = U[i,k,a] S[k] V[k,j,b]:
>>> u, s, v = decompose('ijab->ika,kjb', T, method=SVDMethod())
Perform exact tensor SVD decomposition such that T[i,j,a,b] = US[i,k,a] V[k,j,b] where US[i,k,a] represents the product of U[i,k,a] and S[k]:
>>> us, _, v = decompose('ijab->ika,kjb', T, method=SVDMethod(partition="U"))
Perform exact tensor SVD decomposition such that T[i,j,a,b] = U[i,k,a] S[k] V[k,j,b] then normalize the L2 norm of output singular values to 1:
    >>> u, s_normalized, v = decompose('ijab->ika,kjb', T, method=SVDMethod(normalization="L2"))
>>> print(np.linalg.norm(s_normalized)) # 1.0
Perform truncated SVD decomposition to keep at most 8 singular values.
Meanwhile, request the runtime information from the SVD truncation with ``return_info=True``.
>>> u, s, v, info = decompose('ijab->ika,kjb', T, method=SVDMethod(max_extent=8), return_info=True)
>>> print(s.shape) # (8,)
>>> print(info)
We can also perform truncated SVD decomposition with all requirements below:
- the number of remaining singular values shall not exceed 8.
- remaining singular values are all larger than 0.01
- remaining singular values are all larger than 0.1 * largest singular values
    - the L1 norm of the remaining singular values is normalized to 1.
- the remaining singular values (after truncation and normalization) are equally partitioned onto U and V
>>> method = {"max_extent": 8,
... "abs_cutoff": 0.01,
... "rel_cutoff": 0.1,
... "normalization": "L1",
... "partition": "UV"}
>>> svd_method = SVDMethod(**method)
>>> us, _, sv, info = decompose('ijab->ika,kjb', T, method=svd_method, return_info=True)
Alternatively, the options can be provided as a ``dict`` object:
    >>> us, _, sv, info = decompose('ijab->ika,kjb', T, method=method, return_info=True)
Use CuPy operands. The results ``q`` and ``r`` are CuPy ndarrays on the same device as the input operand, and ``dev`` is any valid
device ID on your system that you wish to use to store the tensors and perform the decomposition:
>>> import cupy
>>> dev = 0
>>> with cupy.cuda.Device(dev):
... T = cupy.random.random((4,4,6,6))
>>> q, r = decompose('ijab->ika,kjb', T)
Use PyTorch operands. The results ``q`` and ``r`` are PyTorch tensors on the same device (``dev``) as the input operand:
.. doctest::
:skipif: torch is None
>>> import torch
>>> dev = 0
>>> T = torch.rand(4,4,6,6, device=f'cuda:{dev}')
>>> q, r = decompose('ijab->ika,kjb', T)
"""
options = utils.check_or_create_options(DecompositionOptions, options, "decomposition options")
logger = logging.getLogger() if options.logger is None else options.logger
logger.info(f"CUDA runtime version = {cutn.get_cudart_version()}")
logger.info(f"cuTensorNet version = {cutn.MAJOR_VER}.{cutn.MINOR_VER}.{cutn.PATCH_VER}")
logger.info("Beginning operands parsing...")
# Infer the correct decomposition method, QRMethod by default
for method_class in (QRMethod, SVDMethod):
try:
method = utils.check_or_create_options(method_class, method, method_class.__name__)
except TypeError:
continue
else:
break
else:
raise ValueError("method must be either a QRMethod/SVDMethod object or a dict that can be used to construct QRMethod/SVDMethod")
# Parse the decomposition expression
wrapped_operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, max_mid_extent = decomposition_utils.parse_decomposition(subscripts, operand)
if len(wrapped_operands) != 1:
raise ValueError(f"only one input operand expected for tensor.decompose, found {len(wrapped_operands)}")
# placeholder to help avoid resource leak
handle = workspace_desc = svd_config = svd_info = None
    input_descriptors, output_descriptors = [], []
try:
# wrap operands to be consistent with options.
# options is a new instance of DecompositionOptions with all entries initialized
wrapped_operands, options, own_handle, operands_location = decomposition_utils.parse_decompose_operands_options(options,
wrapped_operands, allowed_dtype_names=decomposition_utils.DECOMPOSITION_DTYPE_NAMES)
handle = options.handle
if isinstance(method, QRMethod):
mid_extent = max_mid_extent
if return_info:
raise ValueError("``return_info`` is only supported for SVDMethod")
elif isinstance(method, SVDMethod):
mid_extent = max_mid_extent if method.max_extent is None else min(max_mid_extent, method.max_extent)
else:
raise ValueError("method must be either SVDMethod or QRMethod")
        # Create input/output tensor descriptors and empty output operands
package = utils.infer_object_package(wrapped_operands[0].tensor)
stream, stream_ctx, stream_ptr = utils.get_or_create_stream(options.device_id, stream, package)
input_descriptors, output_operands, output_descriptors, s, s_ptr = decomposition_utils.create_operands_and_descriptors(options.handle,
wrapped_operands, size_dict, inputs, outputs, mid_extent, method, options.device_id, stream_ctx, options.logger)
# Create workspace descriptor
workspace_desc = cutn.create_workspace_descriptor(handle)
# Compute required workspace size
if isinstance(method, QRMethod):
logger.debug("Querying QR workspace size...")
cutn.workspace_compute_qr_sizes(handle, *input_descriptors, *output_descriptors, workspace_desc)
elif isinstance(method, SVDMethod):
svd_config = cutn.create_tensor_svd_config(handle)
decomposition_utils.parse_svd_config(handle, svd_config, method, logger)
logger.debug("Querying SVD workspace size...")
cutn.workspace_compute_svd_sizes(handle,
*input_descriptors, *output_descriptors, svd_config, workspace_desc)
else:
            raise ValueError("method must be either a QRMethod/SVDMethod object or a dict that can be used to construct QRMethod/SVDMethod")
# Allocate and set workspace
workspaces = dict()
for mem_space in (cutn.Memspace.DEVICE, cutn.Memspace.HOST):
workspaces[mem_space] = decomposition_utils.allocate_and_set_workspace(handle, options.allocator, workspace_desc,
cutn.WorksizePref.MIN, mem_space, cutn.WorkspaceKind.SCRATCH, options.device_id,
stream, stream_ctx, options.logger, task_name='tensor decomposition')
svd_info_obj = None
# Perform QR/SVD computation
logger.info("Starting tensor decomposition...")
blocking = options.blocking is True or operands_location == 'cpu'
if blocking:
logger.info("This call is blocking and will return only after the operation is complete.")
else:
logger.info("This call is non-blocking and will return immediately after the operation is launched on the device.")
timing = bool(logger and logger.handlers)
if isinstance(method, QRMethod):
with utils.device_ctx(options.device_id), utils.cuda_call_ctx(stream, blocking, timing) as (last_compute_event, elapsed):
cutn.tensor_qr(handle,
*input_descriptors, wrapped_operands[0].data_ptr,
output_descriptors[0], output_operands[0].data_ptr,
output_descriptors[1], output_operands[1].data_ptr,
workspace_desc, stream_ptr)
if elapsed.data is not None:
logger.info(f"The QR decomposition took {elapsed.data:.3f} ms to complete.")
elif isinstance(method, SVDMethod):
svd_info = cutn.create_tensor_svd_info(handle)
with utils.device_ctx(options.device_id), utils.cuda_call_ctx(stream, blocking, timing) as (last_compute_event, elapsed):
cutn.tensor_svd(handle,
*input_descriptors, wrapped_operands[0].data_ptr,
output_descriptors[0], output_operands[0].data_ptr,
s_ptr,
output_descriptors[1], output_operands[1].data_ptr,
svd_config, svd_info,
workspace_desc, stream_ptr)
if elapsed.data is not None:
logger.info(f"The SVD decomposition took {elapsed.data:.3f} ms to complete.")
svd_info_obj = SVDInfo(**decomposition_utils.get_svd_info_dict(handle, svd_info))
# update the operand to reduced_extent if needed
for (wrapped_tensor, tensor_desc) in zip(output_operands, output_descriptors):
wrapped_tensor.reshape_to_match_tensor_descriptor(handle, tensor_desc)
reduced_extent = svd_info_obj.reduced_extent
if s is not None and reduced_extent != mid_extent:
s.tensor = s.tensor[:reduced_extent]
finally:
# when host workspace is allocated, synchronize stream before return
if workspaces[cutn.Memspace.HOST] is not None:
stream.synchronize()
# Free resources
if svd_config is not None:
cutn.destroy_tensor_svd_config(svd_config)
if svd_info is not None:
cutn.destroy_tensor_svd_info(svd_info)
decomposition_utils._destroy_tensor_descriptors(input_descriptors)
decomposition_utils._destroy_tensor_descriptors(output_descriptors)
if workspace_desc is not None:
cutn.destroy_workspace_descriptor(workspace_desc)
# destroy handle if owned
if own_handle and handle is not None:
cutn.destroy(handle)
logger.info(f"All resources for the decomposition are freed.")
left_output, right_output, s = [decomposition_utils.get_return_operand_data(o, operands_location) for o in output_operands + [s, ]]
if isinstance(method, QRMethod):
return left_output, right_output
elif isinstance(method, SVDMethod):
if return_info:
return left_output, s, right_output, svd_info_obj
else:
return left_output, s, right_output
else:
raise NotImplementedError
@dataclasses.dataclass
class QRMethod:
"""A data class for providing QR options to the :func:`cuquantum.cutensornet.tensor.decompose` function."""
pass
@dataclasses.dataclass
class SVDInfo:
"""A data class for holding information regarding SVD truncation at runtime.
Attributes:
full_extent: The total number of singular values after matricization (before truncation).
reduced_extent: The number of remaining singular values after truncation.
discarded_weight: The discarded weight for the truncation.
algorithm: The algorithm used in the SVD execution.
gesvdj_residual: The residual for full gesvdj execution.
gesvdj_sweeps: The number of iterations used in the gesvdj execution.
gesvdp_err_sigma: The error sigma in the gesvdp execution.
.. note::
When the SVD algorithm is set to ``"gesvdr"`` with fixed extent truncation enabled in :class:`cuquantum.cutensornet.tensor.SVDMethod`,
the discarded weight will not be computed.
"""
reduced_extent: int
full_extent: int
discarded_weight: float
algorithm: str
gesvdj_residual: Optional[float] = None
gesvdj_sweeps: Optional[int] = None
gesvdp_err_sigma: Optional[float] = None
def __str__(self):
svd_details = f"Algorithm = {self.algorithm}"
if self.gesvdj_residual is not None:
            svd_details += f", residual = {self.gesvdj_residual}"
if self.gesvdj_sweeps is not None:
svd_details += f", sweeps = {self.gesvdj_sweeps}"
if self.gesvdp_err_sigma is not None:
svd_details += f", sigma error = {self.gesvdp_err_sigma}"
s = f"""SVD Information at Runtime:
{svd_details}
Total number of singular values after matricization = {self.full_extent}
Number of singular values after truncation = {self.reduced_extent}
Discarded weight for the truncation = {self.discarded_weight}"""
return s
@dataclasses.dataclass
class SVDMethod:
"""A data class for providing SVD options to the :func:`cuquantum.cutensornet.tensor.decompose` function.
Attributes:
max_extent: Keep no more than the largest ``max_extent`` singular values in the output operands (the rest will be truncated).
abs_cutoff: Singular values below this value will be trimmed in the output operands.
rel_cutoff: Singular values below the product of this value and the largest singular value will be trimmed in the output operands.
        partition: Singular values S will be explicitly returned by default (``partition=None``).
Alternatively, singular values may be factorized onto output tensor U (``partition="U"``), output tensor V (``partition="V"``) or
equally onto output tensor U and output tensor V (``partition="UV"``). When any of these three partition schemes is selected,
the returned ``S`` operand from :func:`cuquantum.cutensornet.tensor.decompose` and
:func:`cuquantum.cutensornet.experimental.contract_decompose` will be `None`.
normalization: The specified norm of the singular values (after truncation) will be normalized to 1.
Currently supports ``None``, ``"L1"``, ``"L2"`` and ``"LInf"``.
algorithm: The SVD algorithm to use. Currently supports ``"gesvd"`` (default), ``"gesvdj"``, ``"gesvdp"`` and ``"gesvdr"``.
gesvdj_tol: The tolerance to use when ``algorithm`` is set to ``"gesvdj"``. Default 0 denotes machine precision.
gesvdj_max_sweeps: The maximal number of sweeps when ``algorithm`` is set to ``"gesvdj"``. Default 0 denotes 100.
gesvdr_oversampling: The size of oversampling when ``algorithm`` is set to ``"gesvdr"``. Default 0 denotes the lower of 4 times ``max_extent`` and the difference between full rank and ``max_extent``.
gesvdr_niters: The number of iteration of power method when ``algorithm`` is set to ``"gesvdr"`` and the default (0) is 10.
.. note::
For detailed explanation on the different SVD algorithms and the corresponding parameters,
please refer to `cuSolver documentation page <https://docs.nvidia.com/cuda/cusolver/index.html#cusolverdn-dense-lapack-function-reference>`_
.. note::
For truncated SVD, currently at least one singular value will be retained in the output even if the truncation parameters are set to trim out all singular values.
This behavior may be subject to change in a future release.
"""
max_extent: Optional[int] = None
abs_cutoff: Optional[float] = 0.0
rel_cutoff: Optional[float] = 0.0
partition: Optional[str] = None
normalization: Optional[str] = None
algorithm: Optional[str] = 'gesvd'
gesvdj_tol: Optional[float] = 0
gesvdj_max_sweeps: Optional[int] = 0
gesvdr_oversampling: Optional[int] = 0
gesvdr_niters: Optional[int] = 0
def __str__(self):
svd_details = f"Algorithm = {self.algorithm}"
if self.gesvdj_tol is not None:
svd_details += f", tolerance = {self.gesvdj_tol}"
if self.gesvdj_max_sweeps is not None:
svd_details += f", max sweeps = {self.gesvdj_max_sweeps}"
if self.gesvdr_oversampling is not None:
svd_details += f", oversampling = {self.gesvdr_oversampling}"
if self.gesvdr_niters is not None:
svd_details += f", niters = {self.gesvdr_niters}"
s = f"""SVD Method:
{svd_details}
Maximal number of singular values = {self.max_extent}
Absolute value cutoff = {self.abs_cutoff}
Relative value cutoff = {self.rel_cutoff}
Singular values partition = {self.partition}
Singular values normalization = {self.normalization}"""
return s
def __post_init__(self):
if self.algorithm not in ('gesvd', 'gesvdj', 'gesvdr', 'gesvdp'):
raise ValueError(f"SVD algorithm {self.algorithm} not supported; currently supports gesvd, gesvdj, gesvdr, gesvdp")
        if (self.gesvdj_tol != 0 or self.gesvdj_max_sweeps != 0) and self.algorithm != 'gesvdj':
            raise ValueError(f"gesvdj_tol and gesvdj_max_sweeps can only be set when algorithm is set to gesvdj, found algorithm {self.algorithm}")
        if (self.gesvdr_oversampling != 0 or self.gesvdr_niters != 0) and self.algorithm != 'gesvdr':
            raise ValueError(f"gesvdr_oversampling and gesvdr_niters can only be set when algorithm is set to gesvdr, found algorithm {self.algorithm}")
def _get_algo_params(self):
initialized = False
if self.algorithm in ('gesvdj', 'gesvdr'):
dtype = cutn.tensor_svd_algo_params_get_dtype(decomposition_utils.SVD_ALGORITHM_MAP[self.algorithm])
algo_params = numpy.zeros(1, dtype=dtype)
for name in dtype.names:
value = getattr(self, f'{self.algorithm}_{name}')
if value != 0:
algo_params[name] = value
initialized = True
if initialized:
return algo_params
else:
return None
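# A minimal sketch (editorial illustration, not part of the library): configuring
# algorithm-specific parameters on SVDMethod. The struct field names ('tol',
# 'max_sweeps') are assumptions about the dtype returned by
# cutn.tensor_svd_algo_params_get_dtype for the 'gesvdj' algorithm; _get_algo_params
# fills them from the 'gesvdj_tol'/'gesvdj_max_sweeps' attributes.
#
#     method = SVDMethod(algorithm='gesvdj', gesvdj_tol=1e-12, gesvdj_max_sweeps=80)
#     params = method._get_algo_params()  # one-element NumPy struct array, or None if all parameters are 0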
| cuQuantum-main | python/cuquantum/cutensornet/tensor.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A converter that translates a quantum circuit to tensor network Einsum equations.
"""
__all__ = ['CircuitToEinsum']
import collections.abc
import importlib
import warnings
import numpy as np
from ._internal import circuit_converter_utils as circ_utils
EMPTY_DICT = circ_utils.EMPTY_DICT
class CircuitToEinsum:
"""
Create a converter object that can generate Einstein summation expressions and tensor operands for a given circuit.
The supported circuit types include :class:`cirq.Circuit` and :class:`qiskit.QuantumCircuit`. The input circuit must
    be fully parameterized and cannot contain operations that are not well-defined in tensor network simulation, for instance,
resetting the quantum state or performing any intermediate measurement.
Args:
circuit : A fully parameterized :class:`cirq.Circuit` or :class:`qiskit.QuantumCircuit` object.
dtype : The datatype for the output tensor operands. If not specified, double complex is used.
backend: The backend for the output tensor operands. If not specified, ``cupy`` is used.
Notes:
- For :class:`qiskit.QuantumCircuit`, composite gates will be decomposed into either Qiskit standard gates or customized unitary gates.
Examples:
Examples using Qiskit:
>>> import qiskit.circuit.random
>>> from cuquantum import contract, CircuitToEinsum
Generate a random quantum circuit:
>>> qc = qiskit.circuit.random.random_circuit(num_qubits=8, depth=7)
Create a :class:`CircuitToEinsum` object:
>>> converter = CircuitToEinsum(qc, backend='cupy')
Find the Einstein summation expression and tensor operands for the state vector:
>>> expression, operands = converter.state_vector()
Contract the equation above to compute the state vector:
>>> sv = contract(expression, *operands)
>>> print(sv.shape)
(2, 2, 2, 2, 2, 2, 2, 2)
Find the Einstein summation expression and tensor operands for computing the probability amplitude of bitstring 00000000:
>>> expression, operands = converter.amplitude('00000000')
Contract the equation above to compute the amplitude:
>>> amplitude = contract(expression, *operands)
Find the Einstein summation expression and tensor operands for computing reduced density matrix on the
first two qubits with the condition that the last qubit is fixed at state ``1``:
>>> where = qc.qubits[:2]
>>> fixed = {qc.qubits[-1]: '1'}
>>> expression, operands = converter.reduced_density_matrix(where, fixed=fixed)
Contract the equation above to compute the reduced density matrix:
>>> rdm = contract(expression, *operands)
>>> print(rdm.shape)
(2, 2, 2, 2)
"""
def __init__(self, circuit, dtype='complex128', backend='cupy'):
# infer library-specific parser
self.parser = circ_utils.infer_parser(circuit)
circuit = self.parser.remove_measurements(circuit)
self.circuit = circuit
if isinstance(backend, str):
backend = importlib.import_module(backend)
self.backend = backend
if isinstance(dtype, str):
try:
dtype = getattr(backend, dtype)
except AttributeError:
dtype = getattr(backend, np.dtype(dtype).name)
self.dtype = dtype
# unfold circuit metadata
self._qubits, self._gates = self.parser.unfold_circuit(circuit, dtype=self.dtype, backend=self.backend)
self.n_qubits = len(self.qubits)
self._metadata = None
@property
def qubits(self):
"""A sequence of all qubits in the circuit."""
return self._qubits
@property
def gates(self):
"""
        A sequence of 2-tuples (``gate_operand``, ``qubits``) representing all gates in the circuit:
Returns:
tuple:
- ``gate_operand``: A ndarray-like tensor object.
The modes of the operands are ordered as ``AB...ab...``, where ``AB...`` denotes all output modes and
``ab...`` denotes all input modes.
                - ``qubits``: A sequence of qubits that the gate acts on, ordered consistently with the modes of ``gate_operand``.
"""
return self._gates
def state_vector(self):
"""
Generate the Einstein summation expression and tensor operands to compute the statevector for the input circuit.
Returns:
The Einstein summation expression and a list of tensor operands. The order of the output mode labels is consistent with :attr:`CircuitToEinsum.qubits`.
For :class:`cirq.Circuit`, this order corresponds to all qubits in the circuit sorted in ascending order.
For :class:`qiskit.QuantumCircuit`, this order is the same as :attr:`qiskit.QuantumCircuit.qubits`.
"""
return self.batched_amplitudes(dict())
def batched_amplitudes(self, fixed):
"""
Generate the Einstein summation expression and tensor operands to compute a batch of bitstring amplitudes for the input circuit.
Args:
fixed: A dictionary that maps certain qubits to the corresponding fixed states 0 or 1.
Returns:
The Einstein summation expression and a list of tensor operands. The order of the output mode labels is consistent with :attr:`CircuitToEinsum.qubits`.
For :class:`cirq.Circuit`, this order corresponds to all qubits in the circuit sorted in ascending order.
For :class:`qiskit.QuantumCircuit`, this order is the same as :attr:`qiskit.QuantumCircuit.qubits`.
"""
if not isinstance(fixed, collections.abc.Mapping):
raise TypeError('fixed must be a dictionary')
input_mode_labels, input_operands, qubits_frontier = self._get_inputs()
fixed_qubits, fixed_bitstring = circ_utils.parse_fixed_qubits(fixed)
fixed_mode_labels = [[qubits_frontier[q]] for q in fixed_qubits]
mode_labels = input_mode_labels + fixed_mode_labels
operands = input_operands + circ_utils.get_bitstring_tensors(fixed_bitstring, dtype=self.dtype, backend=self.backend)
output_mode_labels = [qubits_frontier[q] for q in self.qubits if q not in fixed]
expression = circ_utils.convert_mode_labels_to_expression(mode_labels, output_mode_labels)
return expression, operands
def amplitude(self, bitstring):
"""Generate the Einstein summation expression and tensor operands to compute the probability amplitude of
a bitstring for the input circuit.
Args:
bitstring: A sequence of 0/1 specifying the desired measured state.
The order of the bitstring is expected to be consistent with :attr:`CircuitToEinsum.qubits`.
For :class:`cirq.Circuit`, this order corresponds to all qubits in the circuit sorted in ascending order.
For :class:`qiskit.QuantumCircuit`, this order is the same as :attr:`qiskit.QuantumCircuit.qubits`.
Returns:
The Einstein summation expression and a list of tensor operands
"""
bitstring = circ_utils.parse_bitstring(bitstring, n_qubits=self.n_qubits)
input_mode_labels, input_operands, qubits_frontier = self._get_inputs()
mode_labels = input_mode_labels + [[qubits_frontier[q]] for q in self.qubits]
output_mode_labels = []
expression = circ_utils.convert_mode_labels_to_expression(mode_labels, output_mode_labels)
operands = input_operands + circ_utils.get_bitstring_tensors(bitstring, dtype=self.dtype, backend=self.backend)
return expression, operands
def reduced_density_matrix(self, where, fixed=EMPTY_DICT, lightcone=True):
r"""
reduced_density_matrix(where, fixed=None, lightcone=True)
Generate the Einstein summation expression and tensor operands to compute the reduced density matrix for
the input circuit.
Unitary reverse lightcone cancellation refers to removing the identity formed by a unitary gate (from
the ket state) and its inverse (from the bra state) when there exists no additional operators
in-between. One can take advantage of this technique to reduce the effective network size by
only including the *causal* gates (gates residing in the lightcone).
Args:
            where: A sequence of qubits specifying where the density matrix is reduced onto.
fixed: Optional, a dictionary that maps certain qubits to the corresponding fixed states 0 or 1.
lightcone: Whether to apply the unitary reverse lightcone cancellation technique to reduce the number of tensors in density matrix computation.
Returns:
The Einstein summation expression and a list of tensor operands.
            The mode labels for the output of the expression have the same order as the ``where`` argument.
For example, if where = (:math:`a, b`), the mode labels for the reduced density matrix would be (:math:`a, b, a^{\prime}, b^{\prime}`)
.. seealso:: `unitary reverse lightcone cancellation <https://quimb.readthedocs.io/en/latest/tensor-circuit.html#Unitary-Reverse-Lightcone-Cancellation>`_
"""
n_qubits = self.n_qubits
coned_qubits = list(where) + list(fixed.keys())
input_mode_labels, input_operands, qubits_frontier, next_frontier, inverse_gates = self._get_forward_inverse_metadata(lightcone, coned_qubits)
# handle tensors/mode labels for qubits with fixed state
fixed_qubits, fixed_bitstring = circ_utils.parse_fixed_qubits(fixed)
fixed_operands = circ_utils.get_bitstring_tensors(fixed_bitstring, dtype=self.dtype, backend=self.backend)
mode_labels = input_mode_labels + [[qubits_frontier[ix]] for ix in fixed_qubits]
for iqubit in fixed_qubits:
qubits_frontier[iqubit] = next_frontier
mode_labels.append([next_frontier])
next_frontier += 1
operands = input_operands + fixed_operands * 2
output_mode_labels_info = dict()
for iqubit in where:
output_mode_labels_info[iqubit] = [qubits_frontier[iqubit], next_frontier]
qubits_frontier[iqubit] = next_frontier
next_frontier += 1
igate_mode_labels, igate_operands = circ_utils.parse_gates_to_mode_labels_operands(inverse_gates,
qubits_frontier,
next_frontier)
mode_labels += igate_mode_labels
operands += igate_operands
mode_labels += [[qubits_frontier[ix]] for ix in self.qubits]
operands += input_operands[:n_qubits]
output_left_mode_labels = []
output_right_mode_labels = []
for iqubits, (left_mode_labels, right_mode_labels) in output_mode_labels_info.items():
output_left_mode_labels.append(left_mode_labels)
output_right_mode_labels.append(right_mode_labels)
output_mode_labels = output_left_mode_labels + output_right_mode_labels
expression = circ_utils.convert_mode_labels_to_expression(mode_labels, output_mode_labels)
return expression, operands
def expectation(self, pauli_string, lightcone=True):
"""
Generate the Einstein summation expression and tensor operands to compute the expectation value of a Pauli
string for the input circuit.
Unitary reverse lightcone cancellation refers to removing the identity formed by a unitary gate (from
the ket state) and its inverse (from the bra state) when there exists no additional operators
in-between. One can take advantage of this technique to reduce the effective network size by
only including the *causal* gates (gates residing in the lightcone).
Args:
pauli_string: The Pauli string for expectation value computation. It can be:
- a sequence of characters ``'I'``/``'X'``/``'Y'``/``'Z'``. The length must be equal to the number of qubits.
- a dictionary mapping the selected qubits to Pauli characters. Qubits not specified are
assumed to be applied with the identity operator ``'I'``.
lightcone: Whether to apply the unitary reverse lightcone cancellation technique to reduce the number of tensors in expectation value computation.
Returns:
The Einstein summation expression and a list of tensor operands.
.. note::
When ``lightcone=True``, the identity Pauli operators will be omitted in the output operands. The unitary reverse lightcone cancellation technique is then
applied based on the remaining causal qubits to further reduce the size of the network. The reduction effect depends on the circuit topology and the input Pauli string
(so the contraction path cannot be reused for the contraction of different Pauli strings). When ``lightcone=False``, the identity Pauli operators are preserved in the output operands such that the output tensor network has the identical topology for different Pauli strings, and the contraction path only needs to be computed once and can be reused for all Pauli strings.
.. seealso:: `unitary reverse lightcone cancellation <https://quimb.readthedocs.io/en/latest/tensor-circuit.html#Unitary-Reverse-Lightcone-Cancellation>`_
"""
if isinstance(pauli_string, collections.abc.Sequence):
if len(pauli_string) != self.n_qubits:
                raise ValueError('pauli_string must have the same length as the number of qubits in the circuit')
pauli_string = dict(zip(self.qubits, pauli_string))
else:
if not isinstance(pauli_string, collections.abc.Mapping):
raise TypeError('pauli_string must be either a sequence of pauli characters or a dictionary')
n_qubits = self.n_qubits
if lightcone:
pauli_map = {qubit: pauli_char for qubit, pauli_char in pauli_string.items() if pauli_char!='I'}
else:
pauli_map = pauli_string
coned_qubits = pauli_map.keys()
input_mode_labels, input_operands, qubits_frontier, next_frontier, inverse_gates = self._get_forward_inverse_metadata(lightcone, coned_qubits)
pauli_gates = circ_utils.get_pauli_gates(pauli_map, dtype=self.dtype, backend=self.backend)
gates = pauli_gates + inverse_gates
gate_mode_labels, gate_operands = circ_utils.parse_gates_to_mode_labels_operands(gates,
qubits_frontier,
next_frontier)
mode_labels = input_mode_labels + gate_mode_labels + [[qubits_frontier[ix]] for ix in self.qubits]
operands = input_operands + gate_operands + input_operands[:n_qubits]
output_mode_labels = []
expression = circ_utils.convert_mode_labels_to_expression(mode_labels, output_mode_labels)
return expression, operands
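    # A minimal usage sketch (editorial illustration; assumes a ``converter`` built
    # as in the class-level examples above, with ``cuquantum.contract`` used for
    # the contraction): expectation value of a two-qubit Pauli string.
    #
    #     pauli_map = {converter.qubits[0]: 'Z', converter.qubits[1]: 'Z'}
    #     expression, operands = converter.expectation(pauli_map, lightcone=True)
    #     expectation_value = cuquantum.contract(expression, *operands)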
def _get_inputs(self):
"""transform the qubits and gates in the circuit to a prelimary Einsum form.
Returns:
metadata: A 3-tuple (``mode_labels``, ``operands``, ``qubits_frontier``):
- ``mode_labels`` : A list of list of int, each corresponding to the mode labels for the tensor operands.
- ``operands`` : A list of arrays corresponding to all the qubits and gate tensor operands.
- ``qubits_frontier`` : A dictionary that maps all qubits to their current mode labels.
"""
if self._metadata is None:
self._metadata = circ_utils.parse_inputs(self.qubits, self._gates, self.dtype, self.backend)
return self._metadata
def _get_forward_inverse_metadata(self, lightcone, coned_qubits):
"""parse the metadata for forward and inverse circuit.
Args:
lightcone: Whether to apply the unitary reverse lightcone cancellation technique to reduce the number of tensors in expectation value computation.
coned_qubits: An iterable of qubits to be coned.
Returns:
tuple: A 5-tuple (``input_mode_labels``, ``input_operands``, ``qubits_frontier``, ``next_frontier``, ``inverse_gates``):
- ``input_mode_labels`` : A sequence of mode labels for initial states and gate tensors.
- ``input_operands`` : A sequence of operands for initial states and gate tensors.
- ``qubits_frontier``: A dictionary mapping all qubits to their current mode labels.
- ``next_frontier``: The next mode label to use.
- ``inverse_gates``: A sequence of (operand, qubits) for the inverse circuit.
"""
parser = self.parser
if lightcone:
circuit = parser.get_lightcone_circuit(self.circuit, coned_qubits)
_, gates = parser.unfold_circuit(circuit, dtype=self.dtype, backend=self.backend)
# in cirq, the lightcone circuit may only contain a subset of the original qubits
# It's imperative to use qubits=self.qubits to generate the input tensors
input_mode_labels, input_operands, qubits_frontier = circ_utils.parse_inputs(self.qubits, gates, self.dtype, self.backend)
else:
circuit = self.circuit
input_mode_labels, input_operands, qubits_frontier = self._get_inputs()
# avoid inplace modification on metadata
qubits_frontier = qubits_frontier.copy()
next_frontier = max(qubits_frontier.values()) + 1
# inverse circuit
inverse_circuit = parser.get_inverse_circuit(circuit)
_, inverse_gates = parser.unfold_circuit(inverse_circuit, dtype=self.dtype, backend=self.backend)
return input_mode_labels, input_operands, qubits_frontier, next_frontier, inverse_gates
| cuQuantum-main | python/cuquantum/cutensornet/circuit_converter.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Formatters for printing data.
"""
import numpy as np
class MemoryStr(object):
"""
A simple type to pretty-print memory-like values.
"""
def __init__(self, memory, base_unit='B'):
self.memory = memory
self.base_unit = base_unit
self.base = 1024
def __str__(self):
"""
Convert large values to powers of 1024 for readability.
"""
base, base_unit, memory = self.base, self.base_unit, self.memory
if memory < base:
value, unit = memory, base_unit
elif memory < base**2:
value, unit = memory/base, f'Ki{base_unit}'
elif memory < base**3:
value, unit = memory/base**2, f'Mi{base_unit}'
else:
value, unit = memory/base**3, f'Gi{base_unit}'
return f"{value:0.2f} {unit}"
def array2string(array_like):
"""
String representation of an array-like object with possible truncation of "interior" values to limit string size.
The NumPy function "set_printoptions" can be used to control the display of the array.
"""
return np.array2string(
np.asanyarray(array_like, dtype=object),
separator=', ',
# NumPy hates empty strings so we print 'None' instead.
formatter={'object': lambda s: s if s != '' else 'None'})
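# A minimal usage sketch (editorial illustration): empty strings render as 'None'
# and elements are joined with ', ':
#
#     >>> array2string(['a', '', 'b'])
#     '[a, None, b]'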
| cuQuantum-main | python/cuquantum/cutensornet/_internal/formatters.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Factories for create options dataclasses, as well as utilities to add docstring to enum classes.
"""
import dataclasses
from enum import IntEnum
import re
from typing import Any, Callable, ClassVar, Dict, Optional
import numpy
def create_options_class_from_enum(options_class_name: str, enum_class: IntEnum, get_attr_dtype: Callable, description: str, filter_re: str =r'(?P<option_name>.*)'):
"""
Create an options dataclass from a Python enum class. Names can be filtered if desired.
Args:
options_class_name: Name of the dataclass that will be created.
enum_class: The IntEnum class that contains the options for the dataclass.
        get_attr_dtype: A callable that takes an enum value as the argument and returns the NumPy dtype of the corresponding attribute.
        description: A short phrase describing the options, used in the generated class docstring.
        filter_re: A regular expression that defines the named group 'option_name'.
"""
if r'(?P<option_name>' not in filter_re:
message = """Incorrect re.
The re for the filter must contain the named group 'option_name'."""
raise ValueError(message)
# Helper vars for creating attribute docstring.
doc = f"""A data class for capturing the {description} options.
Attributes:
"""
indent = ' '*8
prefix = determine_enum_prefix(enum_class, '_ATTRIBUTE')
filter_re = re.compile(filter_re)
option_to_enum = dict()
option_to_dtype = dict()
for e in enum_class:
m = filter_re.match(e.name)
if not m:
continue
option_name = m.group('option_name').lower()
option_to_enum[option_name] = e
option_to_dtype[option_name] = get_attr_dtype(e)
# Add docstring for this attribute.
doc += indent + option_name + ':' + f" See `{prefix + '_' + m.group(0)}`.\n"
fields = list()
for option_name, dtype in option_to_dtype.items():
if numpy.issubdtype(dtype, numpy.integer):
field = option_name, Optional[int], dataclasses.field(default=None)
else:
field = option_name, Optional[Any], dataclasses.field(default=None)
fields.append(field)
# Add class attributes.
field = 'option_to_enum', ClassVar[Dict], dataclasses.field(default=option_to_enum)
fields.append(field)
field = 'option_to_dtype', ClassVar[Dict], dataclasses.field(default=option_to_dtype)
fields.append(field)
# Create the options class.
options_class = dataclasses.make_dataclass(options_class_name, fields)
options_class.__doc__ = doc
return options_class
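# A minimal sketch (editorial illustration with a hypothetical enum, not a
# cuTensorNet API): given
#
#     class FooAttribute(IntEnum):
#         CONFIG_TILE_SIZE = 0
#         CONFIG_CACHE_MODE = 1
#
# the call
#
#     FooOptions = create_options_class_from_enum(
#         'FooOptions', FooAttribute, lambda e: numpy.int32, "foo plan",
#         filter_re=r'CONFIG_(?P<option_name>.*)')
#
# produces a dataclass with optional integer fields 'tile_size' and 'cache_mode',
# plus the class-level maps 'option_to_enum' and 'option_to_dtype'.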
def snake_to_camel(names):
name = ""
for i, sub_name in enumerate(names):
if i == 0:
name += sub_name.lower()
else:
name += sub_name[0].upper() + sub_name[1:]
name += "_t"
return name
def camel_to_snake(name, upper=True):
"""
Convert string from camel case to snake style.
"""
def transformer(m):
prefix = iter(('', '_', ''))
for i in 1, 3, 5:
first, second = i, i + 1
s = next(prefix)
if m.group(second):
if m.group(first): # If the second group doesn't exist, the first won't either by the design of the RE.
s += m.group(first).lower() + '_'
s += m.group(second).lower()
break
return s
name = re.sub(r"^([A-Z]*)([A-Z])|(?<!_)([A-Z]*)([A-Z])|([A-Z]*)([A-Z])", transformer, name)
if upper:
name = name.upper()
return name
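# A minimal usage sketch (editorial illustration): runs of capitals are kept
# together as a single word.
#
#     >>> camel_to_snake('TensorSVDConfig', upper=False)
#     'tensor_svd_config'
#     >>> camel_to_snake('ContractionOptimizerInfo')
#     'CONTRACTION_OPTIMIZER_INFO'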
def determine_enum_prefix(enum_class, chomp):
"""
This function assumes that the convention used to translate C enumerators to Python enum names holds.
"""
prefix = enum_class.__module__.split('.')[-1].upper()
prefix += '_' + camel_to_snake(enum_class.__name__)
prefix = re.sub(chomp, '', prefix)
return prefix
def add_enum_class_doc(enum_class, chomp):
"""
Add docstring to enum classes.
"""
for e in enum_class:
e.__doc__ = f"See `{determine_enum_prefix(enum_class, chomp) + '_' + e.name.upper()}`."
| cuQuantum-main | python/cuquantum/cutensornet/_internal/enum_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of utility functions for decomposition.
"""
import logging
import numpy
from . import einsum_parser
from . import formatters
from . import tensor_wrapper
from . import typemaps
from . import utils
from .. import cutensornet as cutn
from .. import memory
DECOMPOSITION_DTYPE_NAMES = ('float32', 'float64', 'complex64', 'complex128')
# TODO: auto-generate the maps below
PARTITION_MAP = {None: cutn.TensorSVDPartition.NONE,
'U': cutn.TensorSVDPartition.US,
'V': cutn.TensorSVDPartition.SV,
'UV': cutn.TensorSVDPartition.UV_EQUAL}
NORMALIZATION_MAP = {None: cutn.TensorSVDNormalization.NONE,
'L1': cutn.TensorSVDNormalization.L1,
'L2': cutn.TensorSVDNormalization.L2,
'LInf': cutn.TensorSVDNormalization.LINF}
SVD_ALGORITHM_MAP = {'gesvd': cutn.TensorSVDAlgo.GESVD,
'gesvdj': cutn.TensorSVDAlgo.GESVDJ,
'gesvdp': cutn.TensorSVDAlgo.GESVDP,
'gesvdr': cutn.TensorSVDAlgo.GESVDR}
SVD_ALGORITHM_MAP_TO_STRING = dict((val, key) for key, val in SVD_ALGORITHM_MAP.items())
SVD_METHOD_CONFIG_MAP = {'abs_cutoff': cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
'rel_cutoff': cutn.TensorSVDConfigAttribute.REL_CUTOFF,
'partition': cutn.TensorSVDConfigAttribute.S_PARTITION,
'normalization': cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
'algorithm': cutn.TensorSVDConfigAttribute.ALGO}
SVD_INFO_MAP = {'full_extent': cutn.TensorSVDInfoAttribute.FULL_EXTENT,
'reduced_extent': cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
'discarded_weight': cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
'algorithm': cutn.TensorSVDInfoAttribute.ALGO}
def compute_combined_size(size_dict, modes):
"""
Given the modes, compute the product of all extents using information in size_dict.
"""
size = 1
for mode in modes:
size *= size_dict[mode]
return size
def parse_decomposition_subscripts(subscripts):
"""
Parse decomposition expression in string format, retaining ellipses if present.
"""
input_modes, *output_modes = subscripts.split("->")
if not output_modes:
raise ValueError("Output modes must be explicitly specified for decomposition")
if len(output_modes) > 1:
raise ValueError("subscripts must contain only 1 ->")
input_modes = input_modes.split(",")
output_modes = output_modes[0].split(",")
if len(output_modes) != 2:
raise ValueError("subscripts must specify the modes for both left and right tensors")
return input_modes, output_modes
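# A minimal usage sketch (editorial illustration):
#
#     >>> parse_decomposition_subscripts('ijab->ika,kjb')
#     (['ijab'], ['ika', 'kjb'])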
def compute_mid_extent(size_dict, inputs, outputs):
"""
Compute the expected mid extent given a size_dict and the modes for both inputs and outputs.
"""
size_dict = size_dict.copy() # this func will modify it in place
left_output = set(outputs[0])
right_output = set(outputs[1])
shared_mode_out = set(left_output) & set(right_output)
    if len(shared_mode_out) != 1:
raise ValueError(f"Expect one shared mode in the output tensors, found {len(shared_mode_out)}")
left_output -= shared_mode_out
right_output -= shared_mode_out
for _input in inputs:
left_extent = right_extent = remaining_extent = 1
left_modes = set()
right_modes = set()
for mode in _input:
extent = size_dict[mode]
if mode in left_output:
left_extent *= extent
left_modes.add(mode)
elif mode in right_output:
right_extent *= extent
right_modes.add(mode)
else:
remaining_extent *= extent
if right_extent * remaining_extent < left_extent:
# update left modes
left_mode_collapsed = left_modes.pop()
size_dict[left_mode_collapsed] = right_extent * remaining_extent
left_output -= left_modes
elif left_extent * remaining_extent < right_extent:
# update right modes
right_mode_collapsed = right_modes.pop()
size_dict[right_mode_collapsed] = left_extent * remaining_extent
right_output -= right_modes
left_extent = compute_combined_size(size_dict, left_output)
right_extent = compute_combined_size(size_dict, right_output)
return min(left_extent, right_extent)
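# A minimal sketch (editorial illustration): for the matrix decomposition
# 'ij->ik,kj' with extents i=4 and j=6 (modes mapped to the ordinals 0 and 1,
# and the shared output mode to 2), the mid extent is capped by the smaller
# dimension, mirroring numpy.linalg.svd with full_matrices=False:
#
#     >>> compute_mid_extent({0: 4, 1: 6}, [(0, 1)], [(0, 2), (2, 1)])
#     4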
def parse_decomposition(subscripts, *operands):
"""
Parse the generalized decomposition expression in string formats (unicode strings supported).
The modes for the outputs must be specified.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers,
the forward as well as the reverse mode maps, and the largest mid extent expected for the decomposition.
"""
inputs, outputs = parse_decomposition_subscripts(subscripts)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch. The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the decomposition expression."""
raise ValueError(message)
morpher = einsum_parser.select_morpher(False)
# First wrap operands.
operands = tensor_wrapper.wrap_operands(operands)
inputs = list(einsum_parser.parse_single(_input) for _input in inputs)
outputs = list(einsum_parser.parse_single(_output) for _output in outputs)
ellipses_input = any(Ellipsis in _input for _input in inputs)
num_ellipses_output = sum(Ellipsis in _output for _output in outputs)
if num_ellipses_output > 1:
raise ValueError(f"Ellipses found in {num_ellipses_output} output terms, only allowed in one at most.")
if ellipses_input:
if num_input == 1 and num_ellipses_output == 0:
raise ValueError("tensor.decompose does not support reduction operations")
einsum_parser.check_ellipses(inputs+outputs, morpher)
else:
if num_ellipses_output != 0:
raise ValueError("Invalid ellipsis specification. The output terms contain ellipsis while none of the input terms do.")
einsum_parser.check_einsum_with_operands(inputs, operands, morpher)
# Map data to ordinals for cutensornet.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses_input else 0
all_modes, _, mode_map_user_to_ord, mode_map_ord_to_user, label_end = einsum_parser.map_modes(inputs + outputs, None, num_extra_labels, morpher)
mapper = einsum_parser.ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = einsum_parser.select_morpher(False, mapper)
# Replace ellipses with concrete labels
if ellipses_input:
if num_input == 1:
# For tensor.decompose only
n = len(operands[0].shape) - (len(inputs[0]) -1)
else:
num_implicit_modes = set()
for i, o in enumerate(operands):
_input = all_modes[i]
if Ellipsis not in _input:
continue
n = len(o.shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
num_implicit_modes.add(n)
if len(num_implicit_modes) != 1:
#NOTE: Although we can allow ellipsis denoting different number of modes,
# here we disable it due to limited use case if any and potential confusion due to implicit specification.
raise ValueError(f"Ellipsis for all operands must refer to equal number of modes, found {num_implicit_modes}")
n = num_implicit_modes.pop()
ellipses_modes = tuple(range(label_end-n, label_end))
for i, _modes in enumerate(all_modes):
if Ellipsis not in _modes:
continue
s = _modes.index(Ellipsis)
all_modes[i] = _modes[:s] + ellipses_modes + _modes[s+1:]
inputs = all_modes[:num_input]
outputs = all_modes[num_input:]
if num_input == 1:
contracted_modes_output = set(einsum_parser.infer_output_mode_labels(outputs))
if contracted_modes_output != set(inputs[0]):
raise ValueError("The contracted outcome from the right hand side of the expression does not match the input")
# Create mode-extent map based on internal mode numbers.
size_dict = einsum_parser.create_size_dict(inputs, operands)
# Compute the maximally allowed mid extent
mid_extent = compute_mid_extent(size_dict, inputs, outputs)
return operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, mid_extent
def get_svd_config_info_scalar_attr(handle, obj_type, obj, attr, svd_algorithm=None):
"""
Get the data for given attribute of SVDConfig or SVDInfo.
"""
if obj_type == 'config':
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
return None
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_config_get_attribute
elif obj_type == 'info':
if attr != cutn.TensorSVDInfoAttribute.ALGO_STATUS:
dtype = cutn.tensor_svd_info_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDP):
return None
dtype = cutn.tensor_svd_algo_status_get_dtype(svd_algorithm)
getter = cutn.tensor_svd_info_get_attribute
else:
raise ValueError("object type must be either config or info")
data = numpy.empty((1,), dtype=dtype)
getter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
return data
def set_svd_config_scalar_attr(handle, obj, attr, data, svd_algorithm=None):
"""
Set the data for given attribute of SVDConfig.
"""
setter = cutn.tensor_svd_config_set_attribute
if attr != cutn.TensorSVDConfigAttribute.ALGO_PARAMS:
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
else:
if svd_algorithm not in (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR):
raise ValueError(f"Algorithm specific parameters not supported for {svd_algorithm}")
dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
if not isinstance(data, numpy.ndarray):
data = numpy.asarray(data, dtype=dtype)
setter(handle, obj, attr, data.ctypes.data, data.dtype.itemsize)
def parse_svd_config(handle, svd_config, svd_method, logger=None):
"""
Given an SVDMethod object, set the corresponding attributes in the SVDConfig.
"""
svd_algorithm = None
for method_attr, attr in SVD_METHOD_CONFIG_MAP.items():
data = getattr(svd_method, method_attr)
if method_attr == 'partition':
data = PARTITION_MAP[data]
elif method_attr == 'normalization':
data = NORMALIZATION_MAP[data]
elif method_attr == 'algorithm':
svd_algorithm = data = SVD_ALGORITHM_MAP[data]
set_svd_config_scalar_attr(handle, svd_config, attr, data)
if logger is not None:
logger.info(f"The SVDConfig attribute '{method_attr}' has been set to {data}.")
algo_params = svd_method._get_algo_params()
if algo_params is not None:
set_svd_config_scalar_attr(handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params, svd_algorithm=svd_algorithm)
if logger is not None:
logger.info(f"The SVDConfig attribute '{cutn.TensorSVDConfigAttribute.ALGO_PARAMS}' has been set to {algo_params}.")
def get_svd_info_dict(handle, svd_info):
"""
Parse the information in SVDInfo in a dictionary object.
"""
info = dict()
for key, attr in SVD_INFO_MAP.items():
info[key] = get_svd_config_info_scalar_attr(handle, 'info', svd_info, attr).item()
svd_algorithm = info['algorithm']
algo_status = get_svd_config_info_scalar_attr(handle, 'info', svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, svd_algorithm=svd_algorithm)
info['algorithm'] = SVD_ALGORITHM_MAP_TO_STRING[svd_algorithm]
if algo_status is not None:
for name in algo_status.dtype.names:
key = info['algorithm'] + f'_{name}'
info[key] = algo_status[name].item()
return info
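# A minimal sketch (editorial illustration; all field values and the algo-status
# field names are hypothetical): for a run with the 'gesvdj' algorithm, the
# returned dict is expected to look roughly like
#
#     {'full_extent': 6, 'reduced_extent': 4, 'discarded_weight': 1.2e-03,
#      'algorithm': 'gesvdj', 'gesvdj_residual': ..., 'gesvdj_sweeps': ...}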
def parse_decompose_operands_options(options, wrapped_operands, allowed_dtype_names=None):
"""
Given initially wrapped tensors and network options, wrap the operands to device and create an internal NetworkOptions object.
If cutensornet library handle is not provided in `options`, one will be created in the internal options.
"""
device_id = utils.get_network_device_id(wrapped_operands)
logger = logging.getLogger() if options.logger is None else options.logger
operands_location = 'cuda'
if device_id is None:
operands_location = 'cpu'
device_id = options.device_id
logger.info(f"Begin transferring input data from host to device {device_id}")
wrapped_operands = tensor_wrapper.to(wrapped_operands, device_id)
logger.info("Input data transfer finished")
# initialize handle once if not provided
if options.handle is not None:
own_handle = False
handle = options.handle
else:
own_handle = True
with utils.device_ctx(device_id):
handle = cutn.create()
dtype_name = utils.get_operands_dtype(wrapped_operands)
if allowed_dtype_names is not None and dtype_name not in allowed_dtype_names:
raise ValueError(f"dtype {dtype_name} not supported")
compute_type = options.compute_type if options.compute_type is not None else typemaps.NAME_TO_COMPUTE_TYPE[dtype_name]
package = utils.get_operands_package(wrapped_operands)
allocator = options.allocator if options.allocator is not None else memory._MEMORY_MANAGER[package](device_id, logger)
internal_options = options.__class__(device_id=device_id,
logger=logger,
handle=handle,
blocking=options.blocking,
compute_type=compute_type,
memory_limit=options.memory_limit,
allocator=allocator)
return wrapped_operands, internal_options, own_handle, operands_location
def allocate_and_set_workspace(handle, allocator, workspace_desc, pref, mem_space, workspace_kind, device_id, stream, stream_ctx, logger, task_name=''):
"""
Allocate and set the workspace in the workspace descriptor.
"""
workspace_size = cutn.workspace_get_memory_size(handle, workspace_desc, pref, mem_space, workspace_kind)
# Allocate and set workspace
if mem_space == cutn.Memspace.DEVICE:
with utils.device_ctx(device_id), stream_ctx:
try:
logger.debug(f"Allocating device memory for {task_name}")
workspace_ptr = allocator.memalloc(workspace_size)
except TypeError as e:
message = "The method 'memalloc' in the allocator object must conform to the interface in the "\
"'BaseCUDAMemoryManager' protocol."
raise TypeError(message) from e
logger.debug(f"Finished allocating device memory of size {formatters.MemoryStr(workspace_size)} for decomposition in the context of stream {stream}.")
device_ptr = utils.get_ptr_from_memory_pointer(workspace_ptr)
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, device_ptr, workspace_size)
logger.debug(f"The workspace memory (device pointer = {device_ptr}) has been set in the workspace descriptor.")
return workspace_ptr
elif workspace_size != 0:
# host workspace
logger.debug(f"Allocating host memory for {task_name}")
workspace_host = numpy.empty(workspace_size, dtype=numpy.int8)
logger.debug(f"Finished allocating host memory of size {formatters.MemoryStr(workspace_size)} for decomposition.")
cutn.workspace_set_memory(handle, workspace_desc, mem_space, workspace_kind, workspace_host.ctypes.data, workspace_size)
logger.debug(f"The workspace memory (host pointer = {workspace_host.ctypes.data}) has been set in the workspace descriptor.")
return workspace_host
else:
return None
def _destroy_tensor_descriptors(desc_tensors):
for t in desc_tensors:
if t is not None:
cutn.destroy_tensor_descriptor(t)
def create_operands_and_descriptors(handle, wrapped_operands, size_dict, inputs, outputs, mid_extent, method, device_id, stream_ctx, logger):
"""
Create empty tensor operands and corresponding tensor descriptors for a decomposition problem.
"""
# Create input tensor descriptors, output operands and output tensor descriptors
output_class = wrapped_operands[0].__class__
dtype_name = wrapped_operands[0].dtype
# Compute extents for the outputs
shared_mode_out = list(set(outputs[0]) & set(outputs[1]))[0]
output_extents = [tuple(size_dict[m] if m != shared_mode_out else mid_extent for m in modes) for modes in outputs]
logger.debug("Creating input tensor descriptors.")
input_tensor_descriptors = []
output_tensor_descriptors = []
try:
for (t, modes) in zip(wrapped_operands, inputs):
input_tensor_descriptors.append(t.create_tensor_descriptor(handle, modes))
logger.debug("The input tensor descriptors have been created.")
# Create the output in the context of the current stream to work around a performance issue with CuPy's memory pool.
logger.debug("Beginning output tensors and descriptors creation...")
s = None
s_ptr = 0
output_operands = []
with utils.device_ctx(device_id):
for extent, tensor_modes in zip(output_extents, outputs):
operand = utils.create_empty_tensor(output_class, extent, dtype_name, device_id, stream_ctx)
output_operands.append(operand)
output_tensor_descriptors.append(operand.create_tensor_descriptor(handle, tensor_modes))
if hasattr(method, 'partition') and method.partition is None:
if dtype_name in ['float32', 'complex64']:
s_dtype_name = 'float32'
elif dtype_name in ['float64', 'complex128']:
s_dtype_name = 'float64'
else:
raise ValueError(f"{dtype_name} data type not supported")
s = utils.create_empty_tensor(output_class, (mid_extent, ), s_dtype_name, device_id, stream_ctx)
s_ptr = s.data_ptr
logger.debug("The output tensors and descriptors have been created.")
except:
_destroy_tensor_descriptors(input_tensor_descriptors)
_destroy_tensor_descriptors(output_tensor_descriptors)
raise
return input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr
def get_return_operand_data(tensor, target_location):
"""
Given wrapped tensors, fetch the return operands based on target location.
"""
if tensor is None: # potentially for s
return tensor
if target_location == 'cpu':
return tensor.to('cpu')
else: # already on device
return tensor.tensor
| cuQuantum-main | python/cuquantum/cutensornet/_internal/decomposition_utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface class to encapsulate low-level calls to get or set optimizer information.
"""
__all__ = ['OptimizerInfoInterface']
from collections.abc import Sequence
import itertools
import operator
import numpy as np
from cuquantum import cutensornet as cutn
def _parse_and_map_sliced_modes(sliced_modes, mode_map_user_to_ord, size_dict):
"""
    Parse user-provided sliced modes, create and return a contiguous (sliced mode, sliced extent) array of
type `cutn.cutensornet.slice_info_pair_dtype`.
"""
num_sliced_modes = len(sliced_modes)
slice_info_array = np.empty((num_sliced_modes,), dtype=cutn.cutensornet.slice_info_pair_dtype)
if num_sliced_modes == 0:
return slice_info_array
# The sliced modes have already passed basic checks when creating the OptimizerOptions dataclass.
pairs = not isinstance(sliced_modes[0], str) and isinstance(sliced_modes[0], Sequence)
if pairs:
sliced_modes, sliced_extents = zip(*sliced_modes)
else:
sliced_extents = (1,)
# Check for invalid mode labels.
invalid_modes = tuple(filter(lambda k: k not in mode_map_user_to_ord, sliced_modes))
if invalid_modes:
message = f"Invalid sliced mode labels: {invalid_modes}"
raise ValueError(message)
slice_info_array["sliced_mode"] = sliced_modes = [mode_map_user_to_ord[m] for m in sliced_modes]
remainder = any(size_dict[m] % e for m, e in itertools.zip_longest(sliced_modes, sliced_extents, fillvalue=1))
if remainder:
raise ValueError("The sliced extents must evenly divide the original extents of the corresponding mode.")
slice_info_array["sliced_extent"] = sliced_extents
return slice_info_array
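# Note (editorial): ``sliced_modes`` may be a plain sequence of mode labels such
# as ('a', 'b'), in which case every sliced extent defaults to 1, or a sequence
# of (mode, extent) pairs such as (('a', 2), ('b', 4)); each sliced extent must
# evenly divide the original extent of the corresponding mode.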
InfoEnum = cutn.ContractionOptimizerInfoAttribute
class OptimizerInfoInterface:
def __init__(self, network):
"""
"""
self.network = network
get_dtype = cutn.contraction_optimizer_info_get_attribute_dtype
self._flop_count = np.zeros((1,), dtype=get_dtype(InfoEnum.FLOP_COUNT))
self._largest_tensor = np.zeros((1,), dtype=get_dtype(InfoEnum.LARGEST_TENSOR))
self._num_slices = np.zeros((1,), dtype=get_dtype(InfoEnum.NUM_SLICES))
self._num_sliced_modes = np.zeros((1,), dtype=get_dtype(InfoEnum.NUM_SLICED_MODES))
self._slicing_config = np.zeros((1,), dtype=get_dtype(InfoEnum.SLICING_CONFIG))
self._slicing_overhead = np.zeros((1,), dtype=get_dtype(InfoEnum.SLICING_OVERHEAD))
self.num_contraction = len(self.network.operands) - 1
self._path = np.zeros((1,), dtype=get_dtype(InfoEnum.PATH))
@staticmethod
def _get_scalar_attribute(network, name, attribute):
"""
name = cutensornet enum for the attribute
attribute = numpy ndarray object into which the value is stored by cutensornet
"""
assert network.optimizer_info_ptr is not None, "Internal error"
cutn.contraction_optimizer_info_get_attribute(network.handle, network.optimizer_info_ptr, name, attribute.ctypes.data, attribute.dtype.itemsize)
@staticmethod
def _set_scalar_attribute(network, name, attribute, value):
"""
name = cutensornet enum for the attribute
attribute = numpy ndarray object into which the value is stored
        value = the value to set the attribute to
"""
assert network.optimizer_info_ptr is not None, "Internal error"
attribute[0] = value
cutn.contraction_optimizer_info_set_attribute(network.handle, network.optimizer_info_ptr, name, attribute.ctypes.data, attribute.dtype.itemsize)
@property
def num_slices(self):
"""
The number of slices in the network.
"""
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.NUM_SLICES, self._num_slices)
return int(self._num_slices)
@property
def flop_count(self):
"""
The cost of contracting the network.
"""
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.FLOP_COUNT, self._flop_count)
return float(self._flop_count)
@property
def largest_intermediate(self):
"""
The size of the largest intermediate.
"""
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.LARGEST_TENSOR, self._largest_tensor)
return float(self._largest_tensor)
@property
def slicing_overhead(self):
"""
The slicing overhead.
"""
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.SLICING_OVERHEAD, self._slicing_overhead)
return float(self._slicing_overhead)
@property
def path(self):
"""
Return the contraction path in linear format.
"""
path = np.empty((2*self.num_contraction,), dtype=np.int32)
self._path["data"] = path.ctypes.data
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.PATH, self._path)
return list(zip(*[iter(path)]*2))
@path.setter
def path(self, path):
"""
Set the path.
"""
from functools import reduce
get_dtype = cutn.contraction_optimizer_info_get_attribute_dtype
network = self.network
num_contraction = len(path)
if num_contraction != len(network.operands) - 1:
raise ValueError(f"The length of the contraction path ({num_contraction}) must be one less than the number of operands ({len(network.operands)}).")
path = reduce(operator.concat, path)
path_array = np.asarray(path, dtype=np.int32)
# Construct the path type.
path = np.array((num_contraction, path_array.ctypes.data), dtype=get_dtype(InfoEnum.PATH))
# Set the attribute.
OptimizerInfoInterface._set_scalar_attribute(self.network, InfoEnum.PATH, self._path, path)
@property
def num_sliced_modes(self):
"""
The number of sliced modes in the network.
"""
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.NUM_SLICED_MODES, self._num_sliced_modes)
return int(self._num_sliced_modes)
@property
def sliced_mode_extent(self):
"""
Return the sliced modes as a sequence of (sliced mode, sliced extent) pairs.
"""
get_dtype = cutn.contraction_optimizer_info_get_attribute_dtype
network = self.network
num_sliced_modes = self.num_sliced_modes
slice_info_array = np.empty((num_sliced_modes,), dtype=cutn.cutensornet.slice_info_pair_dtype)
slicing_config = self._slicing_config
slicing_config["num_sliced_modes"] = num_sliced_modes
slicing_config["data"] = slice_info_array.ctypes.data
OptimizerInfoInterface._get_scalar_attribute(self.network, InfoEnum.SLICING_CONFIG, slicing_config)
sliced_modes = tuple(network.mode_map_ord_to_user[m] for m in slice_info_array["sliced_mode"]) # Convert to user mode labels
sliced_extents = slice_info_array["sliced_extent"]
return tuple(zip(sliced_modes, sliced_extents))
@sliced_mode_extent.setter
def sliced_mode_extent(self, sliced_modes):
"""
Set the sliced modes (and possibly sliced extent).
        sliced_modes = sequence of sliced modes, or sequence of (sliced mode, sliced extent) pairs
"""
get_dtype = cutn.contraction_optimizer_info_get_attribute_dtype
network = self.network
# Construct the slicing config type.
slice_info_array = _parse_and_map_sliced_modes(sliced_modes, network.mode_map_user_to_ord, network.size_dict)
slicing_config = np.array((len(slice_info_array), slice_info_array.ctypes.data), dtype=get_dtype(InfoEnum.SLICING_CONFIG))
# Set the attribute.
OptimizerInfoInterface._set_scalar_attribute(network, InfoEnum.SLICING_CONFIG, self._slicing_config, slicing_config)
@property
def intermediate_modes(self):
"""
Return a sequence of mode labels for all the intermediate tensors.
"""
get_dtype = cutn.contraction_optimizer_info_get_attribute_dtype
network = self.network
num_intermediate_modes = np.zeros((max(1, self.num_contraction),), dtype=get_dtype(InfoEnum.NUM_INTERMEDIATE_MODES)) # Output modes included
size = num_intermediate_modes.nbytes
cutn.contraction_optimizer_info_get_attribute(network.handle, network.optimizer_info_ptr, InfoEnum.NUM_INTERMEDIATE_MODES, num_intermediate_modes.ctypes.data, size)
intermediate_modes = np.zeros((num_intermediate_modes.sum(),), dtype=get_dtype(InfoEnum.INTERMEDIATE_MODES))
size = intermediate_modes.nbytes
cutn.contraction_optimizer_info_get_attribute(network.handle, network.optimizer_info_ptr, InfoEnum.INTERMEDIATE_MODES, intermediate_modes.ctypes.data, size)
count, out = 0, list()
mode_type = tuple if network.is_interleaved else ''.join
for n in num_intermediate_modes:
out.append(mode_type(map(lambda m: network.mode_map_ord_to_user[m], intermediate_modes[count:count+n]))) # Convert to user mode labels
count += n
assert count == num_intermediate_modes.sum()
return tuple(out)
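# Illustrative sketch (hypothetical `network` object created by the high-level API):
#
#   info = OptimizerInfoInterface(network)
#   print(info.flop_count, info.num_slices)    # read optimizer metrics
#   info.path = [(0, 1), (0, 1)]               # set a contraction path in linear format
#   info.sliced_mode_extent = [('a', 2)]       # slice mode 'a' with extent 2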
| cuQuantum-main | python/cuquantum/cutensornet/_internal/optimizer_ifc.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to CuPy operations.
"""
__all__ = ['CupyPackage']
import cupy as cp
from . import utils
from .package_ifc import Package
class CupyPackage(Package):
@staticmethod
def get_current_stream(device_id):
with utils.device_ctx(device_id):
stream = cp.cuda.get_current_stream()
return stream
@staticmethod
def to_stream_pointer(stream):
return stream.ptr
@staticmethod
def to_stream_context(stream):
return stream
@staticmethod
def create_external_stream(device_id, stream_ptr):
return cp.cuda.ExternalStream(stream_ptr)
@staticmethod
def create_stream(device_id):
with utils.device_ctx(device_id):
stream = cp.cuda.Stream(null=False, non_blocking=False, ptds=False)
return stream
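# Illustrative sketch (requires a CUDA device); round-tripping a stream through this interface:
#
#   stream = CupyPackage.create_stream(device_id=0)
#   ptr = CupyPackage.to_stream_pointer(stream)
#   external = CupyPackage.create_external_stream(0, ptr)  # wraps the same CUDA stream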
| cuQuantum-main | python/cuquantum/cutensornet/_internal/package_ifc_cupy.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
An abstract interface to certain package-provided operations.
"""
__all__ = ['Package']
from abc import ABC, abstractmethod
class Package(ABC):
@staticmethod
@abstractmethod
def get_current_stream(device_id):
"""
Obtain the current stream on the device.
Args:
device_id: The id (ordinal) of the device.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_stream_pointer(stream):
"""
Obtain the stream pointer.
Args:
stream: The stream object.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def to_stream_context(stream):
"""
Create a context manager from the stream.
Args:
stream: The stream object.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def create_external_stream(device_id, stream_ptr):
"""
Wrap a stream pointer into an external stream object.
Args:
device_id: The id (ordinal) of the device.
stream: The stream pointer (int) to be wrapped.
"""
raise NotImplementedError
@staticmethod
@abstractmethod
def create_stream(device_id):
"""
Create a new stream on the specified device.
Args:
device_id: The id (ordinal) of the device.
"""
raise NotImplementedError
| cuQuantum-main | python/cuquantum/cutensornet/_internal/package_ifc.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to seamlessly use Cupy ndarray objects.
"""
__all__ = ['CupyTensor']
import cupy
import numpy
from . import utils
from .tensor_ifc import Tensor
from .. import cutensornet as cutn
class CupyTensor(Tensor):
"""
Tensor wrapper for cupy ndarrays.
"""
name = 'cupy'
module = cupy
name_to_dtype = Tensor.create_name_dtype_map(conversion_function=lambda name: cupy.dtype(name), exception_type=TypeError)
def __init__(self, tensor):
super().__init__(tensor)
@property
def data_ptr(self):
return self.tensor.data.ptr
@property
def device(self):
return 'cuda'
@property
def device_id(self):
return self.tensor.device.id
@property
def dtype(self):
"""Name of the data type"""
return self.tensor.dtype.name
@property
def shape(self):
return tuple(self.tensor.shape)
@property
def strides(self):
return tuple(stride_in_bytes // self.tensor.itemsize for stride_in_bytes in self.tensor.strides)
def numpy(self):
return self.tensor.get()
@classmethod
def empty(cls, shape, **context):
"""
Create an empty tensor of the specified shape and data type.
"""
name = context.get('dtype', 'float32')
dtype = CupyTensor.name_to_dtype[name]
device = context.get('device', None)
if isinstance(device, cupy.cuda.Device):
device_id = device.id
elif isinstance(device, int):
device_id = device
else:
raise ValueError(f"The device must be specified as an integer or cupy.cuda.Device instance, not '{device}'.")
with utils.device_ctx(device_id):
tensor = cupy.empty(shape, dtype=dtype)
return tensor
def to(self, device='cpu'):
"""
Create a copy of the tensor on the specified device (integer or
'cpu'). Copy to Numpy ndarray if CPU, otherwise return Cupy type.
"""
if device == 'cpu':
return self.numpy()
if not isinstance(device, int):
raise ValueError(f"The device must be specified as an integer or 'cpu', not '{device}'.")
with utils.device_ctx(device):
tensor_device = cupy.asarray(self.tensor)
return tensor_device
def copy_(self, src):
"""
Inplace copy of src (copy the data from src into self).
"""
cupy.copyto(self.tensor, src)
def istensor(self):
"""
Check if the object is ndarray-like.
"""
return isinstance(self.tensor, cupy.ndarray)
def reshape_to_match_tensor_descriptor(self, handle, desc_tensor):
_, _, extents, strides = cutn.get_tensor_details(handle, desc_tensor)
if tuple(extents) != self.shape:
strides = [i * self.tensor.itemsize for i in strides]
self.tensor = cupy.ndarray(extents, dtype=self.tensor.dtype, memptr=self.tensor.data, strides=strides)
| cuQuantum-main | python/cuquantum/cutensornet/_internal/tensor_ifc_cupy.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Memory specification regular expression.
"""
__all__ = ['MEM_LIMIT_RE_PCT', 'MEM_LIMIT_RE_VAL', 'MEM_LIMIT_DOC']
import re
MEM_LIMIT_RE_PCT = re.compile(r"(?P<value>[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)\s*%\s*$")
MEM_LIMIT_RE_VAL = re.compile(r"(?P<value>[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)\s*(?P<units>[kmg])?(?P<binary>(?<=[kmg])i)?b\s*$", re.IGNORECASE)
MEM_LIMIT_DOC = """The memory limit must be specified in one of the following forms:
(1) A number (int or float). If the number is between 0 and 1, the memory limit is interpreted as a fraction of the
total device memory.
Examples: 0.75, 50E6, 50000000, ...
(2) A string containing a value followed by B, kB, MB, or GB for powers of 1000.
Examples: "0.05 GB", "50 MB", "50000000 B" ...
(3) A string containing a value followed by kiB, MiB, or GiB for powers of 1024.
    Examples: "0.05 GiB", "51.2 MiB", "53687091 B" ...
(4) A string with value in the range (0, 100] followed by a %% symbol.
Examples: "26%%", "82%%", ...
Whitespace between values and units is optional.
The provided memory limit is "%s".
"""
| cuQuantum-main | python/cuquantum/cutensornet/_internal/mem_limit.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
| cuQuantum-main | python/cuquantum/cutensornet/_internal/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to seamlessly use Numpy ndarray objects.
"""
__all__ = ['NumpyTensor']
import cupy
import numpy
from . import utils
from .tensor_ifc import Tensor
class NumpyTensor(Tensor):
"""
Tensor wrapper for numpy ndarrays.
"""
name = 'numpy'
module = numpy
name_to_dtype = Tensor.create_name_dtype_map(conversion_function=lambda name: numpy.dtype(name), exception_type=TypeError)
def __init__(self, tensor):
super().__init__(tensor)
@property
def data_ptr(self):
return self.tensor.ctypes.data
@property
def device(self):
return 'cpu'
@property
def device_id(self):
return None
@property
def dtype(self):
"""Name of the data type"""
return self.tensor.dtype.name
@property
def shape(self):
return tuple(self.tensor.shape)
@property
def strides(self):
return tuple(stride_in_bytes // self.tensor.itemsize for stride_in_bytes in self.tensor.strides)
def numpy(self):
return self.tensor
@classmethod
def empty(cls, shape, **context):
"""
Create an empty tensor of the specified shape and data type.
"""
name = context.get('dtype', 'float32')
dtype = NumpyTensor.name_to_dtype[name]
        # Return a native ndarray for consistency with the other tensor wrappers.
        return numpy.empty(shape, dtype=dtype)
def to(self, device='cpu'):
"""
Create a copy of the tensor on the specified device (integer or
'cpu'). Copy to Cupy ndarray on the specified device if it
is not CPU. Otherwise, return self.
"""
if device == 'cpu':
return self
if not isinstance(device, int):
raise ValueError(f"The device must be specified as an integer or 'cpu', not '{device}'.")
with utils.device_ctx(device):
tensor_device = cupy.asarray(self.tensor)
return tensor_device
def istensor(self):
"""
Check if the object is ndarray-like.
"""
return isinstance(self.tensor, numpy.ndarray)
def reshape_to_match_tensor_descriptor(self, handle, desc_tensor):
        # NOTE: this method is only called for CupyTensor and TorchTensor.
        raise NotImplementedError
 | cuQuantum-main | python/cuquantum/cutensornet/_internal/tensor_ifc_numpy.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from qiskit import QuantumCircuit
from qiskit.circuit import Barrier, ControlledGate, Delay, Gate, Measure
from qiskit.extensions import UnitaryGate
from .tensor_wrapper import _get_backend_asarray_func
def remove_measurements(circuit):
"""
Return a circuit with final measurement operations removed.
"""
circuit = circuit.copy()
circuit.remove_final_measurements()
for operation, _, _ in circuit:
if isinstance(operation, Measure):
raise ValueError('mid-circuit measurement not supported in tensor network simulation')
return circuit
def get_inverse_circuit(circuit):
"""
Return a circuit with all gate operations inversed.
"""
return circuit.inverse()
def get_decomposed_gates(circuit, qubit_map=None, gates=None, gate_process_func=None, global_phase=0):
"""
Return the gate sequence for the given circuit. Compound gates/instructions will be decomposed
to either standard gates or customized unitary gates.
"""
if gates is None:
gates = []
global_phase += circuit.global_phase
for operation, gate_qubits, _ in circuit:
if qubit_map:
gate_qubits = [qubit_map[q] for q in gate_qubits]
if isinstance(operation, Gate):
if 'standard_gate' in str(type(operation)) or isinstance(operation, UnitaryGate):
if callable(gate_process_func):
gates.append(gate_process_func(operation, gate_qubits))
else:
gates.append((operation, gate_qubits))
continue
else:
if isinstance(operation, (Barrier, Delay)):
# no physical meaning in tensor network simulation
continue
elif not isinstance(operation.definition, QuantumCircuit):
# Instruction as composite gate
raise ValueError(f'operation type {type(operation)} not supported')
# for composite gate, must provide a map from the sub circuit to the original circuit
next_qubit_map = dict(zip(operation.definition.qubits, gate_qubits))
gates, global_phase = get_decomposed_gates(operation.definition, qubit_map=next_qubit_map, gates=gates, gate_process_func=gate_process_func, global_phase=global_phase)
return gates, global_phase
def unfold_circuit(circuit, dtype='complex128', backend=cp):
"""
Unfold the circuit to obtain the qubits and all gate tensors. All :class:`qiskit.circuit.Gate` and
:class:`qiskit.circuit.Instruction` in the circuit will be decomposed into either standard gates or customized unitary gates.
Barrier and delay operations will be discarded.
Args:
        circuit: A :class:`qiskit.QuantumCircuit` object. All parameters in the circuit must be bound.
dtype: Data type for the tensor operands.
backend: The package the tensor operands belong to.
Returns:
All qubits and gate operations from the input circuit
"""
asarray = _get_backend_asarray_func(backend)
qubits = circuit.qubits
def gate_process_func(operation, gate_qubits):
tensor = operation.to_matrix().reshape((2,2)*len(gate_qubits))
tensor = asarray(tensor, dtype=dtype)
# in qiskit notation, qubits are labelled in the inverse order
return tensor, gate_qubits[::-1]
gates, global_phase = get_decomposed_gates(circuit, gate_process_func=gate_process_func, global_phase=0)
if global_phase != 0:
phase = np.exp(1j*global_phase)
phase_gate = asarray([[phase, 0], [0, phase]], dtype=dtype)
gates = [(phase_gate, qubits[:1]), ] + gates
return qubits, gates
def get_lightcone_circuit(circuit, coned_qubits):
"""
    Use the reversed-lightcone cancellation technique to reduce the effective circuit size based on the qubits to be coned.
Args:
circuit: A :class:`qiskit.QuantumCircuit` object.
coned_qubits: An iterable of qubits to be coned.
Returns:
        A :class:`qiskit.QuantumCircuit` object that potentially contains fewer gates
"""
coned_qubits = set(coned_qubits)
gates, global_phase = get_decomposed_gates(circuit)
newqc = QuantumCircuit(circuit.qubits)
ix = len(gates)
tail_operations = []
while len(coned_qubits) != circuit.num_qubits and ix>0:
ix -= 1
operation, gate_qubits = gates[ix]
qubit_set = set(gate_qubits)
if qubit_set & coned_qubits:
tail_operations.append([operation, gate_qubits])
coned_qubits |= qubit_set
for operation, gate_qubits in gates[:ix] + tail_operations[::-1]:
newqc.append(operation, gate_qubits)
newqc.global_phase = global_phase
return newqc
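# Illustrative sketch (requires qiskit):
#
#   qc = QuantumCircuit(3)
#   qc.h(0); qc.cx(0, 1); qc.cx(1, 2)
#   reduced = get_lightcone_circuit(qc, qc.qubits[:1])
#   # gates outside the reverse lightcone of qubit 0 are dropped from `reduced`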
| cuQuantum-main | python/cuquantum/cutensornet/_internal/circuit_parser_utils_qiskit.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Entry point to using tensors from different libraries seamlessly.
"""
__all__ = ['infer_tensor_package', 'wrap_operand', 'wrap_operands', 'to', 'copy_']
import functools
import numpy as np
from . import formatters
from .tensor_ifc_cupy import CupyTensor
from .tensor_ifc_numpy import NumpyTensor
_TENSOR_TYPES = {
'cupy': CupyTensor,
'numpy': NumpyTensor
}
# Optional modules
try:
import torch
from .tensor_ifc_torch import TorchTensor
_TENSOR_TYPES['torch'] = TorchTensor
torch_asarray = functools.partial(torch.as_tensor, device='cuda')
except ImportError as e:
torch = None
torch_asarray = None
_SUPPORTED_PACKAGES = tuple(_TENSOR_TYPES.keys())
def infer_tensor_package(tensor):
"""
Infer the package that defines this tensor.
"""
if issubclass(tensor.__class__, np.ndarray):
return 'numpy'
module = tensor.__class__.__module__
return module.split('.')[0]
def _get_backend_asarray_func(backend):
"""
    Return the ``asarray`` function of the given backend package.
"""
if backend is torch:
return torch_asarray
else:
return backend.asarray
def wrap_operand(native_operand):
"""
Wrap one "native" operand so that package-agnostic API can be used.
"""
wrapped_operand = _TENSOR_TYPES[infer_tensor_package(native_operand)](native_operand)
return wrapped_operand
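# A minimal sketch (not part of the original module).
def _demo_wrap_operand():
    wrapped = wrap_operand(np.zeros((2, 3), dtype=np.float32))
    assert wrapped.name == 'numpy'
    assert wrapped.shape == (2, 3) and wrapped.dtype == 'float32'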
def check_valid_package(native_operands):
"""
Check if the operands belong to one of the supported packages.
"""
operands_pkg = [infer_tensor_package(o) for o in native_operands]
checks = [p in _SUPPORTED_PACKAGES for p in operands_pkg]
if not all(checks):
unknown = [f"{location}: {operands_pkg[location]}" for location, predicate in enumerate(checks) if predicate is False]
unknown = formatters.array2string(unknown)
message = f"""The operands should be ndarray-like objects from one of {_SUPPORTED_PACKAGES} packages.
The unsupported operands as a sequence of "position: package" is: \n{unknown}"""
raise ValueError(message)
return operands_pkg
def check_valid_operand_type(wrapped_operands):
"""
Check if the wrapped operands are ndarray-like.
"""
istensor = [o.istensor() for o in wrapped_operands]
if not all(istensor):
unknown = [f"{location}: {type(wrapped_operands[location].tensor)}"
for location, predicate in enumerate(istensor) if predicate is False]
unknown = formatters.array2string(unknown)
message = f"""The operands should be ndarray-like objects from one of {_SUPPORTED_PACKAGES} packages.
The unsupported operands as a sequence of "position: type" is: \n{unknown}"""
raise ValueError(message)
def wrap_operands(native_operands):
"""
Wrap the "native" operands so that package-agnostic API can be used.
"""
operands_pkg = check_valid_package(native_operands)
wrapped_operands = tuple(_TENSOR_TYPES[operands_pkg[i]](o) for i, o in enumerate(native_operands))
check_valid_operand_type(wrapped_operands)
return wrapped_operands
def to(operands, device):
"""
Copy the wrapped operands to the specified device ('cpu' or int) and return the
wrapped operands on the device.
"""
operands = tuple(o.to(device) for o in operands)
return wrap_operands(operands)
def copy_(src, dest):
"""
Copy the wrapped operands in dest to the corresponding wrapped operands in src.
"""
for s, d in zip(src, dest):
if s.device_id is None:
s = wrap_operand(s.to(d.device_id))
d.copy_(s.tensor)
| cuQuantum-main | python/cuquantum/cutensornet/_internal/tensor_wrapper.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of functions for parsing Einsum expressions.
"""
from collections import Counter
from itertools import chain
import re
import sys
import numpy as np
from . import formatters
from .tensor_wrapper import wrap_operands
from ...utils import WHITESPACE_UNICODE
DISALLOWED_LABELS = set(['.', '-', '>'])
native_to_str = lambda native : "'" + ''.join(s if s is not Ellipsis else '...' for s in native) + "'"
def select_morpher(interleaved, mapper=None):
"""
Select appropriate function for mode label representation based on string or interleaved format.
"""
if mapper is None:
return (lambda s : tuple(s)) if interleaved else native_to_str
return (lambda s : tuple(mapper(s))) if interleaved else lambda s : native_to_str(mapper(s))
class ModeLabelMapper(object):
"""
Map mode labels, with special treatment for Ellipsis characters.
"""
def __init__(self, _map):
"""
Args:
_map = dict-like object to map mode labels.
"""
self._map = _map
def __call__(self, sequence):
return tuple(s if s is Ellipsis else self._map[s] for s in sequence)
def parse_single(single):
"""
Parse single operand mode labels considering ellipsis. Leading or trailing whitespace, if present, is removed.
"""
whitespace = WHITESPACE_UNICODE
subexpr = single.strip(whitespace).split('...')
n = len(subexpr)
expr = [[Ellipsis]] * (2*n - 1)
expr[::2] = subexpr
return tuple(chain(*expr))
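# A minimal sketch (not part of the original module) of the ellipsis-aware parsing.
def _demo_parse_single():
    assert parse_single('ab...c') == ('a', 'b', Ellipsis, 'c')
    assert parse_single('  xy ') == ('x', 'y')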
def check_single(single):
"""
Check for disallowed characters used as mode labels for a single operand.
"""
whitespace = WHITESPACE_UNICODE
for s in single:
if s is Ellipsis:
continue
if s in whitespace or s in DISALLOWED_LABELS:
return False
return True
def parse_einsum_str(expr):
"""
Parse einsum expression in string format, retaining ellipses if present.
Return operand as well as output mode labels if explicit form or None for implicit form.
"""
inputs, output, *rest = expr.split('->') if "->" in expr else (expr, None)
if rest:
raise ValueError("""Invalid expression.
It is not permitted to specify more than one '->' in the Einstein summation expression.""")
inputs = list(parse_single(_input) for _input in inputs.split(","))
if output is not None:
output = parse_single(output)
checks = [check_single(_input) for _input in inputs]
if not all(checks):
incorrect = [f"{location}: {native_to_str(inputs[location])}"
for location, predicate in enumerate(checks) if predicate is False]
incorrect = formatters.array2string(incorrect)
message = f"""Incorrect term.
Whitespace characters and characters from the set {DISALLOWED_LABELS} cannot be used as mode labels in a summation expression.
The incorrectly specified terms as a sequence of "position: term" are: \n{incorrect}"""
raise ValueError(message)
return inputs, output
def parse_einsum_interleaved(operand_sublists):
"""
Parse einsum expression in interleaved format, retaining ellipses if present.
Return operands as well as output mode labels if explicit form or None for implicit form.
"""
inputs = list()
operands = list()
N = len(operand_sublists) // 2
for i in range(N):
operands.append(operand_sublists[2*i])
_input = operand_sublists[2*i + 1]
if isinstance(_input, str):
[_input], _ = parse_einsum_str(_input)
inputs.append(_input)
N = len(operand_sublists)
output = operand_sublists[N-1] if N % 2 == 1 else None
if isinstance(output, str):
[output], _ = parse_einsum_str(output)
return operands, inputs, output
def check_ellipses(user_inputs, morpher):
"""
Check ellipsis specification for validity.
Args:
user_inputs: Einsum expression in "neutral format" (sequence of sequences) before mapping.
morpher: A callable that transforms a term in neutral format (sequence) to string or interleaved format.
"""
checks = [user_input.count(Ellipsis) <= 1 for user_input in user_inputs]
if not all(checks):
incorrect = [f"{location}: {morpher(user_inputs[location])}"
for location, predicate in enumerate(checks) if predicate is False]
incorrect = formatters.array2string(incorrect)
message = f"""Incorrect ellipsis use.
There must not be more than one ellipsis present in each term.
The incorrectly specified terms as a sequence of "position: term" are: \n{incorrect}"""
raise ValueError(message)
def check_einsum_with_operands(user_inputs, operands, morpher):
"""
Check that the number of modes in each Einsum term is consistent with the shape of the corresponding operand.
Args:
operands: Wrapped operands.
user_inputs: Einsum expression in "neutral format" (sequence of sequences) before mapping.
morpher: A callable that transforms a term in neutral format (sequence) to string or interleaved format.
"""
checks = [len(i) - 1 <= len(o.shape) if Ellipsis in i else len(i) == len(o.shape) for i, o in zip(user_inputs, operands)]
if not all(checks):
mismatch = [f"{location}: {morpher(user_inputs[location])} <=> {operands[location].shape}"
for location, predicate in enumerate(checks) if predicate is False]
mismatch = formatters.array2string(mismatch)
message = f"""Term-operand shape mismatch.
The number of mode labels in each term of the expression must match the shape of the corresponding operand.
The mismatch as a sequence of "position: mode labels in term <=> operand shape" is: \n{mismatch}"""
raise ValueError(message)
def map_modes(user_inputs, user_output, num_extra_labels, morpher):
"""
Map modes in user-defined inputs and output to ordinals, leaving ellipsis for later processing. Create extra mode labels
in anticipation of ellipsis replacement. Create the forward as well as inverse maps.
Args:
user_inputs: Einsum expression in "neutral format" (sequence of sequences) before mapping.
user_output: The output mode labels before mapping as a sequence or None.
num_extra_labels: The number of extra mode labels to generate to use in ellipsis expansion later.
morpher: A callable that transforms a term in neutral format (sequence) to string or interleaved format.
Returns:
tuple: A 5-tuple containing (mapped input, mapped output, forward map, reverse map, largest label).
"""
ordinal = 0
mode_map_user_to_ord = dict()
for modes in user_inputs:
for mode in modes:
if mode not in mode_map_user_to_ord:
mode_map_user_to_ord[mode] = ordinal
ordinal += 1
mode_map_user_to_ord.update((f'__{i}__', i) for i in range(ordinal, ordinal+num_extra_labels))
label_end = ordinal + num_extra_labels
mode_map_ord_to_user = {v : k for k, v in mode_map_user_to_ord.items()}
inputs = list(tuple(m if m is Ellipsis else mode_map_user_to_ord[m] for m in modes) for modes in user_inputs)
output = None
if user_output is not None:
extra = set(user_output) - set(mode_map_user_to_ord.keys()) - set([Ellipsis])
if extra:
output_modes = morpher(user_output)
message = f"""Extra modes in output.
The specified output modes {output_modes} contain the extra modes: {extra}"""
raise ValueError(message)
output = tuple(m if m is Ellipsis else mode_map_user_to_ord[m] for m in user_output)
return inputs, output, mode_map_user_to_ord, mode_map_ord_to_user, label_end
def create_size_dict(inputs, operands):
"""
Create size dictionary (mode label to extent map) capturing the extent of each mode.
Args:
inputs: Einsum expression in "neutral format" (sequence of sequences) after relabelling modes.
operands: Wrapped operands.
Returns:
size_dict: size dictionary.
"""
size_dict = dict()
for i, _input in enumerate(inputs):
for m, mode in enumerate(_input):
shape = operands[i].shape
if mode in size_dict:
if size_dict[mode] == 1: # Handle broadcasting
size_dict[mode] = shape[m]
elif size_dict[mode] != shape[m] and shape[m] != 1:
message = f"""Extent mismatch.
The extent ({shape[m]}) of mode {m} for operand {i} does not match the extent ({size_dict[mode]}) of the same mode found
in previous operand(s)."""
raise ValueError(message)
else:
size_dict[mode] = shape[m]
return size_dict
def infer_output_mode_labels(inputs, mode_map_ord_to_user=None):
"""
Infer output mode labels (those that appear exactly once).
Args:
inputs: Einsum expression in "neutral format" (sequence of sequences). If `mode_map_ord_to_user` is provided, the
mode labels correspond to ordinals, otherwise they correspond to user labels.
mode_map_ord_to_user: the map from ordinals to user labels.
"""
mode_label_freq = Counter(chain(*inputs))
del mode_label_freq[Ellipsis]
key = None if mode_map_ord_to_user is None else lambda m: mode_map_ord_to_user[m]
return tuple(sorted((m for m, c in mode_label_freq.items() if c == 1), key=key))
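# A minimal sketch (not part of the original module): modes appearing exactly once survive.
def _demo_infer_output_mode_labels():
    # 'ij,jk' in ordinal form: j (=1) is contracted away, leaving i (=0) and k (=2).
    assert infer_output_mode_labels([(0, 1), (1, 2)]) == (0, 2)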
def process_ellipses(inputs, output, operands, label_end, mode_map_ord_to_user, mapping_morpher):
"""
Replace ellipses by generated mode labels, using 'label_end' and aligning shapes from the right. Infer or update
output mode labels.
Args:
inputs: Einsum expression in "neutral format" (sequence of sequences) after relabelling modes.
output: The output mode labels after relabelling as a sequence or None.
operands: Wrapped operands.
label_end: One past the largest mode label (int), including modes resulting from Ellipsis expansion.
mode_map_ord_to_user: the map from ordinals to user labels.
mapping_morpher: A callable that transforms a term in neutral format (sequence) to string or interleaved format,
while converting internal labels to user labels.
Returns:
tuple: a 2-tuple (inputs, output) after ellipsis expansion and inferring output mode labels if needed.
"""
inferred = False
if output is None:
output = infer_output_mode_labels(inputs, mode_map_ord_to_user)
inferred = True
shortest, longest = label_end, 0
for i, _input in enumerate(inputs):
if Ellipsis not in _input:
continue
n = len(operands[i].shape) - (len(_input) - 1)
assert n >= 0, "Internal error"
s = _input.index(Ellipsis)
shortest, longest = min(shortest, n), max(longest, n)
inputs[i] = _input[:s] + tuple(range(label_end-n, label_end)) + _input[s+1:]
if not inferred:
count = output.count(Ellipsis)
if count > 1:
message = f"""Incorrect ellipsis use.
The output term cannot have more than one ellipsis. Specified term = {mapping_morpher(output)}"""
raise ValueError(message)
if count == 1: # Replace ellipsis by the longest sequence of labels.
s = output.index(Ellipsis)
output = output[:s] + tuple(range(label_end-longest, label_end)) + output[s+1:]
else: # If all ellipses expand to the same number of mode labels, the latter are reduced.
if shortest != longest:
message = f"""Ellipsis length mismatch for reduction.
The ellipses specified in the expression do not expand to the same number of mode labels and thus cannot be reduced. The
expanded number of dimensions ranges from {shortest} to {longest}."""
raise ValueError(message)
else: # The mode labels corresponding to ellipsis expansion followed by the inferred mode labels.
output = tuple(range(label_end-longest, label_end)) + output
return inputs, output
def parse_einsum(*operands):
"""
Parse the generalized Einstein summation expression in both string and interleaved formats. Any hashable and comparable
object is accepted in the interleaved format for mode label specification, and unicode strings are accepted. If the
output is not provided (implicit form or missing output sublist), it will be inferred from the expression.
Returns wrapped operands, mapped inputs and output, size dictionary based on internal mode numbers, and the forward as
well as the reverse mode maps.
"""
# Parse einsum keeping ellipses.
interleaved = False
if isinstance(operands[0], str):
inputs, output = parse_einsum_str(operands[0])
operands = operands[1:]
else:
interleaved = True
operands, inputs, output = parse_einsum_interleaved(operands)
num_operand, num_input = len(operands), len(inputs)
if num_operand != num_input:
message = f"""Operand-term mismatch.
The number of operands ({num_operand}) must match the number of inputs ({num_input}) specified in the Einsum expression."""
raise ValueError(message)
morpher = select_morpher(interleaved)
if num_operand < 1:
message = "The network must consist of at least one tensor."
raise ValueError(message)
# First wrap operands.
operands = wrap_operands(operands)
# Preliminary checks, before mode label remapping.
ellipses = any(Ellipsis in _input for _input in inputs)
# Ensure at most one ellipsis per operand.
if ellipses:
check_ellipses(inputs, morpher)
# Ensure that ellipsis is not present only in the output.
if not ellipses and output is not None and Ellipsis in output:
message = f"""Invalid ellipsis specification.
The output term {morpher(output)} contains ellipsis while none of the input terms do."""
raise ValueError(message)
# Ensure that the number of modes is consistent with the operand shape.
check_einsum_with_operands(inputs, operands, morpher)
# Calculate the maximum number of extra mode labels that will be needed.
num_extra_labels = max(len(o.shape) for o in operands) if ellipses else 0
# Map data to ordinals for cutensornet.
inputs, output, mode_map_user_to_ord, mode_map_ord_to_user, label_end = map_modes(inputs, output, num_extra_labels, morpher)
mapper = ModeLabelMapper(mode_map_ord_to_user)
mapping_morpher = select_morpher(interleaved, mapper)
# Ellipsis expansion.
if ellipses:
inputs, output = process_ellipses(inputs, output, operands, label_end, mode_map_ord_to_user, mapping_morpher)
elif output is None:
output = infer_output_mode_labels(inputs, mode_map_ord_to_user)
# Create mode-extent map based on internal mode numbers.
size_dict = create_size_dict(inputs, operands)
return operands, inputs, output, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, interleaved or ellipses
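# A minimal sketch (not part of the original module) of the full parsing pipeline.
def _demo_parse_einsum():
    a, b = np.ones((2, 3)), np.ones((3, 4))
    operands, inputs, output, size_dict, *_ = parse_einsum('ij,jk->ik', a, b)
    # modes are remapped to ordinals: i->0, j->1, k->2
    assert output == (0, 2)
    assert size_dict == {0: 2, 1: 3, 2: 4}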
| cuQuantum-main | python/cuquantum/cutensornet/_internal/einsum_parser.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to seamlessly use tensors (or ndarray-like objects) from different libraries.
"""
from abc import ABC, abstractmethod
from . import typemaps
from .. import cutensornet as cutn
class Tensor(ABC):
"""
A simple wrapper type for tensors to make the API package-agnostic.
"""
def __init__(self, tensor):
self.tensor = tensor
@property
@abstractmethod
def data_ptr(self):
raise NotImplementedError
@property
@abstractmethod
def device(self):
raise NotImplementedError
@property
@abstractmethod
def device_id(self):
raise NotImplementedError
@property
@abstractmethod
def dtype(self):
raise NotImplementedError
@classmethod
@abstractmethod
def empty(cls, shape, **context):
raise NotImplementedError
@abstractmethod
def numpy(self):
raise NotImplementedError
@property
@abstractmethod
def shape(self):
raise NotImplementedError
@property
@abstractmethod
def strides(self):
raise NotImplementedError
@abstractmethod
def to(self, device='cpu'):
raise NotImplementedError
@staticmethod
def create_name_dtype_map(conversion_function, exception_type):
"""
Create a map between CUDA data type names and the corresponding package dtypes for supported data types.
"""
names = typemaps.NAME_TO_DATA_TYPE.keys()
name_to_dtype = dict()
for name in names:
try:
name_to_dtype[name] = conversion_function(name)
except exception_type:
pass
return name_to_dtype
@abstractmethod
def reshape_to_match_tensor_descriptor(self, handle, desc_tensor):
raise NotImplementedError
def create_tensor_descriptor(self, handle, modes):
return cutn.create_tensor_descriptor(handle, self.tensor.ndim, self.shape, self.strides, modes, typemaps.NAME_TO_DATA_TYPE[self.dtype])
| cuQuantum-main | python/cuquantum/cutensornet/_internal/tensor_ifc.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
A collection of (internal use) helper functions.
"""
import contextlib
import ctypes
import functools
from typing import Callable, Dict, Mapping, Optional
import cupy as cp
import numpy as np
from . import formatters
from . import mem_limit
from . import package_wrapper
from . import tensor_wrapper
def infer_object_package(obj):
"""
Infer the package that defines this object.
"""
module = obj.__class__.__module__
return module.split('.')[0]
def check_or_create_options(cls, options, options_description):
"""
Create the specified options dataclass from a dictionary of options or None.
"""
if options is None:
options = cls()
elif isinstance(options, Dict):
options = cls(**options)
if not isinstance(options, cls):
raise TypeError(f"The {options_description} must be provided as an object "
f"of type {cls.__name__} or as a dict with valid {options_description}. "
f"The provided object is '{options}'.")
return options
def _create_stream_ctx_ptr_cupy_stream(package_ifc, stream):
"""
Utility function to create a stream context as a "package-native" object, get stream pointer as well as
create a cupy stream object.
"""
stream_ctx = package_ifc.to_stream_context(stream)
stream_ptr = package_ifc.to_stream_pointer(stream)
stream = cp.cuda.ExternalStream(stream_ptr)
return stream, stream_ctx, stream_ptr
@contextlib.contextmanager
def device_ctx(new_device_id):
"""
Semantics:
1. The device context manager makes the specified device current from the point of entry until the point of exit.
2. When the context manager exits, the current device is reset to what it was when the context manager was entered.
3. Any explicit setting of the device within the context manager (using cupy.cuda.Device().use(), torch.cuda.set_device(),
etc) will overrule the device set by the context manager from that point onwards till the context manager exits. In
other words, the context manager provides a local device scope and the current device can be explicitly reset for the
remainder of that scope.
Corollary: if any library function resets the device globally and this is an undesired side-effect, such functions must be
called from within the device context manager.
Device context managers can be arbitrarily nested.
"""
old_device_id = cp.cuda.runtime.getDevice()
try:
if old_device_id != new_device_id:
cp.cuda.runtime.setDevice(new_device_id)
yield
finally:
# We should always restore the old device at exit.
cp.cuda.runtime.setDevice(old_device_id)
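# Illustrative sketch (requires CUDA):
#
#   with device_ctx(1):
#       x = cp.zeros(16)   # allocated on device 1
#   # the previously current device is restored on exit, even on exceptions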
def get_or_create_stream(device_id, stream, op_package):
"""
Create a stream object from a stream pointer or extract the stream pointer from a stream object, or
use the current stream.
Args:
device_id: The device ID.
stream: A stream object, stream pointer, or None.
op_package: The package the tensor network operands belong to.
Returns:
tuple: CuPy stream object, package stream context, stream pointer.
"""
op_package_ifc = package_wrapper.PACKAGE[op_package]
if stream is None:
stream = op_package_ifc.get_current_stream(device_id)
return _create_stream_ctx_ptr_cupy_stream(op_package_ifc, stream)
if isinstance(stream, int):
stream_ptr = stream
if op_package == 'torch':
message = "A stream object must be provided for PyTorch operands, not stream pointer."
raise TypeError(message)
stream_ctx = op_package_ifc.to_stream_context(stream)
stream = cp.cuda.ExternalStream(stream_ptr)
return stream, stream_ctx, stream_ptr
stream_package = infer_object_package(stream)
if stream_package != op_package:
message = "The stream object must belong to the same package as the tensor network operands."
raise TypeError(message)
return _create_stream_ctx_ptr_cupy_stream(op_package_ifc, stream)
def get_memory_limit(memory_limit, device):
"""
Parse user provided memory limit and return the memory limit in bytes.
"""
_, total_memory = device.mem_info
if isinstance(memory_limit, (int, float)):
if memory_limit <= 0:
raise ValueError("The specified memory limit must be greater than 0.")
if memory_limit < 1:
memory_limit *= total_memory
return int(memory_limit)
m = mem_limit.MEM_LIMIT_RE_PCT.match(memory_limit)
if m:
factor = float(m.group(1))
if factor <= 0 or factor > 100:
raise ValueError("The memory limit percentage must be in the range (0, 100].")
return int(factor * total_memory / 100.)
m = mem_limit.MEM_LIMIT_RE_VAL.match(memory_limit)
if not m:
raise ValueError(mem_limit.MEM_LIMIT_DOC % memory_limit)
base = 1000
if m.group('binary'):
base = 1024
powers = { '' : 0, 'k' : 1, 'm' : 2, 'g' : 3 }
unit = m.group('units').lower() if m.group('units') else ''
multiplier = base ** powers[unit]
value = float(m.group('value'))
memory_limit = int(value * multiplier)
return memory_limit
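# Illustrative sketch (requires CUDA):
#
#   dev = cp.cuda.Device(0)
#   get_memory_limit(0.5, dev)      # half of the total device memory, in bytes
#   get_memory_limit("80%", dev)    # 80% of the total device memory
#   get_memory_limit("2 GiB", dev)  # 2 * 1024**3 bytes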
def get_operands_data(operands):
"""
Get the raw data pointer of the input operands for cuTensorNet.
"""
op_data = tuple(o.data_ptr for o in operands)
return op_data
def create_empty_tensor(cls, extents, dtype, device_id, stream_ctx):
"""
Create a wrapped tensor of the same type as (the wrapped) cls on the specified device having the
specified extents and dtype.
The tensor is created within a stream context to allow for asynchronous memory allocators like
CuPy's MemoryAsyncPool.
"""
with stream_ctx:
tensor = cls.empty(extents, dtype=dtype, device=device_id)
tensor = tensor_wrapper.wrap_operand(tensor)
return tensor
def create_output_tensor(cls, package, output, size_dict, device_id, stream, data_type):
"""
Create output tensor and associated data (modes, extents, strides). This operation is
ordered through events and is safe to use with asynchronous memory pools.
"""
modes = tuple(m for m in output)
extents = tuple(size_dict[m] for m in output)
stream, stream_ctx, _ = get_or_create_stream(device_id, stream, package)
with device_ctx(device_id):
output = create_empty_tensor(cls, extents, data_type, device_id, stream_ctx)
output_event = stream.record()
strides = output.strides
return output, output_event, modes, extents, strides
def get_network_device_id(operands):
"""
Return the id (ordinal) of the device the tensor network is on, or None if it is on the CPU.
"""
device_id = operands[0].device_id
if not all(operand.device_id == device_id for operand in operands):
devices = set(operand.device_id for operand in operands)
raise ValueError(f"All tensors in the network are not on the same device. Devices = {devices}.")
return device_id
def get_operands_dtype(operands):
"""
Return the data type name of the tensors.
"""
dtype = operands[0].dtype
if not all(operand.dtype == dtype for operand in operands):
dtypes = set(operand.dtype for operand in operands)
raise ValueError(f"All tensors in the network must have the same data type. Data types found = {dtypes}.")
return dtype
# Unused since cuQuantum 22.11
def get_maximal_alignment(address):
"""
Calculate the maximal alignment of the provided memory location.
"""
alignment = 1
while address % alignment == 0 and alignment < 256:
alignment *= 2
return alignment
def get_operands_package(operands):
"""
Return the package name of the tensors.
"""
package = infer_object_package(operands[0].tensor)
if not all (infer_object_package(operand.tensor) == package for operand in operands):
packages = set(infer_object_package(operand.tensor) for operand in operands)
raise TypeError(f"All tensors in the network must be from the same library package. Packages found = {packages}.")
return package
def check_operands_match(orig_operands, new_operands, attribute, description):
"""
Check if the specified attribute matches between the corresponding new and old operands, and raise an exception if it
doesn't.
"""
checks = [getattr(o, attribute) == getattr(n, attribute) for o, n in zip(orig_operands, new_operands)]
if not all(checks):
mismatch = [f"{location}: {getattr(orig_operands[location], attribute)} => {getattr(new_operands[location], attribute)}"
for location, predicate in enumerate(checks) if predicate is False]
mismatch = formatters.array2string(mismatch)
message = f"""The {description} of each new operand must match the {description} of the corresponding original operand.
The mismatch in {description} as a sequence of "position: original {description} => new {description}" is: \n{mismatch}"""
raise ValueError(message)
# Unused since cuQuantum 22.11
def check_alignments_match(orig_alignments, new_alignments):
"""
Check if alignment matches between the corresponding new and old operands, and raise an exception if it doesn't.
"""
checks = [o == n for o, n in zip(orig_alignments, new_alignments)]
if not all(checks):
mismatch = [f"{location}: {orig_alignments[location]} => {new_alignments[location]}"
for location, predicate in enumerate(checks) if predicate is False]
mismatch = formatters.array2string(mismatch)
message = f"""The data alignment of each new operand must match the data alignment of the corresponding original operand.
The mismatch in data alignment as a sequence of "position: original alignment => new alignment" is: \n{mismatch}"""
raise ValueError(message)
def check_tensor_qualifiers(qualifiers, dtype, num_inputs):
"""
Check if the tensor qualifiers array is valid.
"""
if qualifiers is None:
return 0
prolog = f"The tensor qualifiers must be specified as an one-dimensional NumPy ndarray of 'tensor_qualifiers_dtype' objects."
if not isinstance(qualifiers, np.ndarray):
raise ValueError(prolog)
elif qualifiers.dtype != dtype:
message = prolog + f" The dtype of the ndarray is '{qualifiers.dtype}'."
raise ValueError(message)
elif qualifiers.ndim != 1:
message = prolog + f" The shape of the ndarray is {qualifiers.shape}."
raise ValueError(message)
elif len(qualifiers) != num_inputs:
message = prolog + f" The length of the ndarray is {len(qualifiers)}, while the expected length is {num_inputs}."
raise ValueError(message)
return qualifiers
def check_autotune_params(iterations):
"""
Check if the autotune parameters are of the correct type and within range.
"""
if not isinstance(iterations, int):
raise ValueError("Integer expected.")
if iterations < 0:
raise ValueError("Integer >= 0 expected.")
message = f"Autotuning parameters: iterations = {iterations}."
return message
def get_ptr_from_memory_pointer(mem_ptr):
"""
Access the value associated with one of the attributes 'device_ptr', 'device_pointer', 'ptr'.
"""
attributes = ('device_ptr', 'device_pointer', 'ptr')
for attr in attributes:
if hasattr(mem_ptr, attr):
return getattr(mem_ptr, attr)
message = f"Memory pointer objects should have one of the following attributes specifying the device pointer: {attributes}"
raise AttributeError(message)
class Value:
"""
A simple value wrapper holding a default value.
"""
def __init__(self, default, *, validator: Callable[[object], bool]):
"""
Args:
default: The default value to use.
validator: A callable that validates the provided value.
"""
self.validator = validator
self._data = default
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = self._validate(value)
def _validate(self, value):
if self.validator(value):
return value
raise ValueError(f"Internal Error: value '{value}' is not valid.")
def check_and_set_options(required: Mapping[str, Value], provided: Mapping[str, object]):
"""
Update each option specified in 'required' by getting the value from 'provided' if it exists or using a default.
"""
for option, value in required.items():
try:
value.data = provided.pop(option)
except KeyError:
pass
required[option] = value.data
assert not provided, "Unrecognized options."
@contextlib.contextmanager
def cuda_call_ctx(stream, blocking=True, timing=True):
"""
A simple context manager that provides (non-)blocking behavior depending on the `blocking` parameter for CUDA calls.
The call is timed only for blocking behavior when timing is requested.
An `end` event is recorded after the CUDA call for use in establishing stream ordering for non-blocking calls. This
event is returned together with a `Value` object that stores the elapsed time if the call is blocking and timing is
requested, or None otherwise.
"""
if blocking:
start = cp.cuda.Event(disable_timing = False if timing else True)
stream.record(start)
end = cp.cuda.Event(disable_timing = False if timing and blocking else True)
time = Value(None, validator=lambda v: True)
yield end, time
stream.record(end)
if not blocking:
return
end.synchronize()
if timing:
time.data = cp.cuda.get_elapsed_time(start, end)
# Decorator definitions
def atomic(handler: Callable[[Optional[object]], None], method: bool = False) -> Callable:
"""
A decorator that provides "succeed or roll-back" semantics. A typical use for this is to release partial resources if an
exception occurs.
Args:
handler: A function to call when an exception occurs. The handler takes a single argument, which is the exception
object, and returns a boolean stating whether the same exception should be reraised. We assume that this function
does not raise an exception.
method: Specify if the wrapped function as well as the exception handler are methods bound to the same object
(method = True) or they are free functions (method = False).
Returns:
Callable: A decorator that creates the wrapping.
"""
def outer(wrapped_function):
"""
A decorator that actually wraps the function for exception handling.
"""
@functools.wraps(wrapped_function)
def inner(*args, **kwargs):
"""
Call the wrapped function and return the result. If an exception occurs, then call the exception handler and
reraise the exception.
"""
try:
result = wrapped_function(*args, **kwargs)
except BaseException as e:
if method:
flag = handler(args[0], e)
else:
flag = handler(e)
if flag:
raise e
return result
return inner
return outer
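# Illustrative sketch (hypothetical `_free_partial` handler; not part of this module):
#
#   def _free_partial(e):
#       ...  # release partially-acquired resources
#       return True  # reraise the original exception
#
#   @atomic(_free_partial)
#   def allocate_and_build(spec):
#       ...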
def precondition(checker: Callable[..., None], what: str = "") -> Callable:
"""
A decorator that adds checks to ensure any preconditions are met.
Args:
checker: The function to call to check whether the preconditions are met. It has the same signature as the wrapped
function with the addition of the keyword argument `what`.
what: A string that is passed in to `checker` to provide context information.
Returns:
Callable: A decorator that creates the wrapping.
"""
def outer(wrapped_function):
"""
A decorator that actually wraps the function for checking preconditions.
"""
@functools.wraps(wrapped_function)
def inner(*args, **kwargs):
"""
Check preconditions and if they are met, call the wrapped function.
"""
checker(*args, **kwargs, what=what)
result = wrapped_function(*args, **kwargs)
return result
return inner
return outer
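# Illustrative sketch (hypothetical checker bound to the same object):
#
#   def _check_valid(self, *args, what="", **kwargs):
#       if not self.valid_state:
#           raise RuntimeError(f"{what} cannot be performed: resources have been freed.")
#
#   class Network:
#       @precondition(_check_valid, what="contraction")
#       def contract(self, *args, **kwargs):
#           ...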
def get_mpi_comm_pointer(comm):
"""Simple helper to get the address to and size of a ``MPI_Comm`` handle.
Args:
comm (mpi4py.MPI.Comm): An MPI communicator.
Returns:
tuple: A pair of int values representing the address and the size.
"""
try:
from mpi4py import MPI # init!
except ImportError as e:
raise RuntimeError("please install mpi4py") from e
if not isinstance(comm, MPI.Comm):
raise ValueError("invalid MPI communicator")
comm_ptr = MPI._addressof(comm) # = MPI_Comm*
mpi_comm_size = MPI._sizeof(MPI.Comm)
return comm_ptr, mpi_comm_size
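# Illustrative sketch (requires mpi4py):
#
#   from mpi4py import MPI
#   ptr, size = get_mpi_comm_pointer(MPI.COMM_WORLD)
#   # (ptr, size) can then be passed to C APIs that expect an MPI_Comm handle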
| cuQuantum-main | python/cuquantum/cutensornet/_internal/utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cirq import protocols, unitary, Circuit, MeasurementGate
import cupy as cp
from .tensor_wrapper import _get_backend_asarray_func
def remove_measurements(circuit):
"""
Return a circuit with final measurement operations removed
"""
circuit = circuit.copy()
if circuit.has_measurements():
if not circuit.are_all_measurements_terminal():
raise ValueError('mid-circuit measurement not supported in tensor network simulation')
else:
predicate = lambda operation: isinstance(operation.gate, MeasurementGate)
measurement_gates = list(circuit.findall_operations(predicate))
circuit.batch_remove(measurement_gates)
return circuit
def get_inverse_circuit(circuit):
"""
Return a circuit with all gate operations inversed
"""
return protocols.inverse(circuit)
def unfold_circuit(circuit, dtype='complex128', backend=cp):
"""
Unfold the circuit to obtain the qubits and all gate tensors.
Args:
circuit: A :class:`cirq.Circuit` object. All parameters in the circuit must be resolved.
dtype: Data type for the tensor operands.
backend: The package the tensor operands belong to.
Returns:
All qubits and gate operations from the input circuit
"""
qubits = sorted(circuit.all_qubits())
asarray = _get_backend_asarray_func(backend)
gates = []
for moment in circuit.moments:
for operation in moment:
gate_qubits = operation.qubits
tensor = unitary(operation).reshape((2,) * 2 * len(gate_qubits))
tensor = asarray(tensor, dtype=dtype)
gates.append((tensor, operation.qubits))
return qubits, gates
def get_lightcone_circuit(circuit, coned_qubits):
"""
    Use the reversed-lightcone cancellation technique to reduce the effective circuit size based on the qubits to be coned.
Args:
circuit: A :class:`cirq.Circuit` object.
coned_qubits: An iterable of qubits to be coned.
Returns:
        A :class:`cirq.Circuit` object that potentially contains fewer gates
"""
coned_qubits = set(coned_qubits)
all_operations = list(circuit.all_operations())
n_qubits = len(circuit.all_qubits())
ix = len(all_operations)
tail_operations = []
while len(coned_qubits) != n_qubits and ix>0:
ix -= 1
operation = all_operations[ix]
qubit_set = set(operation.qubits)
if qubit_set & coned_qubits:
tail_operations.append(operation)
coned_qubits |= qubit_set
newqc = Circuit(all_operations[:ix]+tail_operations[::-1])
return newqc
| cuQuantum-main | python/cuquantum/cutensornet/_internal/circuit_parser_utils_cirq.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Functions to link type names with CUDA data and compute types.
"""
__all__ = ['NAME_TO_DATA_TYPE', 'NAME_TO_COMPUTE_TYPE']
import re
# hack to break circular import
from cuquantum.utils import ComputeType, cudaDataType
def create_cuda_data_type_map(cuda_data_type_enum_class):
"""
Map the data type name to the corresponding CUDA data type.
"""
cuda_data_type_pattern = re.compile(r'CUDA_(?P<cr>C|R)_(?P<width>\d+)(?P<type>F|I|U|BF)')
type_code_map = { 'i' : 'int', 'u' : 'uint', 'f' : 'float', 'bf' : 'bfloat' }
cuda_data_type_map = dict()
for d in cuda_data_type_enum_class:
m = cuda_data_type_pattern.match(d.name)
is_complex = m.group('cr').lower() == 'c'
type_code = type_code_map[m.group('type').lower()]
if is_complex and type_code != 'float':
continue
width = int(m.group('width'))
if is_complex:
width *= 2
type_code = 'complex'
name = type_code + str(width)
cuda_data_type_map[name] = d
return cuda_data_type_map
def create_cuda_compute_type_map(cuda_compute_type_enum_class):
"""
Map the data type name to the corresponding CUDA compute type.
"""
cuda_compute_type_pattern = re.compile(r'COMPUTE_(?:(?P<width>\d+)(?P<type>F|I|U|BF)|(?P<tf32>TF32))')
type_code_map = { 'i' : 'int', 'u' : 'uint', 'f' : 'float', 'bf' : 'bfloat' }
cuda_compute_type_map = dict()
for c in cuda_compute_type_enum_class:
if c.name == 'COMPUTE_DEFAULT':
continue
m = cuda_compute_type_pattern.match(c.name)
if not m:
raise ValueError("Internal error - unexpected enum entry")
if m.group('tf32'):
continue
name = type_code_map[m.group('type').lower()] + m.group('width')
cuda_compute_type_map[name] = c
# Treat complex types as special case.
cuda_compute_type_map['complex64'] = cuda_compute_type_enum_class.COMPUTE_32F
cuda_compute_type_map['complex128'] = cuda_compute_type_enum_class.COMPUTE_64F
return cuda_compute_type_map
NAME_TO_DATA_TYPE = create_cuda_data_type_map(cudaDataType)
NAME_TO_COMPUTE_TYPE = create_cuda_compute_type_map(ComputeType)
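# A minimal sketch (not part of the original module) of the resulting maps.
def _demo_typemaps():
    assert NAME_TO_DATA_TYPE['complex64'] == cudaDataType.CUDA_C_32F
    assert NAME_TO_COMPUTE_TYPE['float32'] == ComputeType.COMPUTE_32F
    assert NAME_TO_COMPUTE_TYPE['complex128'] == ComputeType.COMPUTE_64F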
| cuQuantum-main | python/cuquantum/cutensornet/_internal/typemaps.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to Torch operations.
"""
__all__ = ['TorchPackage']
import torch
from .package_ifc import Package
class TorchPackage(Package):
@staticmethod
def get_current_stream(device_id):
return torch.cuda.current_stream(device=device_id)
@staticmethod
def to_stream_pointer(stream):
return stream.cuda_stream
@staticmethod
def to_stream_context(stream):
return torch.cuda.stream(stream)
    @staticmethod
    def create_external_stream(device_id, stream_ptr):
return torch.cuda.ExternalStream(stream_ptr, device=device_id)
@staticmethod
def create_stream(device_id):
stream = torch.cuda.Stream(device=device_id)
return stream
| cuQuantum-main | python/cuquantum/cutensornet/_internal/package_ifc_torch.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Interface to seamlessly use Torch tensor objects.
"""
__all__ = ['TorchTensor']
import torch
from . import typemaps
from .tensor_ifc import Tensor
from .. import cutensornet as cutn
class TorchTensor(Tensor):
"""
Tensor wrapper for Torch Tensors.
"""
name = 'torch'
module = torch
name_to_dtype = Tensor.create_name_dtype_map(conversion_function=lambda name: getattr(torch, name), exception_type=AttributeError)
def __init__(self, tensor):
super().__init__(tensor)
@property
def data_ptr(self):
return self.tensor.data_ptr()
    @property
    def device(self):
        return str(self.tensor.device).split(':')[0]
@property
def device_id(self):
return self.tensor.device.index
@property
def dtype(self):
"""Name of the data type"""
return str(self.tensor.dtype).split('.')[-1]
@property
def shape(self):
return tuple(self.tensor.shape)
@property
def strides(self):
return self.tensor.stride()
    def numpy(self):
        # torch tensors have no .get(); move to host memory the torch way
        return self.tensor.cpu().numpy()
@classmethod
def empty(cls, shape, **context):
"""
Create an empty tensor of the specified shape and data type on the specified device (None, 'cpu', or device id).
"""
name = context.get('dtype', 'float32')
dtype = TorchTensor.name_to_dtype[name]
device = context.get('device', None)
tensor = torch.empty(shape, dtype=dtype, device=device)
return tensor
def to(self, device='cpu'):
"""
Create a copy of the tensor on the specified device (integer or
'cpu'). Copy to Numpy ndarray if CPU, otherwise return Cupy type.
"""
if not(device == 'cpu' or isinstance(device, int)):
raise ValueError(f"The device must be specified as an integer or 'cpu', not '{device}'.")
tensor_device = self.tensor.to(device=device)
return tensor_device
def copy_(self, src):
"""
Inplace copy of src (copy the data from src into self).
"""
self.tensor.copy_(src)
def istensor(self):
"""
Check if the object is ndarray-like.
"""
return isinstance(self.tensor, torch.Tensor)
def reshape_to_match_tensor_descriptor(self, handle, desc_tensor):
_, _, extents, strides = cutn.get_tensor_details(handle, desc_tensor)
if tuple(extents) != self.shape:
            # note: torch strides are counted in elements, not bytes
self.tensor = torch.as_strided(self.tensor, tuple(extents), tuple(strides))
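# A minimal usage sketch (illustrative only; must be run as a module, e.g.
# ``python -m cuquantum.cutensornet._internal.tensor_ifc_torch``, due to the
# relative imports above):
if __name__ == '__main__':
    t = TorchTensor(torch.zeros((2, 3), dtype=torch.complex64))
    print(t.dtype, t.shape, t.strides)  # complex64 (2, 3) (3, 1)
    print(t.device, t.device_id)        # cpu None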
| cuQuantum-main | python/cuquantum/cutensornet/_internal/tensor_ifc_torch.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import types
try:
import cirq
from . import circuit_parser_utils_cirq
except ImportError:
cirq = circuit_parser_utils_cirq = None
import cupy as cp
import numpy as np
try:
import qiskit
from . import circuit_parser_utils_qiskit
except ImportError:
qiskit = circuit_parser_utils_qiskit = None
from .tensor_wrapper import _get_backend_asarray_func
from ...utils import WHITESPACE_UNICODE
EINSUM_SYMBOLS_BASE = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
WHITESPACE_SYMBOLS_ID = None
CIRQ_MIN_VERSION = '0.6.0'
QISKIT_MIN_VERSION = '0.24.0' # qiskit metapackage version
EMPTY_DICT = types.MappingProxyType({})
def check_version(package_name, version, minimum_version):
"""
Check if the current version of a package is above the required minimum.
"""
version_numbers = [int(i) for i in version.split('.')]
minimum_version_numbers = [int(i) for i in minimum_version.split('.')]
    if version_numbers < minimum_version_numbers:
        raise NotImplementedError(f'CircuitToEinsum currently supports {package_name} above {minimum_version}, '
                                  f'current version: {version}')
return None
def _get_symbol(i):
"""
    Return a unicode character as the label for the given index. Whitespace unicode characters are skipped.
This function can offer 1113955 (= sys.maxunicode - 140 - 16) unique symbols.
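    For example (illustrative): indices 0-51 map to 'a'-'z' then 'A'-'Z', and
    index 52 maps to the first non-whitespace code point at or above chr(192).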
"""
if i < 52:
return EINSUM_SYMBOLS_BASE[i]
global WHITESPACE_SYMBOLS_ID
if WHITESPACE_SYMBOLS_ID is None:
whitespace = WHITESPACE_UNICODE
WHITESPACE_SYMBOLS_ID = np.asarray([ord(c) for c in whitespace], dtype=np.int32)
WHITESPACE_SYMBOLS_ID = WHITESPACE_SYMBOLS_ID[WHITESPACE_SYMBOLS_ID >= 192]
# leave "holes" in the integer -> unicode mapping to avoid using whitespaces as symbols
i += 140
offset = 0
for hole in WHITESPACE_SYMBOLS_ID: # loop size = 16
if i + offset < hole:
break
offset += 1
try:
return chr(i + offset)
except ValueError as e:
raise ValueError(f"{i=} would exceed unicode limit") from e
def infer_parser(circuit):
"""
Infer the package that defines the circuit object.
"""
if qiskit and isinstance(circuit, qiskit.QuantumCircuit):
import importlib.metadata
qiskit_version = importlib.metadata.version('qiskit') # qiskit metapackage version
check_version('qiskit', qiskit_version, QISKIT_MIN_VERSION)
return circuit_parser_utils_qiskit
elif cirq and isinstance(circuit, cirq.Circuit):
cirq_version = cirq.__version__
check_version('cirq', cirq_version, CIRQ_MIN_VERSION)
return circuit_parser_utils_cirq
else:
base = circuit.__module__.split('.')[0]
raise NotImplementedError(f'circuit from {base} not supported')
def parse_inputs(qubits, gates, dtype, backend):
"""
Given a sequence of qubits and gates, generate the mode labels,
tensor operands and qubits_frontier map for the initial states and gate operations.
"""
n_qubits = len(qubits)
operands = get_bitstring_tensors('0'*n_qubits, dtype, backend=backend)
mode_labels, qubits_frontier, next_frontier = _init_mode_labels_from_qubits(qubits)
gate_mode_labels, gate_operands = parse_gates_to_mode_labels_operands(gates,
qubits_frontier,
next_frontier)
mode_labels += gate_mode_labels
operands += gate_operands
return mode_labels, operands, qubits_frontier
def parse_bitstring(bitstring, n_qubits=None):
"""
Parse the bitstring into standard form.
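    For example, ``parse_bitstring([0, 1, 1])`` returns ``'011'``.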
"""
if n_qubits is not None:
if len(bitstring) != n_qubits:
            raise ValueError(f'bitstring must have the same length as the number of qubits ({n_qubits})')
if not isinstance(bitstring, str):
bitstring = ''.join(map(str, bitstring))
if not set(bitstring).issubset(set('01')):
raise ValueError('bitstring must be a sequence of 0/1')
return bitstring
def parse_fixed_qubits(fixed):
"""
    Given a mapping of qubits to fixed states, return the fixed qubits and the corresponding bitstring.
"""
if fixed:
fixed_qubits, fixed_bitstring = zip(*fixed.items())
else:
fixed_qubits, fixed_bitstring = (), ()
return fixed_qubits, fixed_bitstring
def _init_mode_labels_from_qubits(qubits):
"""
Given a set of qubits, initialize the mode labels, tensor operands and index mapping for the input state.
Returns mode labels, qubit-frontier map, and the next frontier.
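    For example, for qubits ``(q0, q1)`` this returns
    ``([[0], [1]], {q0: 0, q1: 1}, 2)``.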
"""
from itertools import count
n = len(qubits)
return [[i] for i in range(n)], dict(zip(qubits, count())), n
def get_bitstring_tensors(bitstring, dtype='complex128', backend=cp):
"""
    Create the tensor operands for a given bitstring state.
    Args:
        bitstring: A sequence of 0/1 specifying the product state.
dtype: Data type for the tensor operands.
backend: The package the tensor operands belong to.
Returns:
A list of tensor operands stored as `backend` array
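        For example (illustrative), ``get_bitstring_tensors('01', backend=np)``
        returns the two basis vectors ``[1, 0]`` and ``[0, 1]`` as complex128
        NumPy arrays.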
"""
asarray = _get_backend_asarray_func(backend)
state_0 = asarray([1, 0], dtype=dtype)
state_1 = asarray([0, 1], dtype=dtype)
basis_map = {'0': state_0,
'1': state_1}
operands = [basis_map[ibit] for ibit in bitstring]
return operands
def convert_mode_labels_to_expression(input_mode_labels, output_mode_labels):
"""
Create an Einsum expression from input and output index labels.
Args:
input_mode_labels: A sequence of mode labels for each input tensor.
output_mode_labels: The desired mode labels for the output tensor.
Returns:
An Einsum expression in explicit form.
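        For example, input mode labels ``[[0, 1], [1, 2]]`` with output mode
        labels ``[0, 2]`` yield the expression ``'ab,bc->ac'``.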
"""
input_symbols = [''.join(map(_get_symbol, idx)) for idx in input_mode_labels]
expression = ','.join(input_symbols) + '->' + ''.join(map(_get_symbol, output_mode_labels))
return expression
def get_pauli_gates(pauli_map, dtype='complex128', backend=cp):
"""
Populate the gates for all pauli operators.
Args:
pauli_map: A dictionary mapping qubits to pauli operators.
dtype: Data type for the tensor operands.
backend: The package the tensor operands belong to.
Returns:
A sequence of pauli gates.
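        For example (illustrative), ``get_pauli_gates({q0: 'X', q1: 'Z'})``
        returns ``[(pauli_x, (q0,)), (pauli_z, (q1,))]`` with 2x2 operands in
        the requested backend.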
"""
asarray = _get_backend_asarray_func(backend)
pauli_i = asarray([[1,0], [0,1]], dtype=dtype)
pauli_x = asarray([[0,1], [1,0]], dtype=dtype)
pauli_y = asarray([[0,-1j], [1j,0]], dtype=dtype)
pauli_z = asarray([[1,0], [0,-1]], dtype=dtype)
operand_map = {'I': pauli_i,
'X': pauli_x,
'Y': pauli_y,
'Z': pauli_z}
gates = []
for qubit, pauli_char in pauli_map.items():
operand = operand_map.get(pauli_char)
if operand is None:
raise ValueError('pauli string character must be one of I/X/Y/Z')
gates.append((operand, (qubit,)))
return gates
def parse_gates_to_mode_labels_operands(
gates,
qubits_frontier,
next_frontier
):
"""
    Populate the mode labels for all gate tensors.
    Args:
        gates: A list of gate tensors and the corresponding qubits.
        qubits_frontier: The map from each qubit to its current frontier index.
next_frontier: The next index to use.
Returns:
Gate mode labels and gate operands.
"""
mode_labels = []
operands = []
for tensor, gate_qubits in gates:
operands.append(tensor)
input_mode_labels = []
output_mode_labels = []
for q in gate_qubits:
input_mode_labels.append(qubits_frontier[q])
output_mode_labels.append(next_frontier)
qubits_frontier[q] = next_frontier
next_frontier += 1
mode_labels.append(output_mode_labels+input_mode_labels)
return mode_labels, operands
| cuQuantum-main | python/cuquantum/cutensornet/_internal/circuit_converter_utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Create map from package names to package interface objects.
"""
__all__ = ['PACKAGE']
from .package_ifc_cupy import CupyPackage
PACKAGE = {'cupy': CupyPackage}
try:
import torch
from .package_ifc_torch import TorchPackage
PACKAGE['torch'] = TorchPackage
except ImportError:
    pass
| cuQuantum-main | python/cuquantum/cutensornet/_internal/package_wrapper.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Configuration for tensor network contraction and decomposition.
"""
__all__ = ['ContractDecomposeAlgorithm', 'ContractDecomposeInfo']
import dataclasses
import re
from typing import Dict, Optional, Union, Literal
from .. import configuration
from ..tensor import QRMethod, SVDMethod, SVDInfo
from .._internal import utils
@dataclasses.dataclass
class ContractDecomposeAlgorithm:
"""A data class for specifying the algorithm to use for the contract and decompose operations.
Three choices are supported, as listed below:
#. When ``svd_method`` is `False` while ``qr_method`` is not `False` (default),
this amounts to direct contraction of the tensor network followed by a QR decomposition.
#. When ``qr_method`` is `False` while ``svd_method`` is not `False`,
this amounts to direct contraction of the tensor network followed by a singular value decomposition.
    #. When neither ``qr_method`` nor ``svd_method`` is `False`,
this amounts to QR-assisted contraction with singular value decomposition.
QR decomposition will first be applied onto certain input tensors to reduce the network size.
The resulting R tensors along with the remaining tensors form a new network that will be contracted and decomposed using SVD.
The Q tensors from the first QR operations along with the SVD outputs are then subject to two more contractions to yield the final output.
.. note::
The third choice above (QR-assisted contract and SVD) is currently only supported for ternary operands that are fully connected to each other with un-contracted modes on each tensor.
        The results from the third choice are expected to be equivalent to those from the second choice, but typically at lower computational cost.
Attributes:
qr_method: The QR method used for the decomposition. See :class:`~cuquantum.cutensornet.tensor.QRMethod`.
svd_method: The SVD method used for the decomposition. See :class:`~cuquantum.cutensornet.tensor.SVDMethod`.
svd_info: The SVD information during runtime. See :class:`~cuquantum.cutensornet.tensor.SVDInfo`.
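    A minimal construction sketch (illustrative only; the empty dicts request
    default-constructed methods)::

        ContractDecomposeAlgorithm()                                # 1. contract + QR (default)
        ContractDecomposeAlgorithm(qr_method=False, svd_method={})  # 2. contract + SVD
        ContractDecomposeAlgorithm(qr_method={}, svd_method={})     # 3. QR-assisted contract + SVD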
"""
    qr_method: Optional[Union[QRMethod, Literal[False, None], Dict]] = dataclasses.field(default_factory=QRMethod)
    svd_method: Optional[Union[SVDMethod, Literal[False, None], Dict]] = False
def __post_init__(self):
if self.qr_method is False and self.svd_method is False:
raise ValueError("Must specify at least one of the qr_method or svd_method")
if self.qr_method is not False:
self.qr_method = utils.check_or_create_options(QRMethod, self.qr_method, "QR Method")
if self.svd_method is not False:
self.svd_method = utils.check_or_create_options(SVDMethod, self.svd_method, "SVD Method")
@dataclasses.dataclass
class ContractDecomposeInfo:
"""A data class for capturing contract-decompose information.
Attributes:
qr_method: The QR method used for the decomposition. See :class:`~cuquantum.cutensornet.tensor.QRMethod`.
svd_method: The SVD method used for the decomposition. See :class:`~cuquantum.cutensornet.tensor.SVDMethod`.
svd_info: The SVD information during runtime. See :class:`~cuquantum.cutensornet.tensor.SVDInfo`.
optimizer_info: The information for the contraction path to form the intermediate tensor. See :class:`~OptimizerInfo`
"""
    qr_method: Union[QRMethod, Literal[False, None], Dict]
    svd_method: Union[SVDMethod, Literal[False, None], Dict]
svd_info: Optional[SVDInfo] = None
optimizer_info: Optional[configuration.OptimizerInfo] = None
def __str__(self):
core_method = 'QR' if self.svd_method is False else 'SVD'
indent = 4
repr = f"""Contract-Decompose Information:
Summary of Operations:
Contraction followed by {core_method} decomposition."""
if self.svd_method is not False and self.qr_method is not False: # QR-assisted
repr += f"""
Before contraction, QR is applied to reduce the size of the tensor network. Post-decomposition contractions are performed to construct the final outputs."""
# optimizer info, hack below to match string indentation
if self.optimizer_info is not None:
optimizer_info = re.sub(r"\n", fr"\n{' ' * indent}", str(self.optimizer_info))
repr += f"""
{optimizer_info}"""
# svd_info, hack below to match string indentation
if self.svd_info is not None:
svd_info = re.sub(r"\n", fr"\n{' ' * indent}", str(self.svd_info))
repr += f"""
{svd_info}"""
return repr
| cuQuantum-main | python/cuquantum/cutensornet/experimental/configuration.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Tensor network contraction and decomposition.
"""
__all__ = ['contract_decompose']
import dataclasses
import logging
from .configuration import ContractDecomposeAlgorithm, ContractDecomposeInfo
from ._internal.utils import is_gate_split, maybe_truncate_qr_output_operands
from .. import cutensornet as cutn
from ..configuration import NetworkOptions
from ..tensor import decompose, SVDInfo
from ..tensor_network import contract
from .._internal import decomposition_utils
from .._internal import einsum_parser
from .._internal import tensor_wrapper
from .._internal import utils
def _gate_split(wrapped_operands, inputs, outputs, size_dict, max_mid_extent, algorithm, options, stream, return_info):
"""
perform gate split operation by calling ``cutensornetGateSplit``
Args:
        wrapped_operands : Thinly wrapped tensors for the original input operands (NumPy ndarrays are not yet copied to the device).
        inputs : A sequence of modes for input tensors in "neutral format" (sequence of sequences).
        outputs : A sequence of modes for output tensors in "neutral format" (sequence of sequences).
        size_dict : A dictionary mapping the modes to the extent.
        max_mid_extent : The maximal mid extent (reduced) expected for the output of the operation.
        algorithm : A ``ContractDecomposeAlgorithm`` object specifying the algorithm for the gate split operation.
        options : Specify options for the operation as a :class:`~cuquantum.NetworkOptions` object.
        stream : Provide the CUDA stream to use for the operation. Acceptable inputs include ``cudaStream_t``
            (as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
            the current stream will be used.
        return_info : If true, information about the contraction and decomposition will also be returned as a :class:`ContractDecomposeInfo` object.
"""
    # placeholders to help avoid resource leaks and to guard the finally block
    # (an early exception must not trigger a NameError there)
    handle = workspace_desc = svd_config = svd_info = None
    input_tensor_descriptors = output_tensor_descriptors = []
    workspaces = {}
try:
# Options converted to an internal option
wrapped_operands, options, own_handle, operands_location = decomposition_utils.parse_decompose_operands_options(
options, wrapped_operands, allowed_dtype_names=decomposition_utils.DECOMPOSITION_DTYPE_NAMES)
mid_extent = max_mid_extent if algorithm.svd_method.max_extent is None else min(max_mid_extent, algorithm.svd_method.max_extent)
handle = options.handle
package = utils.infer_object_package(wrapped_operands[0].tensor)
stream, stream_ctx, stream_ptr = utils.get_or_create_stream(options.device_id, stream, package)
        options.logger.info("Calling specialized kernel `cutensornetGateSplit` for contraction and decomposition.")
# Create input/output tensor descriptors and empty output operands
input_tensor_descriptors, output_operands, output_tensor_descriptors, s, s_ptr = decomposition_utils.create_operands_and_descriptors(
handle, wrapped_operands, size_dict, inputs, outputs,
mid_extent, algorithm.svd_method, options.device_id, stream_ctx, options.logger)
# Parse SVDConfig
svd_config = cutn.create_tensor_svd_config(handle)
decomposition_utils.parse_svd_config(handle, svd_config, algorithm.svd_method, options.logger)
# Infer GateSplitAlgorithm
gate_algorithm = cutn.GateSplitAlgo.DIRECT if algorithm.qr_method is False else cutn.GateSplitAlgo.REDUCED
# Create workspace descriptor
workspace_desc = cutn.create_workspace_descriptor(handle)
workspace_ptr = None
options.logger.debug("Querying workspace size...")
cutn.workspace_compute_gate_split_sizes(handle,
*input_tensor_descriptors, *output_tensor_descriptors,
gate_algorithm, svd_config, options.compute_type, workspace_desc)
# Allocate and set workspace
workspaces = dict()
for mem_space in (cutn.Memspace.DEVICE, cutn.Memspace.HOST):
workspaces[mem_space] = decomposition_utils.allocate_and_set_workspace(handle, options.allocator, workspace_desc,
cutn.WorksizePref.MIN, mem_space, cutn.WorkspaceKind.SCRATCH, options.device_id,
stream, stream_ctx, options.logger, task_name='contract decomposition')
options.logger.info("Starting contract-decompose (gate split)...")
timing = bool(options.logger and options.logger.handlers)
blocking = options.blocking is True or operands_location == 'cpu'
if blocking:
options.logger.info("This call is blocking and will return only after the operation is complete.")
else:
options.logger.info("This call is non-blocking and will return immediately after the operation is launched on the device.")
svd_info = cutn.create_tensor_svd_info(handle)
with utils.device_ctx(options.device_id), utils.cuda_call_ctx(stream, blocking, timing) as (last_compute_event, elapsed):
cutn.gate_split(handle,
input_tensor_descriptors[0], wrapped_operands[0].data_ptr,
input_tensor_descriptors[1], wrapped_operands[1].data_ptr,
input_tensor_descriptors[2], wrapped_operands[2].data_ptr,
output_tensor_descriptors[0], output_operands[0].data_ptr,
s_ptr,
output_tensor_descriptors[1], output_operands[1].data_ptr,
gate_algorithm,
svd_config,
options.compute_type,
svd_info,
workspace_desc,
stream_ptr)
if elapsed.data is not None:
options.logger.info(f"The contract-decompose (gate split) operation took {elapsed.data:.3f} ms to complete.")
svd_info_obj = SVDInfo(**decomposition_utils.get_svd_info_dict(handle, svd_info))
# Update the operand to reduced_extent if needed
for (wrapped_tensor, tensor_desc) in zip(output_operands, output_tensor_descriptors):
wrapped_tensor.reshape_to_match_tensor_descriptor(handle, tensor_desc)
reduced_extent = svd_info_obj.reduced_extent
if s is not None:
if reduced_extent != mid_extent:
s.tensor = s.tensor[:reduced_extent]
finally:
        # when host workspace is allocated, synchronize stream before return
        if workspaces.get(cutn.Memspace.HOST) is not None:
            stream.synchronize()
# Free resources
decomposition_utils._destroy_tensor_descriptors(input_tensor_descriptors)
decomposition_utils._destroy_tensor_descriptors(output_tensor_descriptors)
if svd_config is not None:
cutn.destroy_tensor_svd_config(svd_config)
if svd_info is not None:
cutn.destroy_tensor_svd_info(svd_info)
if workspace_desc is not None:
cutn.destroy_workspace_descriptor(workspace_desc)
if own_handle and handle is not None:
cutn.destroy(handle)
u, v, s = [decomposition_utils.get_return_operand_data(t, operands_location) for t in output_operands + [s, ]]
if return_info:
info = ContractDecomposeInfo(qr_method=algorithm.qr_method,
svd_method=algorithm.svd_method,
svd_info=svd_info_obj)
return u, s, v, info
else:
return u, s, v
def contract_decompose(subscripts, *operands, algorithm=None, options=None, optimize=None, stream=None, return_info=False):
r"""
Evaluate the compound expression for contraction and decomposition on the input operands.
The expression follows a combination of Einstein summation notation for contraction and the decomposition notation for decomposition
(as in :func:`~cuquantum.cutensornet.tensor.decompose`).
The input represents a tensor network that will be contracted to form an intermediate tensor for subsequent decomposition operation,
yielding two or three outputs depending on the final decomposition method.
The expression requires explicit specification of modes for the input and output tensors (excluding ``S`` for SVD method).
The modes for the intermediate tensor are inferred based on the subscripts representing the output modes by using the implicit form of
the Einstein summation expression (similar to the treatment in ``numpy.einsum`` implicit mode).
See the notes and examples for clarification.
Args:
subscripts : The mode labels (subscripts) defining the contraction and decomposition operation as a comma-separated sequence of
characters. Unicode characters are allowed in the expression thereby expanding the size of the tensor network that
can be specified.
algorithm : Specify the algorithm to perform the contraction and decomposition. Alternatively,
a `dict` containing the parameters for the :class:`ContractDecomposeAlgorithm` constructor can be provided.
If not specified, the value will be set to the default-constructed ``ContractDecomposeAlgorithm`` object.
operands : A sequence of tensors (ndarray-like objects). The currently supported types are :class:`numpy.ndarray`,
:class:`cupy.ndarray`, and :class:`torch.Tensor`.
options : Specify options for the tensor network as a :class:`~cuquantum.NetworkOptions` object. Alternatively, a `dict`
containing the parameters for the ``NetworkOptions`` constructor can also be provided. If not specified,
the value will be set to the default-constructed ``NetworkOptions`` object.
optimize : This parameter specifies options for path optimization as an :class:`~cuquantum.OptimizerOptions` object. Alternatively, a
dictionary containing the parameters for the ``OptimizerOptions`` constructor can also be provided. If not
specified, the value will be set to the default-constructed ``OptimizerOptions`` object.
stream: Provide the CUDA stream to use for the autotuning operation. Acceptable inputs include ``cudaStream_t``
(as Python :class:`int`), :class:`cupy.cuda.Stream`, and :class:`torch.cuda.Stream`. If a stream is not provided,
the current stream will be used.
        return_info : If true, information about the contraction and decomposition will also be returned as a :class:`ContractDecomposeInfo` object.
Returns:
Depending on the decomposition setting specified in ``algorithm``, the results returned may vary:
- For QR decomposition (default), if ``return_info`` is `False`, the output tensors Q and R (ndarray-like objects) of the same type
and on the same device as the input operand are returned as the result of the decomposition. If ``return_info`` is `True`,
a 3-tuple of output tensors Q, R and a `ContractDecomposeInfo` object that contains information about the operations will be returned.
- For SVD decomposition, if ``return_info`` is `False`, a 3-tuple of output tensors U, S and V (ndarray-like objects)
of the same type as the input operand are returned as the result of the decomposition. If ``return_info`` is `True`,
a 4-tuple of output tensors U, S, V and a `ContractDecomposeInfo` object that contains information about the operations will be returned.
Note, depending on the choice of :attr:`~ContractDecomposeAlgorithm.svd_method.partition`, the returned S operand may be `None`.
Also see :attr:`~SVDMethod.partition`.
The contract and decompose expression adopts a combination of Einstein summation notation for contraction and the decomposition notation
introduced in :func:`~cuquantum.cutensornet.tensor.decompose`.
The ``subscripts`` string is a list of subscript labels where each label refers to a mode of the corresponding operand.
The subscript labels are separated by either comma or identifier ``->``.
The subscript labels before the identifier ``->`` are viewed as inputs, and the ones after are viewed as outputs, respectively.
The requirements on the subscripts for SVD and QR decomposition are summarized below:
- For SVD and QR decomposition, the subscripts string is expected to contain more than one input and exactly two output labels (The modes for `S` is not needed in the case of SVD).
- One and only one identical mode is expected to exist in the two output mode labels.
- The modes for the intermediate tensor which will be decomposed are inferred based on the subscripts representing the output modes by using the implicit form of
the Einstein summation expression (similar to the treatment in ``numpy.einsum`` implicit mode).
Therefore, assembling the input modes and the intermediate modes together should result in a valid ``numpy.einsum`` expression (classical or generalized).
Examples:
>>> # equivalent:
>>> # q, r = numpy.linalg.qr(numpy.einsum('ij,jk->ik', a, b))
>>> q, r = contract_decompose('ij,jk->ix,xk', a, b)
>>> # equivalent:
>>> # u, s, v = numpy.linalg.svd(numpy.einsum('ij,jk->ik', a, b), full_matrices=False)
        >>> u, s, v = contract_decompose('ij,jk->ix,xk', a, b, algorithm={'qr_method':False, 'svd_method': {}})
For generalization to generic tensor network with multi-dimensional tensors (``a``, ``b``, ``c`` are all rank-4 tensors).
In this case, the intermediate modes ``ijabe`` is inferred from the output modes ``ixeb`` and ``jax``:
>>> # equivalent:
>>> # t = contract('ijc,cad,dbe->ijabe', a, b, c)
>>> # u, s, v = tensor.decompose('ijabe->ixeb,jax', t, method=SVDMethod())
>>> u, s, v = contract_decompose('ijc,cad,dbe->ixeb,jax', a, b, c, algorithm={'qr_method': False, 'svd_method': True})
If the contract and decompose problem amounts to a **ternary-operand gate split problem** commonly seen in quantum circuit simulation
(see :ref:`Gate Split Algorithm<gatesplitalgo>` for details),
the user may be able to take advantage of optimized kernels from `cutensornetGateSplit` by placing the gate operand as the last one in the input operands.
In this case, QR decomposition can potentially be used to speed up the execution of contraction and SVD.
    This can be achieved by setting both :attr:`~ContractDecomposeAlgorithm.qr_method` and :attr:`~ContractDecomposeAlgorithm.svd_method`,
    as demonstrated below.
Example:
Applying a two-qubit gate to adjacent MPS tensors:
>>> a, _, b = contract_decompose('ipj,jqk,pqPQ->iPx,xQk', a, b, gate, algorithm={'qr_method':{}, 'svd_method':{}})
**Broadcasting** is supported for certain cases via ellipsis notation.
One may add ellipses in the input modes to represent all the modes that are not explicitly specified in the labels.
In such case, an ellipsis is allowed to appear in at most one of the output modes. If an ellipsis appears in one of the output modes,
the implicit modes are partitioned onto the corresponding output. If no ellipsis is found in the output, the implicit modes will be summed over
to construct the intermediate tensors.
Examples:
Below are some examples based on two rank-4 tensors ``a`` and ``b``:
>>> # equivalent:
>>> # out = contract_decompose('ijab,abcd->ijx,xcd', a, b)
>>> out = contract_decompose('ijab,ab...->ijx,x...', a, b) # intermediate modes being "ijcd"
>>> # equivalent:
>>> # out = contract_decompose('ijab,abcd->ix,xj', a, b)
>>> out = contract_decompose('ijab,ab...->ix,xj', a, b) # intermediate modes being "ij"
>>> # equivalent:
>>> # out = contract_decompose('ijab,jkab->ix,xj', a, b)
>>> out = contract_decompose('ij...,jk...->ix,xj', a, b) # intermediate modes being "ij"
>>> # equivalent:
>>> # out = contract_decompose('ijab,jkab->ixab,xj', a, b)
>>> out = contract_decompose('ij...,jk...->ix...,xj', a, b) # intermediate modes being "ijab"
Note that the number of modes that are implicitly represented by the ellipses must be the same for all occurrences.
.. note::
It is encouraged for users to maintain the library handle themselves so as to reduce the context initialization time:
.. code-block:: python
from cuquantum import cutensornet as cutn
from cuquantum.cutensornet.experimental import contract_decompose
handle = cutn.create()
q, r = contract_decompose(..., options={"handle": handle}, ...)
# ... the same handle can be reused for further calls ...
# when it's done, remember to destroy the handle
cutn.destroy(handle)
"""
algorithm = utils.check_or_create_options(ContractDecomposeAlgorithm, algorithm, "Contract Decompose Algorithm")
options = utils.check_or_create_options(NetworkOptions, options, "Network Options")
logger = logging.getLogger() if options.logger is None else options.logger
logger.info(f"CUDA runtime version = {cutn.get_cudart_version()}")
logger.info(f"cuTensorNet version = {cutn.MAJOR_VER}.{cutn.MINOR_VER}.{cutn.PATCH_VER}")
logger.info("Beginning operands parsing...")
    # Parse subscripts and operands
wrapped_operands, inputs, outputs, size_dict, mode_map_user_to_ord, mode_map_ord_to_user, max_mid_extent = decomposition_utils.parse_decomposition(subscripts, *operands)
if is_gate_split(inputs, outputs, algorithm):
# dedicated kernel for GateSplit problem
return _gate_split(wrapped_operands, inputs, outputs, size_dict, max_mid_extent, algorithm, options, stream, return_info)
try:
# contraction followed by decomposition
wrapped_operands, options, own_handle, operands_location = decomposition_utils.parse_decompose_operands_options(
options, wrapped_operands, allowed_dtype_names=decomposition_utils.DECOMPOSITION_DTYPE_NAMES)
intermediate_modes = einsum_parser.infer_output_mode_labels(outputs)
intermediate_labels = []
ellipses_processed = False
for _modes in intermediate_modes:
m = mode_map_ord_to_user[_modes]
if m.startswith('__'): # extra internal modes represented by ellipses
if not ellipses_processed:
m = '...'
ellipses_processed = True
else:
continue
intermediate_labels.append(m)
intermediate_labels = ''.join(intermediate_labels)
input_modes, output_modes = subscripts.split('->')
einsum_subscripts = f"{input_modes}->{intermediate_labels}"
decompose_subscripts = f"{intermediate_labels}->{output_modes}"
if operands_location == 'cpu':
# avoid double transfer
operands = [o.tensor for o in wrapped_operands]
info_dict = {'svd_method': algorithm.svd_method,
'qr_method': algorithm.qr_method}
logger.info("Beginning contraction of the input tensor network...")
intm_output = contract(einsum_subscripts, *operands, options=options, optimize=optimize, stream=stream, return_info=return_info)
logger.info("Contraction of the input tensor network is completed.")
if return_info:
intm_output, (_, info_dict['optimizer_info']) = intm_output # Discard the path as it's part of optimizer_info.
#NOTE: The direct integration here is based on splitting the contract_decompose problem into two sub-problems
# - 1. contraction.
# - 2. decomposition.
# If the algorithm is naively applied, one may not find the optimal reduce extent, for example:
# A[x,y] B[y,z] with input extent x=4, y=2, z=4 -> contract QR decompose -> A[x,k]B[k,z] .
# When naively applying the direct algorithm above, the mid extent k in the output will be 4 (QR on a 4x4 matrix).
# For contract QR decomposition, we manually slice the extents in the outputs.
# For contract SVD decomposition, we inject max_extent as part of the internal SVDMethod.
logger.info("Beginning decomposition of the intermediate tensor...")
if algorithm.qr_method and algorithm.svd_method is False:
# contract and QR decompose
results = decompose(decompose_subscripts, intm_output, method=algorithm.qr_method, options=dataclasses.asdict(options), stream=stream, return_info=False)
results = maybe_truncate_qr_output_operands(results, outputs, max_mid_extent)
if operands_location == 'cpu':
results = [tensor_wrapper.wrap_operand(o).to('cpu') for o in results]
elif algorithm.svd_method and algorithm.qr_method is False:
# contract and SVD decompose
use_max_mid_extent = algorithm.svd_method.max_extent is None
if use_max_mid_extent:
algorithm.svd_method.max_extent = max_mid_extent
results = decompose(decompose_subscripts, intm_output, method=algorithm.svd_method, options=dataclasses.asdict(options), stream=stream, return_info=return_info)
if use_max_mid_extent:
# revert back
algorithm.svd_method.max_extent = None
if return_info:
results, info_dict['svd_info'] = results[:-1], results[-1]
if operands_location == 'cpu':
results = [o if o is None else tensor_wrapper.wrap_operand(o).to('cpu') for o in results]
else:
raise NotImplementedError("contract_decompose currently doesn't support QR assisted SVD contract decomposition for more than 3 operands")
logger.info("Decomposition of the intermediate tensor is completed.")
finally:
if own_handle and options.handle is not None:
cutn.destroy(options.handle)
if not return_info:
return results
else:
return *results, ContractDecomposeInfo(**info_dict)
| cuQuantum-main | python/cuquantum/cutensornet/experimental/tensor_network.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .configuration import *
from .tensor_network import * | cuQuantum-main | python/cuquantum/cutensornet/experimental/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
| cuQuantum-main | python/cuquantum/cutensornet/experimental/_internal/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cuquantum.cutensornet._internal.einsum_parser import infer_output_mode_labels
def is_gate_split(inputs, outputs, algo):
"""
    Check if the input and output modes refer to a GateSplit problem.
Args:
inputs: Einsum expression in "neutral format" (sequence of sequences) after mapping.
outputs: Einsum expression in "neutral format" (sequence of sequences) after mapping.
        algo: The algorithm specified for the contract and decompose operations.
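    For example (illustrative), the canonical MPS gate application
    ``A[i,p,j], B[j,q,k], G[p,q,P,Q] -> A'[i,P,x], B'[x,Q,k]`` qualifies:
    A/B/G are fully connected and the open modes of G (``P`` and ``Q``) are
    partitioned onto the two outputs.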
"""
if algo.svd_method is False: # contract QR decompose
return False
if infer_output_mode_labels(inputs) != infer_output_mode_labels(outputs):
return False
if len(inputs) == 3:
        # This requires:
        # 1. input A/B/G fully connected
        # 2. the number of open/uncontracted modes of G partitioned onto output A and B must be non-zero
a_in, b_in, g, a_out, b_out = map(set, inputs+outputs)
ab_in = a_in & b_in
ag_in = a_in & g
bg_in = b_in & g
g_open = g - ag_in - bg_in
ag_out = g_open & a_out
bg_out = g_open & b_out
return all([ab_in, ag_in, bg_in, ag_out, bg_out])
return False
def maybe_truncate_qr_output_operands(operands, modes, mid_extent):
"""
Given the output operands and modes for QR decomposition, possibly truncate the mid extent of the operands to match the specified mid extent.
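    For example, with modes ``('ix', 'xj')`` and ``mid_extent=2``, operands of
    shapes ``(4, 3)`` and ``(3, 5)`` are sliced to ``(4, 2)`` and ``(2, 5)``
    along the shared mode ``x``.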
"""
shared_mode = (set(modes[0]) & set(modes[1])).pop()
truncated_operands = []
for o, labels in zip(operands, modes):
idx = labels.index(shared_mode)
if o.shape[idx] == mid_extent:
return operands
slices = [slice(None)] * o.ndim
slices[idx] = slice(0, mid_extent)
truncated_operands.append(o[tuple(slices)])
return truncated_operands
| cuQuantum-main | python/cuquantum/cutensornet/experimental/_internal/utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cuquantum.custatevec.custatevec import *
| cuQuantum-main | python/cuquantum/custatevec/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# This module implements basic PEP 517 backend support, see e.g.
# - https://peps.python.org/pep-0517/
# - https://setuptools.pypa.io/en/latest/build_meta.html#dynamic-build-dependencies-and-other-build-meta-tweaks
# Specifically, there are 5 APIs required to create a proper build backend, see below.
# For now it's mostly a pass-through to setuptools, except that we need to determine
# some dependencies at build time.
#
# Note that we purposely do not implement the PEP-660 API hooks so that "pip install ...
# --no-build-isolation -e ." behaves as expected (in-place build/installation without
# creating a wheel). This may require pip>21.3.0.
from setuptools import build_meta as _build_meta
import utils # this is builder.utils (the build system has sys.path set up)
prepare_metadata_for_build_wheel = _build_meta.prepare_metadata_for_build_wheel
build_wheel = _build_meta.build_wheel
build_sdist = _build_meta.build_sdist
# Note: this function returns a list of *build-time* dependencies, so it's not affected
# by "--no-deps" based on the PEP-517 design.
def get_requires_for_build_wheel(config_settings=None):
# set up version constraints: note that CalVer like 22.03 is normalized to
# 22.3 by setuptools, so we must follow the same practice in the constraints;
# also, we don't need the patch number here
cuqnt_require = [f'custatevec-cu{utils.cuda_major_ver}~=1.4', # ">=1.4.0,<2"
f'cutensornet-cu{utils.cuda_major_ver}~=2.2', # ">=2.2.0,<3"
]
return _build_meta.get_requires_for_build_wheel(config_settings) + cuqnt_require
# Note: We have never promised to support sdist (CUQNT-514). We really cannot
# care less about the correctness here. If we are lucky, setuptools would do
# the right thing for us, but even if it's wrong let's not worry about it.
def get_requires_for_build_sdist(config_settings=None):
return _build_meta.get_requires_for_build_sdist(config_settings)
| cuQuantum-main | python/builder/pep517.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# How does the build system for cuquantum-python work?
#
# - When building a wheel ("pip wheel", "pip install .", or "python setup.py
# bdist_wheel" (discouraged!)), we want to build against the cutensor &
# cuquantum wheels that would be installed to site-packages, so we need
# two things:
# 1. make them the *build-time* dependencies
# 2. set up linker flags to modify rpaths
#
# - For 1. we opt in to use PEP-517, as setup_requires is known to not work
# automatically for users. This is the "price" we pay (by design of
# PEP-517), as it creates a new, "isolated" environment (referred to as
# build isolation) to which all build-time dependencies that live on PyPI
# are installed. Another "price" (also by design) is in the non-editable
# mode (without the "-e" flag) it always builds a wheel for installation.
#
# - For 2. the solution is to create our own bdist_wheel (called first) and
# build_ext (called later) commands. The former would inform the latter
# whether we are building a wheel.
#
# - There is an escape hatch for 1. which is to set "--no-build-isolation".
# Then, users are expected to set CUQUANTUM_ROOT (or CUSTATEVEC_ROOT &
# CUTENSORNET_ROOT) and manage all build-time dependencies themselves.
# This, together with "-e", would not produce any wheel, which is the old
# behavior offered by the environment variable CUQUANTUM_IGNORE_SOLVER=1
# that we removed and no longer works.
#
# - In any case, the custom build_ext command is in use, which would compute
# the needed compiler flags (depending on it's building a wheel or not)
# and overwrite the incoming Extension instances.
#
# - In any case, the dependencies (on PyPI wheels) are set up by default,
# and "--no-deps" can be passed as usual to tell pip to ignore the
# *run-time* dependencies.
| cuQuantum-main | python/builder/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import re
import site
import sys
from packaging.version import Version
from setuptools.command.build_ext import build_ext as _build_ext
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
# Get __version__ variable
source_root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(source_root, '..', 'cuquantum', '_version.py')) as f:
exec(f.read())
cuqnt_py_ver = __version__
cuqnt_py_ver_obj = Version(cuqnt_py_ver)
cuqnt_ver_major_minor = f"{cuqnt_py_ver_obj.major}.{cuqnt_py_ver_obj.minor}"
del __version__, cuqnt_py_ver_obj, source_root
# We can't assume users to have CTK installed via pip, so we really need this...
# TODO(leofang): try /usr/local/cuda?
try:
cuda_path = os.environ['CUDA_PATH']
except KeyError as e:
raise RuntimeError('CUDA is not found, please set $CUDA_PATH') from e
def check_cuda_version():
    # We cannot do a dlopen and call cudaRuntimeGetVersion, because it
    # requires GPUs. We also do not want to rely on the compiler utility
    # provided in distutils (deprecated) or setuptools, as this is a very
    # simple string parsing task.
    # TODO: switch to cudaRuntimeGetVersion once it's fixed (nvbugs 3624208)
    cuda_h = os.path.join(cuda_path, 'include', 'cuda.h')
    with open(cuda_h, 'r') as f:
        cuda_h = f.read()
    m = re.search('#define CUDA_VERSION ([0-9]*)', cuda_h)
    if m:
        ver = int(m.group(1))
    else:
        raise RuntimeError("cannot parse CUDA_VERSION")
    # 11020 -> "11.2"
    return str(ver // 1000) + '.' + str((ver % 100) // 10)
# We support CUDA 11/12 starting 23.03
cuda_ver = check_cuda_version()
if '11.0' <= cuda_ver < '12.0':
    cuda_major_ver = '11'
elif '12.0' <= cuda_ver < '13.0':
    cuda_major_ver = '12'
else:
    raise RuntimeError(f"Unsupported CUDA version: {cuda_ver}")
building_wheel = False
class bdist_wheel(_bdist_wheel):
def run(self):
global building_wheel
building_wheel = True
super().run()
class build_ext(_build_ext):
def _set_library_roots(self):
custatevec_root = cutensornet_root = cutensor_root = None
# Note that we need sys.path because of build isolation (since PEP 517)
py_paths = sys.path + [site.getusersitepackages()] + site.getsitepackages()
# search order:
# 1. installed "cuquantum" package
# 2. env var
for path in py_paths:
path = os.path.join(path, 'cuquantum')
if os.path.isdir(os.path.join(path, 'include')):
custatevec_root = cutensornet_root = path
break
else:
# We allow setting CUSTATEVEC_ROOT and CUTENSORNET_ROOT separately for the ease
# of development, but users are encouraged to either install cuquantum from PyPI
# or conda, or set CUQUANTUM_ROOT to the existing installation.
cuquantum_root = os.environ.get('CUQUANTUM_ROOT')
try:
custatevec_root = os.environ['CUSTATEVEC_ROOT']
except KeyError as e:
if cuquantum_root is None:
raise RuntimeError('cuStateVec is not found, please set $CUQUANTUM_ROOT '
'or $CUSTATEVEC_ROOT') from e
else:
custatevec_root = cuquantum_root
try:
cutensornet_root = os.environ['CUTENSORNET_ROOT']
except KeyError as e:
if cuquantum_root is None:
raise RuntimeError('cuTensorNet is not found, please set $CUQUANTUM_ROOT '
'or $CUTENSORNET_ROOT') from e
else:
cutensornet_root = cuquantum_root
return custatevec_root, cutensornet_root
def _prep_includes_libs_rpaths(self):
"""
Set global vars cusv_incl_dir, cutn_incl_dir, cusv_lib_dir, cutn_lib_dir,
cusv_lib, cutn_lib, and extra_linker_flags.
"""
custatevec_root, cutensornet_root = self._set_library_roots()
global cusv_incl_dir, cutn_incl_dir
cusv_incl_dir = [os.path.join(cuda_path, 'include'),
os.path.join(custatevec_root, 'include')]
cutn_incl_dir = [os.path.join(cuda_path, 'include'),
os.path.join(cutensornet_root, 'include')]
global cusv_lib_dir, cutn_lib_dir
# we include both lib64 and lib to accommodate all possible sources
cusv_lib_dir = [os.path.join(custatevec_root, 'lib'),
os.path.join(custatevec_root, 'lib64')]
cutn_lib_dir = [os.path.join(cutensornet_root, 'lib'),
os.path.join(cutensornet_root, 'lib64')]
global cusv_lib, cutn_lib, extra_linker_flags
if not building_wheel:
# Note: with PEP-517 the editable mode would not build a wheel for installation
# (and we purposely do not support PEP-660).
cusv_lib = ['custatevec']
cutn_lib = ['cutensornet']
extra_linker_flags = []
else:
# Note: soname = library major version
# We don't need to link to cuBLAS/cuSOLVER/cuTensor at build time
cusv_lib = [':libcustatevec.so.1']
cutn_lib = [':libcutensornet.so.2']
# The rpaths must be adjusted given the following full-wheel installation:
# - cuquantum-python: site-packages/cuquantum/{custatevec, cutensornet}/ [=$ORIGIN]
# - cusv & cutn: site-packages/cuquantum/lib/
# - cutensor: site-packages/cutensor/lib/
# - cublas: site-packages/nvidia/cublas/lib/
# - cusolver: site-packages/nvidia/cusolver/lib/
# (Note that starting v22.11 we use the new wheel format, so all lib wheels have suffix -cuXX,
# and cuBLAS/cuSOLVER additionally have prefix nvidia-.)
ldflag = "-Wl,--disable-new-dtags,"
ldflag += "-rpath,$ORIGIN/../lib,"
ldflag += "-rpath,$ORIGIN/../../cutensor/lib,"
ldflag += "-rpath,$ORIGIN/../../nvidia/cublas/lib,"
ldflag += "-rpath,$ORIGIN/../../nvidia/cusolver/lib"
extra_linker_flags = [ldflag]
print("\n"+"*"*80)
print("CUDA version:", cuda_ver)
print("CUDA path:", cuda_path)
print("cuStateVec path:", custatevec_root)
print("cuTensorNet path:", cutensornet_root)
print("*"*80+"\n")
def build_extension(self, ext):
if ext.name.endswith("custatevec"):
ext.include_dirs = cusv_incl_dir
ext.library_dirs = cusv_lib_dir
ext.libraries = cusv_lib
ext.extra_link_args = extra_linker_flags
elif ext.name.endswith("cutensornet"):
ext.include_dirs = cutn_incl_dir
ext.library_dirs = cutn_lib_dir
ext.libraries = cutn_lib
ext.extra_link_args = extra_linker_flags
super().build_extension(ext)
def build_extensions(self):
self._prep_includes_libs_rpaths()
self.parallel = 4 # use 4 threads
super().build_extensions()
| cuQuantum-main | python/builder/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from importlib.metadata import distribution
import os.path
def find_package_location(package_name):
path = None
try:
path = _find_package_location_by_license(package_name)
    except Exception:
        pass
if path is None:
path = _find_package_location_by_root(package_name)
return path
def _find_package_location_by_root(package_name):
"""This should not fail, unless the package is not installed."""
dist = distribution(package_name)
roots = set()
for f in dist.files:
dirname = os.path.dirname(str(f.locate()))
if not dirname.endswith("dist-info") and not dirname.endswith("__pycache__"):
roots.add(dirname)
path = os.path.commonprefix(tuple(roots))
return path
def _find_package_location_by_license(package_name):
"""This function assumes a file named LICENSE is placed at the package root."""
dist = distribution(package_name)
for f in dist.files:
if str(f).endswith("LICENSE"):
license = f
break
else:
raise RuntimeError(f"cannot locate the directory for {package_name}")
path = os.path.dirname(license.locate())
return path
def get_library_path(library, cuda_major=11):
if library in ("cuda-runtime", "cublas", "cusolver", "cusparse"):
package_name = f"nvidia-{library}-cu{cuda_major}"
subdir = library.replace("-", "_")
elif library in ("cutensor", "custatevec", "cutensornet"):
package_name = f"{library}-cu{cuda_major}"
subdir = ""
else:
raise NotImplementedError(f"library {library} is not recognized")
dirname = os.path.join(find_package_location(package_name), subdir)
assert os.path.isdir(dirname)
return dirname
def get_include_path(library, cuda_major=11):
dirname = os.path.join(get_library_path(library, cuda_major), "include")
assert os.path.isdir(dirname)
return dirname
def get_link_path(library, cuda_major=11):
dirname = os.path.join(get_library_path(library, cuda_major), "lib")
assert os.path.isdir(dirname)
return dirname
if __name__ == "__main__":
#package_name = "cutensor-cu11"
#package_name = "custatevec-cu11"
#package_name = "cutensornet-cu11"
#package_name = "nvidia-cublas-cu11"
#package_name = "nvidia-cuda-runtime-cu11"
#package_name = "nvidia"
#print(find_package_location(package_name))
print(get_include_path("cutensor"))
print(get_include_path("cublas"))
print(get_include_path("cuda-runtime"))
print(get_include_path("cusolver"))
print(get_include_path("custatevec"))
print(get_include_path("cutensornet"))
print(get_link_path("cutensor"))
print(get_link_path("cublas"))
print(get_link_path("cuda-runtime"))
print(get_link_path("cusolver"))
print(get_link_path("custatevec"))
print(get_link_path("cutensornet"))
| cuQuantum-main | extra/demo_build_with_wheels/search_package_path.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import glob
import importlib.util
import os
import shutil
import site
import subprocess
import sys
from setuptools import find_packages, setup
source_root = os.path.abspath(os.path.dirname(__file__))
# Use README for the project long description
with open(os.path.join(source_root, "README.md")) as f:
long_description = f.read()
# Get project version
with open(os.path.join(source_root, "cuquantum_benchmarks", "__init__.py")) as f:
exec(f.read())
version = __version__
del __version__
# A user could have cuquantum-python-cuXX installed but not cuquantum-python,
# so before asking pip to install it we need to confirm
install_requires = [
"psutil",
"scipy",
"networkx",
"nvtx",
]
if importlib.util.find_spec('cuquantum') is None:
install_requires.append("cuquantum-python>=23.3")
setup(
name="cuquantum-benchmarks",
version=version,
description="NVIDIA cuQuantum Performance Benchmark Suite",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NVIDIA/cuQuantum",
author="NVIDIA Corporation",
author_email="[email protected]",
license="BSD-3-Clause",
license_files = ('LICENSE',),
keywords=["cuda", "nvidia", "state vector", "tensor network", "high-performance computing", "quantum computing",
"quantum circuit simulation"],
packages=find_packages(include=['cuquantum_benchmarks', 'cuquantum_benchmarks.*']),
package_data={"": ["*.py"],},
entry_points = {
'console_scripts': [
'cuquantum-benchmarks = cuquantum_benchmarks.run:run',
]
},
zip_safe=False,
setup_requires=[
"setuptools",
],
install_requires=install_requires,
extras_require={
"all": ["cirq", "qsimcirq", "qiskit", "pennylane", "pennylane-lightning", "pennylane-lightning[gpu]"],
},
classifiers=[
"Topic :: Scientific/Engineering",
"License :: OSI Approved :: BSD License",
"Environment :: GPU :: NVIDIA CUDA",
"Programming Language :: Python :: 3 :: Only",
],
)
| cuQuantum-main | benchmarks/setup.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import pytest
try:
import xdist
except ImportError:
@pytest.fixture(scope="session")
def worker_id(request):
return "master"
else:
del pytest, xdist
| cuQuantum-main | benchmarks/tests/conftest.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import contextlib
import glob
import os
import shutil
import sys
import subprocess
import pytest
from cuquantum_benchmarks.config import benchmarks
@pytest.fixture()
def visible_device(worker_id):
""" Assign 1 device for each test workers to enable test parallelization.
    - If pytest-xdist is not installed or unused (pytest -n ... is not set), just
      pass through CUDA_VISIBLE_DEVICES as is.
    - Otherwise, we assign one device to each worker. If there are more workers
      than devices, we round-robin.
    - In this case, CUDA_VISIBLE_DEVICES should be explicitly set, otherwise
      we just take device 0.
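    For example (illustrative), with CUDA_VISIBLE_DEVICES=0,1 and 3 workers,
    workers gw0/gw1/gw2 are assigned devices 0/1/0, respectively.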
"""
visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "0")
if worker_id == "master":
return visible_devices
visible_devices = [int(i) for i in visible_devices.split(",")]
total_devices = len(visible_devices)
total_workers = int(os.environ["PYTEST_XDIST_WORKER_COUNT"])
worker_id = int(worker_id.lstrip("gw"))
if total_devices >= total_workers:
return visible_devices[worker_id]
else:
# round robin + oversubscription
return visible_devices[worker_id % total_devices]
@pytest.mark.parametrize(
"combo", (
# (frontend, backend, support_mpi)
("cirq", "cirq", False),
("cirq", "qsim", False),
("cirq", "qsim-cuda", False),
("cirq", "qsim-cusv", False),
("cirq", "qsim-mgpu", False),
("cirq", "cutn", True),
("qiskit", "aer", True),
("qiskit", "aer-cuda", True),
("qiskit", "aer-cusv", True),
("qiskit", "cusvaer", True),
("qiskit", "cutn", True),
("naive", "naive", False),
("pennylane", "pennylane", False),
("pennylane", "pennylane-lightning-gpu", False),
("pennylane", "pennylane-lightning-qubit", False),
("pennylane", "pennylane-lightning-kokkos", False),
("qulacs", "qulacs-cpu", False),
("qulacs", "qulacs-gpu", False),
)
)
@pytest.mark.parametrize(
"nqubits", (4,)
)
@pytest.mark.parametrize(
"benchmark", tuple(benchmarks.keys())
)
@pytest.mark.parametrize(
"precision", ("single", "double")
)
class TestCmdCircuit:
# TODO: perhaps this function should live in the _utils module...?
def _skip_if_unavailable(self, combo, nqubits, benchmark, precision):
frontend, backend, support_mpi = combo
# check frontend exists
if frontend == "cirq":
try:
import cirq
except ImportError:
pytest.skip("cirq not available")
elif frontend == "qiskit":
try:
import qiskit
except ImportError:
pytest.skip("qiskit not available")
elif frontend == "naive":
from cuquantum_benchmarks.frontends import frontends
if "naive" not in frontends:
pytest.skip("naive not available")
elif frontend == "pennylane":
try:
import pennylane
except ImportError:
pytest.skip("pennylane not available")
elif frontend == "qulacs":
try:
import qulacs
except ImportError:
pytest.skip("qulacs not available")
# check backend exists
if backend == "aer-cuda":
skip = False
try:
from qiskit.providers.aer import AerSimulator
except ImportError:
skip = True
else:
# there is no other way :(
s = AerSimulator()
if 'GPU' not in s.available_devices():
skip = True
if skip:
pytest.skip("aer-cuda not available")
elif backend in ("aer-cusv", "cusvaer"):
            # no way to tell if the Aer-cuStateVec integration is built, so we
            # only check whether we're inside the container; we're being
            # conservative here...
try:
import cusvaer
except ImportError:
pytest.skip(f"{backend} not available")
elif backend == "aer":
try:
from qiskit.providers.aer import AerSimulator
except ImportError:
pytest.skip("aer not available")
elif backend == "qsim-cuda":
from qsimcirq import qsim_gpu
if qsim_gpu is None:
pytest.skip("qsim-cuda not available")
elif backend == "qsim-cusv":
from qsimcirq import qsim_custatevec
if qsim_custatevec is None:
pytest.skip("qsim-cusv not available")
elif backend == "qsim-mgpu":
try:
from qsimcirq import qsim_mgpu
except ImportError:
pytest.skip("qsim-mgpu not available")
elif backend == "pennylane":
try:
import pennylane
except ImportError:
pytest.skip("pennylane not available")
elif backend == "pennylane-lightning-gpu":
try:
from pennylane_lightning_gpu import LightningGPU
except ImportError:
pytest.skip("pennylane-lightning-gpu not available")
elif backend == "pennylane-lightning-qubit":
try:
from pennylane_lightning.lightning_qubit import LightningQubit
except ImportError:
pytest.skip("pennylane-lightning-qubit not available")
elif backend == "pennylane-lightning-kokkos":
try:
import pennylane_lightning_kokkos
except ImportError:
pytest.skip("pennylane-lightning-kokkos not available")
elif backend == "qulacs-cpu":
try:
import qulacs
except ImportError:
pytest.skip(f"{backend} not available")
elif backend == "qulacs-gpu":
try:
import qulacs.QuantumStateGpu
except ImportError:
pytest.skip(f"{backend} not available")
# check MPI exists
if support_mpi:
if shutil.which('mpiexec') is None:
pytest.skip('MPI not available')
if backend == 'cutn' and os.environ.get('CUTENSORNET_COMM_LIB') is None:
pytest.skip('CUTENSORNET_COMM_LIB is not set')
if ((backend == 'cirq' or backend.startswith('qsim'))
and precision == 'double'):
return pytest.raises(subprocess.CalledProcessError), True
if backend.startswith('qulacs') and precision == 'single':
return pytest.raises(subprocess.CalledProcessError), True
return contextlib.nullcontext(), False
def test_benchmark(self, combo, nqubits, benchmark, precision, tmp_path, visible_device):
frontend, backend, support_mpi = combo
ctx, ret = self._skip_if_unavailable(combo, nqubits, benchmark, precision)
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = str(visible_device)
# internal loop: run the same test twice, without and with MPI
if support_mpi:
# TODO: this may not be robust against conda-forge Open MPI, need to turn
# on MCA parameters via env var
tests = ([], ['mpiexec', '-n', '1'])
else:
tests = ([], )
# use default value from config.py for --ngpus
cmd = [sys.executable, '-m', 'cuquantum_benchmarks', 'circuit',
'--frontend', frontend,
'--backend', backend,
'--ncputhreads', '1',
'--nqubits', str(nqubits),
'--benchmark', benchmark,
'--precision', precision,
'--cachedir', str(tmp_path),
# speed up the tests...
'--nwarmups', '1',
'--nrepeats', '1',
'--verbose']
if backend == 'cusvaer':
cmd += ['--cusvaer-global-index-bits', '--cusvaer-p2p-device-bits']
if backend == 'cutn':
cmd += ['--nhypersamples', '2']
for cmd_prefix in tests:
result = subprocess.run(cmd_prefix+cmd, env=env, capture_output=True)
with ctx:
try:
assert bool(result.check_returncode()) == ret
cached_circuits = [f for f in glob.glob(str(tmp_path / f"circuits/{benchmark}_{nqubits}*.pickle")) if os.path.isfile(f)]
assert len(cached_circuits) == 1
cached_json = [f for f in glob.glob(str(tmp_path / f"data/{benchmark}.json")) if os.path.isfile(f)]
assert len(cached_json) == 1 # TODO: test aggregate behavior too?
except:
# make debugging easier
print("stdout:\n", result.stdout.decode())
print("stderr:\n", result.stderr.decode())
raise
finally:
print("cmd:\n", ' '.join(cmd_prefix+cmd))
# TODO: test invalid cases and ensure we raise errors
class TestCmdApi:
@pytest.mark.parametrize(
"args", (
["--nqubits", "4", "--ntargets", "2"],
["--nqubits", "4", "--targets", "2,3"],
["--nqubits", "6", "--ntargets", "3", "--controls", "3"],
["--nqubits", "4", "--targets", "2,3", "--ncontrols", "1"],
["--nqubits", "4", "--targets", "2,3", "--controls", "1"],
)
)
@pytest.mark.parametrize(
"matrix_prop", (
[], # default
["--layout", "column", "--adjoint"],
)
)
@pytest.mark.parametrize(
"precision", ("single", "double")
)
@pytest.mark.parametrize(
"flush", (True, False)
)
def test_apply_matrix(self, args, matrix_prop, precision, flush, tmp_path, visible_device):
benchmark = 'apply_matrix'
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = str(visible_device)
cmd = [sys.executable, '-m', 'cuquantum_benchmarks', 'api',
'--benchmark', benchmark,
'--precision', precision,
'--cachedir', str(tmp_path),
# speed up the tests...
'--nwarmups', '1',
'--nrepeats', '1',
'--verbose']
cmd += args
cmd += matrix_prop
if flush:
cmd += ['--flush-cache']
result = subprocess.run(cmd, env=env, capture_output=True)
try:
assert bool(result.check_returncode()) == False
cached_json = [f for f in glob.glob(str(tmp_path / f"data/{benchmark}.json")) if os.path.isfile(f)]
assert len(cached_json) == 1 # TODO: test aggregate behavior too?
except:
# make debugging easier
print("stdout:\n", result.stdout.decode())
print("stderr:\n", result.stderr.decode())
raise
finally:
print("cmd:\n", ' '.join(cmd))
@pytest.mark.parametrize(
"args", (
("--nqubits", "4", "--ntargets", "2",),
("--nqubits", "4", "--targets", "2,3",),
("--nqubits", "6", "--ntargets", "2", "--controls", "3",),
("--nqubits", "4", "--targets", "1,2", "--ncontrols", "1",),
("--nqubits", "4", "--targets", "2,3", "--controls", "1",),
)
)
@pytest.mark.parametrize(
"diag", (
(),
("--has-diag", "--location-diag", "device",),
("--has-diag", "--precision-diag", "double", "--precision", "double",),
)
)
@pytest.mark.parametrize(
"perm", (
("--has-perm",),
("--has-perm", "--location-perm", "device",),
("--perm-table", "2,3,0,1",), # this test assumes ntargets=2 always
)
)
@pytest.mark.parametrize(
"matrix_prop", (
(), # default
("--adjoint",),
)
)
def test_apply_generalized_permutation_matrix(
self, args, diag, perm, matrix_prop, tmp_path, visible_device):
benchmark = 'apply_generalized_permutation_matrix'
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = str(visible_device)
cmd = [sys.executable, '-m', 'cuquantum_benchmarks', 'api',
'--benchmark', benchmark,
'--cachedir', str(tmp_path),
# speed up the tests...
'--nwarmups', '1',
'--nrepeats', '1',
'--verbose']
cmd += args
cmd += diag
cmd += perm
cmd += matrix_prop
result = subprocess.run(cmd, env=env, capture_output=True)
try:
assert bool(result.check_returncode()) == False
cached_json = [f for f in glob.glob(str(tmp_path / f"data/{benchmark}.json")) if os.path.isfile(f)]
assert len(cached_json) == 1 # TODO: test aggregate behavior too?
except:
# make debugging easier
print("stdout:\n", result.stdout.decode())
print("stderr:\n", result.stderr.decode())
raise
finally:
print("cmd:\n", ' '.join(cmd))
@pytest.mark.parametrize(
"args", (
("--nqubits", "4", "--nbit-ordering", "2", "--nshots", "256"),
("--nqubits", "4", "--bit-ordering", "2,3", "--output-order", "random"),
)
)
@pytest.mark.parametrize(
"precision", ("single", "double")
)
def test_cusv_sampler(self, args, precision, tmp_path, visible_device):
benchmark = 'cusv_sampler'
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = str(visible_device)
cmd = [sys.executable, '-m', 'cuquantum_benchmarks', 'api',
'--benchmark', benchmark,
'--precision', precision,
'--cachedir', str(tmp_path),
# speed up the tests...
'--nwarmups', '1',
'--nrepeats', '1',
'--verbose']
cmd += args
result = subprocess.run(cmd, env=env, capture_output=True)
try:
assert bool(result.check_returncode()) == False
cached_json = [f for f in glob.glob(str(tmp_path / f"data/{benchmark}.json")) if os.path.isfile(f)]
assert len(cached_json) == 1 # TODO: test aggregate behavior too?
except:
# make debugging easier
print("stdout:\n", result.stdout.decode())
print("stderr:\n", result.stderr.decode())
raise
finally:
print("cmd:\n", ' '.join(cmd))
@pytest.mark.parametrize(
"args", (
["--expr", "abc->abx,xc", "--shape", "4,8,4"],
["--expr", "abcd->ax,bcdx", "--shape", "4,8,4,2"],
)
)
@pytest.mark.parametrize(
"method", (
("--method", "QR",),
("--method", "SVD",),
("--algorithm", "gesvd"),
("--algorithm", "gesvdj"),
("--algorithm", "gesvdr"),
("--algorithm", "gesvdp"),
)
)
@pytest.mark.parametrize(
"precision", ("single", "double")
)
@pytest.mark.parametrize(
"is_complex", (True, False)
)
def test_tensor_decompose(self, args, method, precision, is_complex, tmp_path, visible_device):
benchmark = 'tensor_decompose'
env = os.environ.copy()
env["CUDA_VISIBLE_DEVICES"] = str(visible_device)
cmd = [sys.executable, '-m', 'cuquantum_benchmarks', 'api',
'--benchmark', benchmark,
'--precision', precision,
'--cachedir', str(tmp_path),
# speed up the tests...
'--nwarmups', '1',
'--nrepeats', '1',
'--verbose']
cmd += args
cmd += method
if is_complex:
cmd.append('--is-complex')
result = subprocess.run(cmd, env=env, capture_output=True)
try:
assert bool(result.check_returncode()) == False
cached_json = [f for f in glob.glob(str(tmp_path / f"data/{benchmark}.json")) if os.path.isfile(f)]
assert len(cached_json) == 1 # TODO: test aggregate behavior too?
except:
# make debugging easier
print("stdout:\n", result.stdout.decode())
print("stderr:\n", result.stderr.decode())
raise
finally:
print("cmd:\n", ' '.join(cmd))
| cuQuantum-main | benchmarks/tests/cuquantum_benchmarks_tests/test_run.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import logging
import sys
import multiprocessing
from .backends import backends
from .config import benchmarks
from .config import backends as backend_config
from .frontends import frontends
from .run_interface import BenchApiRunner, BenchCircuitRunner
from ._utils import (EarlyReturnError, MPHandler, RawTextAndDefaultArgFormatter,
str_to_seq,)
frontend_names = [f for f in frontends.keys()]
backend_names = [b for b in backends.keys()]
benchmark_names = [b for b in benchmarks.keys()]
main_description = api_description = circuit_description = r"""
=============== NVIDIA cuQuantum Performance Benchmark Suite ===============
"""
circuit_description += r"""
Note: all frontends and backends are optional and unavailable for use unless installed.
Supported Frontends:
- cirq
- qiskit
- pennylane
- qulacs
Supported Backends:
- aer: runs Qiskit Aer's CPU backend
- aer-cuda: runs the native Qiskit Aer GPU backend
- aer-cusv: runs Qiskit Aer's cuStateVec integration
- cusvaer: runs the *multi-GPU, multi-node* custom Qiskit Aer GPU backend, only
available in the cuQuantum Appliance container
- cirq: runs Cirq's native CPU backend (cirq.Simulator)
- cutn: runs cuTensorNet by constructing the tensor network corresponding to the
benchmark circuit (through cuquantum.CircuitToEinsum)
- qsim: runs qsim's CPU backend
- qsim-cuda: runs the native qsim GPU backend
- qsim-cusv: runs qsim's cuStateVec integration
- qsim-mgpu: runs the *multi-GPU* (single-node) custom qsim GPU backend, only
available in the cuQuantum Appliance container
- pennylane: runs PennyLane's native CPU backend
- pennylane-lightning-gpu: runs the PennyLane-Lightning GPU backend
- pennylane-lightning-qubit: runs the PennyLane-Lightning CPU backend
- pennylane-lightning-kokkos: runs the PennyLane-Lightning Kokkos backend
- qulacs-gpu: runs the Qulacs GPU backend
- qulacs-cpu: runs the Qulacs CPU backend
============================================================================
"""
# main parser
parser = argparse.ArgumentParser(
description=main_description,
formatter_class=RawTextAndDefaultArgFormatter)
subparsers = parser.add_subparsers(dest="cmd", required=True)
# "cuquantum-benchmarks circuit" subcommand
parser_circuit = subparsers.add_parser(
'circuit',
description=circuit_description,
help="benchmark different classes of quantum circuits",
formatter_class=RawTextAndDefaultArgFormatter)
parser_circuit.add_argument('--benchmark', type=str, default='all', choices=benchmark_names+['all'],
help=f'pick the circuit to benchmark')
parser_circuit.add_argument('--frontend', type=str, required=True, choices=frontend_names,
help=f'set the simulator frontend')
parser_circuit.add_argument('--backend', type=str, required=True, choices=backend_names,
help=f'set the simulator backend that is compatible with the frontend')
parser_circuit.add_argument('--new', help='create a new circuit rather than use existing circuit', action='store_true')
# these options make sense to both circuit & api benchmarks, for better UX we need to copy/paste
parser_circuit.add_argument('--cachedir', type=str, default='.', help='set the directory to cache generated data')
parser_circuit.add_argument('--nqubits', type=int, help='set the number of qubits for each benchmark (circuit/api)')
parser_circuit.add_argument('--nwarmups', type=int, default=3, help='set the number of warm-up runs for each benchmark')
parser_circuit.add_argument('--nrepeats', type=int, default=10, help='set the number of repetitive runs for each benchmark')
parser_circuit.add_argument('-v', '--verbose', help='output extra information during benchmarking', action='store_true')
backend = parser_circuit.add_argument_group(
'backend-specific options', 'each backend has its own default config, see cuquantum_benchmarks/config.py for detail')
backend.add_argument('--ngpus', type=int, help='set the number of GPUs to use')
backend.add_argument('--ncputhreads', type=int, help='set the number of CPU threads to use')
backend.add_argument('--nshots', type=int, help='set the number of shots for quantum state measurement')
backend.add_argument('--nfused', type=int, help='set the maximum number of fused qubits for gate matrix fusion')
backend.add_argument('--precision', type=str, choices=('single', 'double'),
help='set the floating-point precision')
backend_cusvaer = parser_circuit.add_argument_group('cusvaer-specific options')
backend_cusvaer.add_argument('--cusvaer-global-index-bits', type=str_to_seq, nargs='?', const='', default=-1,
help='set the global index bits to specify the inter-node network structure. Please refer to the '
'cusvaer backend documentation for further details. If not followed by any argument, '
'the default (empty sequence) is used; '
'otherwise, the argument should be a comma-separated string. '
'Setting this option is mandatory for the cusvaer backend; setting it with any other backend is an error')
backend_cusvaer.add_argument('--cusvaer-p2p-device-bits', type=int, nargs='?', const=0, default=-1,
help='set the number of p2p device bits. Please refer to the cusvaer backend documentation '
'for further details. If not followed by any argument, the default (0) is used. '
'Setting this option is mandatory for the cusvaer backend; setting it with any other backend is an error')
backend_cusvaer.add_argument('--cusvaer-data-transfer-buffer-bits', type=int, default=26,
help='set the size of the data transfer buffer in cusvaer. The size is '
'specified as a positive integer. The buffer size used is (1 << [#bits]). '
'The default is 26 (64 MiB = 1 << 26)')
backend_cusvaer.add_argument('--cusvaer-comm-plugin-type', type=str, nargs='?', default='mpi_auto',
choices=['mpi_auto', 'mpi_openmpi', 'mpi_mpich', 'external', 'self'],
help='set the type of comm plugin used for multi-process simulation. '
'This option is required when a custom comm plugin is needed.')
backend_cusvaer.add_argument('--cusvaer-comm-plugin-soname', type=str, nargs='?', default='',
help='specify the name of a shared library used for inter-process communication. '
'This option is required when a custom comm plugin is needed')
backend_cutn = parser_circuit.add_argument_group('cutn-specific options')
backend_cutn.add_argument('--nhypersamples', type=int, default=32, help='set the number of hypersamples for the pathfinder to explore')
# "cuquantum-benchmarks api" subcommand
parser_api = subparsers.add_parser(
'api',
description=api_description,
help="benchmark different APIs from cuQuantum's libraries",
formatter_class=RawTextAndDefaultArgFormatter)
parser_api.add_argument('--benchmark', type=str, required=True,
choices=BenchApiRunner.supported_apis,
help='pick the API to benchmark. Specify a benchmark together with -h/--help to see its detailed help message.')
parser_api.add_argument('--precision', type=str, choices=('single', 'double'), default='single',
help='set the floating-point precision')
# these options make sense to both circuit & api benchmarks, for better UX we need to copy/paste
# TODO: set the arguments programmatically to avoid dups
parser_api.add_argument('--cachedir', type=str, default='.', help='set the directory to cache generated data')
parser_api.add_argument('--nwarmups', type=int, default=3, help='set the number of warm-up runs for each benchmark')
parser_api.add_argument('--nrepeats', type=int, default=10, help='set the number of repetitive runs for each benchmark')
parser_api.add_argument('-v', '--verbose', help='output extra information during benchmarking', action='store_true')
# add_api_benchmark_options() can only be called once throughout the process's lifetime
_is_api_benchmark_options_added = False
def add_api_benchmark_options(parser_api, args=None):
# benchmark-specific options
global _is_api_benchmark_options_added
if _is_api_benchmark_options_added: return
# hack: we want dynamic behavior but the parser can't do the job properly
target = None
if args is None:
what_to_parse = sys.argv # parsing from cmdline
else:
what_to_parse = args
try:
idx = what_to_parse.index('--benchmark')
target = what_to_parse[idx+1]
except (ValueError, IndexError):
return
assert target is not None
if target == 'apply_matrix':
apply_matrix = parser_api.add_argument_group('apply_matrix-specific options')
targets = apply_matrix.add_mutually_exclusive_group(required=True)
targets.add_argument('--targets', type=str_to_seq,
help="set the (comma-separated) target qubit IDs")
targets.add_argument('--ntargets', type=int, help='set the number of target qubits')
controls = apply_matrix.add_mutually_exclusive_group(required=False)
controls.add_argument('--controls', type=str_to_seq,
help="set the (comma-separated) control qubit IDs")
controls.add_argument('--ncontrols', type=int, help='set the number of control qubits')
apply_matrix.add_argument('--layout', type=str, choices=('row', 'column'), default='row',
help='set the gate matrix layout')
apply_matrix.add_argument('--adjoint', action='store_true', help='apply the matrix adjoint')
apply_matrix.add_argument('--location', type=str, choices=('device', 'host'), default='host',
help='set the location of the gate matrix')
apply_matrix.add_argument('--nqubits', type=int, required=True,
help='set the total number of qubits')
apply_matrix.add_argument('--flush-cache', action='store_true', help='flush the L2 cache for more accurate timing')
if target == 'apply_generalized_permutation_matrix':
apply_gen_perm_matrix = parser_api.add_argument_group('apply_generalized_permutation_matrix-specific options')
apply_gen_perm_matrix.add_argument('--nqubits', type=int, required=True,
help='set the total number of qubits')
targets = apply_gen_perm_matrix.add_mutually_exclusive_group(required=True)
targets.add_argument('--targets', type=str_to_seq,
help="set the (comma-separated) target qubit IDs")
targets.add_argument('--ntargets', type=int, help='set the number of target qubits')
controls = apply_gen_perm_matrix.add_mutually_exclusive_group(required=False)
controls.add_argument('--controls', type=str_to_seq,
help="set the (comma-separated) control qubit IDs")
controls.add_argument('--ncontrols', type=int, help='set the number of control qubits')
apply_gen_perm_matrix.add_argument('--adjoint', action='store_true',
help='apply the matrix adjoint')
apply_gen_perm_matrix.add_argument('--has-diag', action='store_true',
help='whether the diagonal matrix is nontrivial (not an identity)')
apply_gen_perm_matrix.add_argument('--location-diag', type=str, choices=('device', 'host'), default='host',
help='set the location of the diagonal matrix')
apply_gen_perm_matrix.add_argument('--precision-diag', type=str, choices=('single', 'double'), default='single',
help='set the floating-point precision of the diagonal matrix')
perm = apply_gen_perm_matrix.add_mutually_exclusive_group(required=False)
perm.add_argument('--has-perm', action='store_true',
help='whether the permutation matrix is nontrivial (not an identity)')
perm.add_argument('--perm-table', type=str_to_seq,
help='set the permutation table for constructing a permutation matrix')
apply_gen_perm_matrix.add_argument('--location-perm', type=str, choices=('device', 'host'), default='host',
help='set the location of the permutation matrix')
elif target == 'cusv_sampler':
sampler = parser_api.add_argument_group('cusv_sampler-specific options')
bitordering = sampler.add_mutually_exclusive_group(required=True)
bitordering.add_argument('--bit-ordering', type=str_to_seq,
help="set the (comma-separated) qubit IDs to sample")
bitordering.add_argument('--nbit-ordering', type=int,
help='set the number of qubits to sample')
sampler.add_argument('--nqubits', type=int, required=True,
help='set the total number of qubits')
sampler.add_argument('--nshots', type=int, default=1024,
help="set the number of shots")
sampler.add_argument('--output-order', choices=('random', 'ascending'), default='ascending',
help='set the order of bit strings in sampled outputs')
elif target == 'tensor_decompose':
tensor_decompose = parser_api.add_argument_group('tensor_decompose-specific options')
method = tensor_decompose.add_mutually_exclusive_group(required=True)
method.add_argument('--method', type=str, choices=('QR', 'SVD'),
help='the method for tensor decomposition; when SVD is chosen, gesvd will be used')
method.add_argument('--algorithm', type=str, choices=('gesvd', 'gesvdj', 'gesvdr', 'gesvdp'),
help='the algorithm for SVD decomposition')
tensor_decompose.add_argument('--expr', type=str, required=True,
help='an einsum-like expression describing the decomposition; '
'the expression must be quoted with \' or \"')
tensor_decompose.add_argument('--shape', type=str_to_seq, required=True,
help='the shape of the input tensor')
tensor_decompose.add_argument('--is-complex', action='store_true',
help='whether the input tensor is complex-valued')
tensor_decompose.add_argument('--check-reference', action='store_true', default=False)
_is_api_benchmark_options_added = True
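# An illustrative "api" invocation (a sketch; --benchmark must be present on the
# command line so that the benchmark-specific options above get registered):
#
#     python -m cuquantum_benchmarks api --benchmark apply_matrix \
#         --nqubits 4 --ntargets 2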
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
# WAR: PennyLane mistakenly sets a stream handler to the root logger, so if PennyLane is
# installed, all of our logging is messed up. Let's just clean up the root logger. It's
# reported to & fixed in upstream (https://github.com/PennyLaneAI/pennylane/issues/3731).
root_logger = logging.getLogger()
for h in root_logger.handlers:
h.close()
root_logger.handlers = [] # this private interface has been stable since 2002
def run(args=None):
# we allow args to be a list of cmd options for potential private use cases and tests
add_api_benchmark_options(parser_api, args)
args = parser.parse_args(args)
# run() might be called multiple times; in that case we don't want to modify the
# handler from the 2nd call onward. This ensures we write to the same I/O stream
# and do not need to call handler.flush() manually.
if not logger.hasHandlers():
formatter = logging.Formatter(f"%(asctime)s %(levelname)-8s %(message)s")
handler = MPHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
level = logging.DEBUG if args.verbose else logging.INFO
except AttributeError:
# user forgot to set the subcommand, let argparse kick in and raise
pass
else:
logger.setLevel(level)
finally:
del args.verbose
# dispatch to subcommands
cmd = args.cmd
del args.cmd
if cmd == "circuit":
selected_benchmarks = benchmarks if args.benchmark == 'all' else {args.benchmark: benchmarks[args.benchmark]}
del args.benchmark
config = backend_config[args.backend]
if ((args.frontend == 'cirq' and args.backend not in ('cirq', 'cutn', *[k for k in backends.keys() if k.startswith('qsim')]))
or (args.frontend == 'qiskit' and args.backend not in ('cutn', *[k for k in backends.keys() if 'aer' in k]))
or (args.frontend == 'naive' and args.backend != 'naive')
or (args.frontend == 'pennylane' and not args.backend.startswith('pennylane'))
or (args.frontend == 'qulacs' and not args.backend.startswith('qulacs'))):
raise ValueError(f'frontend {args.frontend} does not work with backend {args.backend}')
if args.backend == 'cusvaer':
if args.cusvaer_global_index_bits == -1:
raise ValueError("backend cusvaer requires setting --cusvaer-global-index-bits")
if args.cusvaer_p2p_device_bits == -1:
raise ValueError("backend cusvaer requires setting --cusvaer-p2p-device-bits")
else:
if args.cusvaer_global_index_bits != -1:
raise ValueError(f"cannot set --cusvaer-global-index-bits for backend {args.backend}")
if args.cusvaer_p2p_device_bits != -1:
raise ValueError(f"cannot set --cusvaer-p2p-device-bits for backend {args.backend}")
runner = BenchCircuitRunner(
benchmarks=selected_benchmarks,
backend_config=config,
**vars(args))
# benchmark & dump result to cachedir
try:
runner.run()
except EarlyReturnError:
pass
elif cmd == "api":
runner = BenchApiRunner(**vars(args))
# benchmark & dump result to cachedir
try:
runner.run()
except EarlyReturnError:
pass
if __name__ == "__main__":
run()
| cuQuantum-main | benchmarks/cuquantum_benchmarks/run.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import multiprocessing
from .benchmarks.hidden_shift import HiddenShift
from .benchmarks.ghz import GHZ
from .benchmarks.qaoa import QAOA
from .benchmarks.qft import QFT
from .benchmarks.iqft import IQFT
from .benchmarks.qpe import QPE
from .benchmarks.quantum_volume import QuantumVolume
from .benchmarks.random import Random
from .benchmarks.simon import Simon
#########################################################################################################
########################################### Benchmarks Config ###########################################
#########################################################################################################
benchmarks = {
'qft': {
'benchmark': QFT,
'config': {
'measure': True,
},
},
'iqft': {
'benchmark': IQFT,
'config': {
'measure': True,
},
},
'ghz': {
'benchmark': GHZ,
'config': {
'measure': True,
},
},
'simon': {
'benchmark': Simon,
'config': {
'measure': True,
},
},
'hidden_shift': {
'benchmark': HiddenShift,
'config': {
'measure': True,
},
},
'qaoa': {
'benchmark': QAOA,
'config': {
'measure': True,
'p': 1,
},
},
'qpe': {
'benchmark': QPE,
'config': {
'measure': True,
'unfold': False,
},
},
'quantum_volume': {
'benchmark': QuantumVolume,
'config': {
'measure': True,
},
},
'random': {
'benchmark': Random,
'config': {
'measure': True,
},
},
}
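# A new circuit benchmark could be registered with an entry of the same shape,
# e.g. (hypothetical; MyBench would need to provide generateGatesSequence() and
# postProcess() like the classes imported above):
#
#     'my_bench': {
#         'benchmark': MyBench,
#         'config': {'measure': True},
#     },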
#########################################################################################################
############################################ Backends Config ############################################
#########################################################################################################
backends = {
'cutn': {
'config': {
'nshots': 0,
'nfused': None,
'ngpus': 1,
# TODO: even this may not be a good default
'ncputhreads': multiprocessing.cpu_count() // 2,
'precision': 'single',
'nhypersamples': 32,
},
},
'aer': {
'config': {
'nshots': 1024,
'nfused': 5,
'ngpus': 0,
'ncputhreads': multiprocessing.cpu_count(),
'precision':'single',
},
},
'aer-cuda': {
'config': {
'nshots': 1024,
'nfused': 5,
'ngpus': 1,
'ncputhreads': multiprocessing.cpu_count(),
'precision':'single',
},
},
'aer-cusv': {
'config': {
'nshots': 1024,
'nfused': 5,
'ngpus': 1,
'ncputhreads': multiprocessing.cpu_count(),
'precision':'single',
},
},
'cusvaer': {
'config': {
'nshots': 1024,
'nfused': 4,
'ngpus': 1,
'ncputhreads': multiprocessing.cpu_count(),
'precision':'single',
},
},
'cirq': {
'config': {
'nshots': 1024,
'nfused': 4,
'ngpus': 0,
'ncputhreads': 1,
'precision':'single',
},
},
'qsim': {
'config': {
'nshots': 1024,
'nfused': 2,
'ngpus': 0,
'ncputhreads': multiprocessing.cpu_count(),
'precision':'single',
},
},
'qsim-cuda': {
'config': {
'nshots': 1024,
'nfused': 2,
'ngpus': 1,
'ncputhreads': 1,
'precision':'single',
},
},
'qsim-cusv': {
'config': {
'nshots': 1024,
'nfused': 2,
'ngpus': 1,
'ncputhreads': 1,
'precision':'single',
},
},
'qsim-mgpu': {
'config': {
'nshots': 1024,
'nfused': 4,
'ngpus': 1,
'ncputhreads': 1,
'precision':'single',
},
},
'naive': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 1,
'ncputhreads': 0,
'precision': 'single',
},
},
'pennylane': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 0,
'ncputhreads': 1,
'precision': 'single',
},
},
'pennylane-lightning-gpu': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 1,
'ncputhreads': 0,
'precision': 'single',
},
},
'pennylane-lightning-qubit': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 0,
'ncputhreads': 1,
'precision': 'single',
},
},
'pennylane-lightning-kokkos': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 1,
'ncputhreads': 0,
'precision': 'single',
},
},
# dummy
'pennylane-dumper': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 0,
'ncputhreads': 1,
'precision': 'single',
},
},
'qulacs-gpu': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 1,
'ncputhreads': 0,
'precision': 'double',
},
},
'qulacs-cpu': {
'config': {
'nshots': 1024,
'nfused': None,
'ngpus': 0,
'ncputhreads': 1,
'precision': 'double',
},
},
}
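# Likewise, a new backend config could be added with an entry of the same shape,
# e.g. (hypothetical values):
#
#     'my_backend': {
#         'config': {
#             'nshots': 1024, 'nfused': None, 'ngpus': 1,
#             'ncputhreads': 0, 'precision': 'single',
#         },
#     },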
| cuQuantum-main | benchmarks/cuquantum_benchmarks/config.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
__version__ = '0.3.0'
| cuQuantum-main | benchmarks/cuquantum_benchmarks/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import logging
import math
import nvtx
import os
import pickle
import random
import time
import cupy as cp
from .backends import createBackend
from .frontends import createFrontend
from ._utils import (
call_by_root, create_cache, EarlyReturnError, gen_run_env, get_mpi_rank, HashableDict,
is_running_mpiexec, is_running_mpi, load_benchmark_data, report, reseed,
save_benchmark_data,
)
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
class BenchCircuitRunner:
# currently we assume the following subdirectories exist
required_subdirs = ('circuits', 'data')
def __init__(self, **kwargs):
# use default backend config unless users want to overwrite it
self.backend_config = backend_config = kwargs.pop("backend_config")
for k in (# generic backend options
"ngpus", "ncputhreads", "nshots", "nfused", "precision",
# cusvaer options
'cusvaer_global_index_bits', 'cusvaer_p2p_device_bits',
'cusvaer_data_transfer_buffer_bits', 'cusvaer_comm_plugin_type',
'cusvaer_comm_plugin_soname',
# cutn options
'nhypersamples'):
v = kwargs.pop(k)
if k.startswith('cusvaer') or v is not None:
setattr(self, k, v)
else:
setattr(self, k, backend_config['config'][k])
# To be parsed in run()
self._benchmarks = kwargs.pop("benchmarks")
self._nqubits = kwargs.pop("nqubits")
# other common benchmark args
self.frontend = kwargs.pop("frontend")
self.backend = kwargs.pop("backend")
self.cache_dir = kwargs.pop("cachedir")
self.nwarmups = kwargs.pop("nwarmups")
self.nrepeats = kwargs.pop("nrepeats")
self.new_circ = kwargs.pop("new")
self.save = True
assert len(kwargs) == 0, f"unhandled cmdline args: {kwargs}"
self.full_data = {}
self.benchmark_data = {}
# it could be that the cache dirs are not created yet
call_by_root(functools.partial(create_cache, self.cache_dir, self.required_subdirs))
def run(self):
if self._nqubits is None:
gpu_prop = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)
max_n_qubits = math.floor(math.log2(gpu_prop['totalGlobalMem'] / (8 if self.precision == 'single' else 16)))
nqubits_list = list(range(16, max_n_qubits + 4, 4))
else:
nqubits_list = [self._nqubits]
for benchmark_name in self._benchmarks.keys():
b = self._benchmarks[benchmark_name]
benchmark_object = b['benchmark']
benchmark_config = b['config']
benchmark_config['precision'] = self.precision # some frontends may need it
for nqubits in nqubits_list:
self.benchmark_name = benchmark_name
self.benchmark_object = benchmark_object
self.benchmark_config = benchmark_config
self.nqubits = nqubits
self._run()
def _load_or_generate_circuit(self, circuit_filename):
# We need a mechanism to ensure any incompatible gate_sequence generated
# and cached from the previous releases is invalidated. We do so by
# assigning a version number gate_seq_ver for the gate sequence and
# encoding it in the pickle filename.
#
# v0.1.0: the gate_sequence is a list of size-2 lists.
# v0.2.0: the gate_sequence is a list of Gate objects. gate_seq_ver = 1.
gate_seq_ver = 1
circuit_filename += f"_v{gate_seq_ver}.pickle"
frontend = createFrontend(self.frontend, self.nqubits, self.benchmark_config)
dump_only = bool(os.environ.get('CUQUANTUM_BENCHMARKS_DUMP_GATES', False))
if dump_only:
# hijack & discard user input
from .frontends.frontend_dumper import Dumper
frontend = Dumper(
self.nqubits,
{**self.benchmark_config, 'circuit_filename': circuit_filename})
try:
if self.new_circ:
raise ValueError
# If this circuit has been generated previously, load it
with open(os.path.join(self.cache_dir, circuit_filename), 'rb') as f:
gate_sequence = pickle.load(f)
circuit = frontend.generateCircuit(gate_sequence)
logger.debug(f'Circuit loaded from {circuit_filename}')
except: # Otherwise, generate the circuit and save it
gate_sequence = self.benchmark_object.generateGatesSequence(self.nqubits, self.benchmark_config)
circuit = frontend.generateCircuit(gate_sequence)
def dump():
with open(os.path.join(self.cache_dir, circuit_filename), 'wb') as f:
pickle.dump(gate_sequence, f, protocol=pickle.HIGHEST_PROTOCOL)
logger.debug(f'Circuit generated and saved to {circuit_filename}')
call_by_root(dump)
if dump_only:
logger.info("early exiting as the dumper task is completed")
raise EarlyReturnError
return circuit
def get_circuit(self, circuit_filename):
# This method ensures that only the root process generates the circuit and then
# broadcasts it, so that all processes see the same circuit.
MPI = is_running_mpi()
circuit = call_by_root(functools.partial(self._load_or_generate_circuit, circuit_filename))
if MPI:
comm = MPI.COMM_WORLD
circuit = comm.bcast(circuit)
return circuit
def timer(self, backend, circuit, nshots):
perf_time = 0
cuda_time = 0
post_time = 0
if self.ngpus > 0:
start_gpu = cp.cuda.Event()
end_gpu = cp.cuda.Event()
# warm up
for i in range(self.nwarmups):
backend.pre_run(circuit, nshots=nshots)
backend.run(circuit, nshots)
annotation_string = f"p{get_mpi_rank()}_run_"
# actual timing
for i in range(self.nrepeats):
backend.pre_run(circuit, nshots=nshots)
if self.ngpus > 0:
start_gpu.record()
pe1 = time.perf_counter()
with nvtx.annotate(annotation_string + str(i)):
run_dict = backend.run(circuit, nshots)
pe2 = time.perf_counter()
if self.ngpus > 0:
end_gpu.record()
perf_time += pe2 - pe1
if self.ngpus > 0:
end_gpu.synchronize()
cuda_time += cp.cuda.get_elapsed_time(start_gpu, end_gpu) / 1000 # ms->s
# TODO: remove results?
results = run_dict['results']
post_res = run_dict['post_results']
run_data = run_dict['run_data']
for k, v in run_data.items():
self.benchmark_data[k] = v
pe2 = time.perf_counter()
post_process = self.benchmark_object.postProcess(self.nqubits, post_res)
pe3 = time.perf_counter()
post_time += pe3 - pe2
return perf_time / self.nrepeats, cuda_time / self.nrepeats, post_time / self.nrepeats, post_process
def _fix_filename_for_cutn(self, circuit_filename, nqubits):
target = pauli = None
if self.backend == 'cutn':
target = os.environ.get('CUTENSORNET_BENCHMARK_TARGET', 'amplitude')
circuit_filename += f'_{target}'
if target == 'expectation':
pauli = random.choices(('I', 'X', 'Y', 'Z'), k=nqubits)
circuit_filename += f"_{''.join(pauli)}"
return circuit_filename, target, pauli
def extract_backend_version(self):
if 'aer' in self.backend:
import qiskit
version = qiskit.__qiskit_version__['qiskit-aer']
elif 'qsim' in self.backend:
import qsimcirq
version = qsimcirq.__version__
elif self.backend == 'cutn':
import cuquantum
version = cuquantum.cutensornet.get_version()
elif self.backend == 'cirq':
import cirq
version = cirq.__version__
elif self.backend == 'naive':
from .backends import backends
version = backends['naive'].version
elif self.backend == 'pennylane':
import pennylane
version = pennylane.__version__
elif self.backend == 'pennylane-lightning-gpu':
import pennylane_lightning_gpu
version = pennylane_lightning_gpu.__version__
elif self.backend == 'pennylane-lightning-qubit':
import pennylane_lightning
version = pennylane_lightning.__version__
elif self.backend == 'pennylane-lightning-kokkos':
import pennylane_lightning_kokkos
version = pennylane_lightning_kokkos.__version__
elif self.backend == 'pennylane-dumper':
version = '0' # dummy
elif self.backend in ('qulacs-gpu', 'qulacs-cpu'):
import qulacs
version = qulacs.__version__
else:
assert False
return version
def extract_frontend_version(self):
if self.frontend == 'qiskit':
import qiskit
version = qiskit.__qiskit_version__['qiskit-terra']
elif self.frontend == 'cirq':
import cirq
version = cirq.__version__
elif self.frontend == 'naive':
from .frontends import frontends
version = frontends['naive'].version
elif self.frontend == 'pennylane':
import pennylane
version = pennylane.__version__
elif self.frontend == 'qulacs':
import qulacs
version = qulacs.__version__
else:
assert False
return version
def extract_glue_layer_version(self):
if self.backend == 'cutn':
import cuquantum
glue_ver = f'cuquantum {cuquantum.__version__}'
else:
return None
return glue_ver
def _run(self):
reseed(1234) # TODO: use a global seed?
measure = self.benchmark_config['measure']
# try to load existing perf data, if any
data_filename = f'{self.benchmark_name}.json'
filepath = f'{self.cache_dir}/data/{data_filename}'
self.full_data = load_benchmark_data(filepath)
gpu_device_properties = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)
gpu_name = gpu_device_properties['name'].decode('utf-8').split(' ')[-1]
num_qubits = str(self.nqubits)
num_gpus = str(self.ngpus)
circuit_filename = f'circuits/{self.benchmark_name}_{self.nqubits}'
if 'unfold' in self.benchmark_config.keys() and self.benchmark_config['unfold']:
circuit_filename += '_unfold'
if 'p' in self.benchmark_config.keys():
p = self.benchmark_config['p']
circuit_filename += f'_p{p}'
if measure:
circuit_filename += '_measure'
circuit_filename, target, pauli = self._fix_filename_for_cutn(circuit_filename, self.nqubits)
self.cutn_target = target
# get circuit
circuit = self.get_circuit(circuit_filename)
# get backend
# TODO: use backend config to simplify this...
backend = createBackend(
self.backend, self.ngpus, self.ncputhreads, self.precision,
nqubits=self.nqubits,
# cusvaer options
cusvaer_global_index_bits=self.cusvaer_global_index_bits,
cusvaer_p2p_device_bits=self.cusvaer_p2p_device_bits,
cusvaer_data_transfer_buffer_bits=self.cusvaer_data_transfer_buffer_bits,
cusvaer_comm_plugin_type=self.cusvaer_comm_plugin_type,
cusvaer_comm_plugin_soname=self.cusvaer_comm_plugin_soname,
# qiskit and qsim
nfused=self.nfused,
# cutn
nhypersamples=self.nhypersamples,
)
# get versions; it's assumed up to this point, the existence of Python modules for
# both frontend and backend is confirmed
backend_version = self.extract_backend_version()
frontend_version = self.extract_frontend_version()
glue_layer_version = self.extract_glue_layer_version()
if glue_layer_version is not None:
ver_str = f'[{self.frontend}-v{frontend_version} | (glue ver: {glue_layer_version}) | {self.backend}-v{backend_version}]'
else:
ver_str = f'[{self.frontend}-v{frontend_version} | {self.backend}-v{backend_version}]'
if self.ngpus == 0:
logger.info(
f'* Running {self.benchmark_name} with {self.ncputhreads} CPU threads, and {self.nqubits} qubits {ver_str}:'
)
else:
logger.info(
f'* Running {self.benchmark_name} with {self.ngpus} GPUs, and {self.nqubits} qubits {ver_str}:'
)
preprocess_data = backend.preprocess_circuit(
circuit,
# only cutn needs these, TODO: backend config
circuit_filename=os.path.join(self.cache_dir, circuit_filename),
target=target,
pauli=pauli,
)
for k in preprocess_data.keys():
self.benchmark_data[k] = preprocess_data[k]
# run benchmark
perf_time, cuda_time, post_time, post_process = self.timer(backend, circuit, self.nshots) # nsamples -> nshots
# report the result
run_env = gen_run_env(gpu_device_properties)
report(perf_time, cuda_time, post_time if post_process else None, self.ngpus,
run_env, gpu_device_properties, self.benchmark_data)
# Save the new benchmark data
out = self.canonicalize_benchmark_data(frontend_version, backend_version, run_env, glue_layer_version)
save_benchmark_data(
*out,
self.full_data, filepath, self.save)
def canonicalize_benchmark_data(self, frontend_version, backend_version, run_env, glue_layer_version):
"""
json scheme: this is designed such that if any item in sim_config changes, the
benchmark data would be appended rather than overwritten.
benchmark
|_ num_qubits
|_ sim_config_hash ( = hash string of sim_config )
|_ benchmark_data
|_ frontend (part of sim_config)
|_ name
|_ version
|_ backend (part of sim_config)
|_ name
|_ version
|_ ngpus
|_ ncputhreads
|_ nshots
|_ nfused
|_ precision
|_ ... (all backend-specific options go here)
|_ glue_layer (part of sim_config)
|_ name
|_ version
|_ run_env (part of sim_config)
|_ hostname
|_ cpu_name
|_ gpu_name
|_ gpu_driver_ver
|_ gpu_runtime_ver
|_ nvml_driver_ver
|_ cpu_time
|_ gpu_time
|_ ... (other timings, env info, ...)
"""
# TODO: consider recording cuquantum-benchmarks version?
# TODO: alternatively, version each individual benchmark and record it?
num_qubits = str(self.nqubits)
sim_config = HashableDict({
'frontend': HashableDict({
"name": self.frontend,
"version": frontend_version,
}),
'backend': HashableDict({
"name": self.backend,
"version": backend_version,
"ngpus": self.ngpus,
"ncputhreads": self.ncputhreads,
"nshots": self.nshots,
"nfused": self.nfused,
"precision": self.precision,
"with_mpi": is_running_mpiexec(),
}),
'glue_layer': HashableDict({
"name": None,
"version": glue_layer_version,
}),
'run_env': run_env,
})
# frontend-specific options
# TODO: record "measure"?
# backend-specific options
if self.backend == "cusvaer":
sim_config["backend"]["cusvaer_global_index_bits"] = self.cusvaer_global_index_bits
sim_config["backend"]["cusvaer_p2p_device_bits"] = self.cusvaer_p2p_device_bits
elif self.backend == "cutn":
sim_config["backend"]["target"] = self.cutn_target
sim_config_hash = sim_config.get_hash()
self.benchmark_data = {**self.benchmark_data, **sim_config}
return num_qubits, sim_config_hash, self.benchmark_data
class BenchApiRunner:
supported_cusv_apis = ('apply_matrix', 'apply_generalized_permutation_matrix', 'cusv_sampler', )
supported_cutn_apis = ('tensor_decompose',)
supported_apis = supported_cusv_apis + supported_cutn_apis
# currently we assume the following subdirectories exist
required_subdirs = ('data',)
def __init__(self, **kwargs):
self.benchmark = kwargs.pop("benchmark")
self.cache_dir = kwargs.pop("cachedir")
self.args = kwargs # just hold the entire group of parsed cmdline args, don't unpack all
# it could be that the cache dirs are not created yet
call_by_root(functools.partial(create_cache, self.cache_dir, self.required_subdirs))
# load existing json, if any
self.data_filename = f"{self.benchmark}.json"
self.file_path = f'{self.cache_dir}/data/{self.data_filename}'
self.full_data = load_benchmark_data(self.file_path)
def run(self):
# prep
if self.benchmark not in self.supported_apis:
raise NotImplementedError(f"only {self.supported_apis} is supported for now")
gpu_device_properties = cp.cuda.runtime.getDeviceProperties(cp.cuda.Device().id)
benchmark_data = {}
# time the api
bench_func = getattr(self, f"_run_{self.benchmark}")
perf_time, cuda_time = bench_func(benchmark_data) # update benchmark_data in-place
# report the result
run_env = gen_run_env(gpu_device_properties)
report(perf_time, cuda_time, None, 1,
run_env, gpu_device_properties, benchmark_data)
# Save the new benchmark data
out = self.canonicalize_benchmark_data(run_env, benchmark_data)
save_benchmark_data(*out, self.full_data, self.file_path)
def _run_apply_matrix(self, benchmark_data):
# TODO: It's better to move this method elsewhere, once we support more apis
from .benchmarks.apply_matrix import test_apply_matrix
args = self.args
self.num_qubits = args.pop("nqubits")
# create targets while keeping args clean for later use
ntargets = args.pop("ntargets")
targets = args.pop("targets")
targets = tuple(range(ntargets)) if targets is None else tuple(targets)
args["targets"] = targets
# create controls while keeping args clean for later use
ncontrols = args.pop("ncontrols")
controls = args.pop("controls")
if controls is None and ncontrols is None:
controls = ()
elif controls is None:
controls = tuple(range(ncontrols))
else:
controls = tuple(controls)
args["controls"] = controls
# run
return test_apply_matrix(
self.num_qubits,
targets,
controls,
args["precision"],
args["precision"], # TODO: allow different mat precision?
args["layout"],
int(args["adjoint"]),
args["nwarmups"],
args["nrepeats"],
args["location"],
flush_l2=args["flush_cache"],
benchmark_data=benchmark_data,
)
def _run_apply_generalized_permutation_matrix(self, benchmark_data):
# TODO: It's better to move this method elsewhere, once we support more apis
from .benchmarks.apply_gen_perm_matrix import test_apply_generalized_permutation_matrix
args = self.args
self.num_qubits = args.pop("nqubits")
# create targets while keeping args clean for later use
ntargets = args.pop("ntargets")
targets = args.pop("targets")
targets = tuple(range(ntargets)) if targets is None else tuple(targets)
args["targets"] = targets
# create controls while keeping args clean for later use
ncontrols = args.pop("ncontrols")
controls = args.pop("controls")
if controls is None and ncontrols is None:
controls = ()
elif controls is None:
controls = tuple(range(ncontrols))
else:
controls = tuple(controls)
args["controls"] = controls
# create perm_table while keeping args clean for later use
has_perm = args.pop("has_perm")
perm_table = args.pop("perm_table")
if has_perm is False and perm_table is None:
perm_table = []
elif perm_table is None:
# used as a flag to fill perm_table randomly later
perm_table = bool(has_perm)
else:
perm_table = list(perm_table)
args["perm_table"] = perm_table
# run
return test_apply_generalized_permutation_matrix(
self.num_qubits,
args["precision"],
targets,
controls,
int(args["adjoint"]),
args["has_diag"],
args["precision_diag"],
args["location_diag"],
args["perm_table"],
args["location_perm"],
args["nwarmups"],
args["nrepeats"],
benchmark_data=benchmark_data,
)
def _run_cusv_sampler(self, benchmark_data):
from .benchmarks.cusv_sampler import test_cusv_sampler
args = self.args
self.num_qubits = args.pop("nqubits")
# create bit_ordering while keeping args clean for later use
nbit_ordering = args.pop("nbit_ordering")
bit_ordering = args.pop("bit_ordering")
bit_ordering = tuple(range(nbit_ordering)) if bit_ordering is None else tuple(bit_ordering)
args["bit_ordering"] = bit_ordering
# run
return test_cusv_sampler(
self.num_qubits,
args["precision"],
bit_ordering,
args["nshots"],
args["output_order"],
args["nwarmups"],
args["nrepeats"],
benchmark_data=benchmark_data,
)
def _run_tensor_decompose(self, benchmark_data):
from .benchmarks.tensor_decompose import benchmark_tensor_decompose
args = self.args
self.num_qubits = 0 # WAR
# ensure the combination of method/algorithm is meaningful
if args["method"] == "SVD":
args["algorithm"] = "gesvd"
elif args["algorithm"] is not None:
# algorithm is set, must be doing SVD
args["method"] = "SVD"
# run
return benchmark_tensor_decompose(
args["expr"],
tuple(args["shape"]),
args["precision"],
args["is_complex"],
args["method"],
args["algorithm"],
args["nwarmups"],
args["nrepeats"],
args["check_reference"],
benchmark_data=benchmark_data,
)
def canonicalize_benchmark_data(self, run_env, benchmark_data):
"""
json scheme: this is designed such that if any item in sim_config changes, the
benchmark data would be appended rather than overwritten.
benchmark
|_ num_qubits
|_ sim_config_hash ( = hash string of sim_config )
|_ benchmark_data
|_ api (part of sim_config)
|_ name
|_ cuqnt_py_ver
|_ lib_ver
|_ precision
|_ ... (all api-specific options go here)
|_ run_env (part of sim_config)
|_ hostname
|_ cpu_name
|_ gpu_name
|_ gpu_driver_ver
|_ gpu_runtime_ver
|_ nvml_driver_ver
|_ cpu_time
|_ gpu_time
|_ ... (other timings, env info, ...)
"""
# TODO: consider recording cuquantum-benchmarks version?
from cuquantum import __version__ as cuqnt_py_ver
num_qubits = str(self.num_qubits)
benchmark = self.benchmark
if benchmark in self.supported_cusv_apis:
from cuquantum import custatevec as lib
elif benchmark in self.supported_cutn_apis:
from cuquantum import cutensornet as lib
else:
assert False
# Note: be mindful that we unpack self.args here, as it's designed to be
# sensitive to any change in the cmdline options.
sim_config = HashableDict({
"api": HashableDict({**{
"name": benchmark,
"cuqnt_py_ver": cuqnt_py_ver,
"lib_ver": lib.get_version(),
}, **self.args}),
'run_env': run_env,
})
# TODO: remember to record cutn_target once we support it
#elif self.args.backend == "cutn":
# sim_config["backend"]["target"] = self.args.cutn_target
sim_config_hash = sim_config.get_hash()
benchmark_data = {**benchmark_data, **sim_config}
return num_qubits, sim_config_hash, benchmark_data
| cuQuantum-main | benchmarks/cuquantum_benchmarks/run_interface.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .run import run
if __name__ == '__main__':
run()
| cuQuantum-main | benchmarks/cuquantum_benchmarks/__main__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import argparse
import ctypes
from dataclasses import dataclass
import functools
import math
import json
import hashlib
import logging
import os
import platform
import random
import re
import time
from typing import Iterable, Optional, Union
import warnings
import cupy as cp
import numpy as np
import nvtx
from cuquantum import cudaDataType, ComputeType
from cuquantum.cutensornet._internal.einsum_parser import create_size_dict
import psutil
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
def wrap_with_nvtx(func, msg):
"""Add NVTX makers to a function with a message."""
@functools.wraps(func)
def inner(*args, **kwargs):
with nvtx.annotate(msg):
return func(*args, **kwargs)
return inner
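# usage sketch: wrap_with_nvtx(backend.run, "run")(circuit, nshots) executes the
# call inside an NVTX range named "run"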
def reseed(seed=1234):
random.seed(seed)
np.random.seed(seed)
# Q: How about CuPy?
def precision_str_to_dtype(precision, is_complex=True):
if precision == "single":
if is_complex:
return np.complex64
else:
return np.float32
elif precision == "double":
if is_complex:
return np.complex128
else:
return np.float64
else:
raise ValueError
def dtype_to_cuda_type(dtype):
if dtype == np.complex64:
return cudaDataType.CUDA_C_32F
elif dtype == np.complex128:
return cudaDataType.CUDA_C_64F
else:
raise ValueError
def dtype_to_compute_type(dtype):
if dtype == np.complex64:
return ComputeType.COMPUTE_32F
elif dtype == np.complex128:
return ComputeType.COMPUTE_64F
else:
raise ValueError
def generate_size_dict_from_operands(einsum, operands):
inputs = einsum.split("->")[0]
inputs = inputs.split(",")
assert len(inputs) == len(operands)
return create_size_dict(inputs, operands)
# TODO: clean up, this is copied from internal utils
def convert_einsum_to_txt(einsum, size_dict, filename):
def _gen_txt(content, idx_map, idx_counter, tn, size_dict, dump_extents=True):
# TODO: refactor this with the contraction_*.py utilities
for i in tn:
# dump indices
if i == '': continue
idx = idx_map.get(i, idx_counter)
assert idx is not None, f"got {idx} for {i} from {tn}"
content += f"{idx} "
if idx == idx_counter:
idx_map[i] = idx_counter
idx_counter += 1
if dump_extents:
content += ' | '
for i in tn:
content += f"{size_dict[i]} "
content += '\n'
return content, idx_map, idx_counter
# TODO: refactor this with the contraction_*.py utilities
content = ''
idx_map = {}
idx_counter = 0
inputs, output = re.split("->", einsum)
inputs = re.split(",", inputs.strip())
for tn in inputs:
content, idx_map, idx_counter = _gen_txt(content, idx_map, idx_counter, tn, size_dict)
content += '---\n'
content, _, _ = _gen_txt(content, idx_map, None, output.strip(), size_dict, dump_extents=False)
assert filename.endswith('.txt')
def dump():
with open(filename, 'w') as f:
f.write(content)
call_by_root(dump)
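# For example, einsum "ab,bc->ac" with size_dict {'a': 2, 'b': 4, 'c': 2} would
# be dumped roughly as (a sketch of the emitted text format):
#
#     0 1 | 2 4
#     1 2 | 4 2
#     ---
#     0 2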
def random_unitary(size, rng=None, dtype=np.float64, check=False):
# the same functionality can be done with scipy.stats.unitary_group.rvs(),
# but this is so simple that we just re-implement it here
rng = np.random.default_rng(1234) if rng is None else rng # TODO: honor a global seed?
m = rng.standard_normal(size=(size, size), dtype=dtype) \
+ 1j*rng.standard_normal(size=(size, size), dtype=dtype)
q, r = np.linalg.qr(m)
d = np.diag(r)
q *= d/abs(d)
if check:
is_unitary = np.allclose(
np.abs(np.dot(q, q.T.conj()) - np.eye(size, dtype=q.dtype)),
0,
)
if not is_unitary:
warnings.warn("generated random matrix might not be unitary")
return q
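# a quick self-check sketch:
#     q = random_unitary(4)
#     assert np.allclose(q @ q.conj().T, np.eye(4))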
def is_running_mpiexec():
# This is not 100% robust but should cover MPICH, Open MPI and Slurm:
# PMI_SIZE is set by Hydra (MPICH), OMPI_COMM_WORLD_SIZE by Open MPI
if 'PMI_SIZE' in os.environ or \
'OMPI_COMM_WORLD_SIZE' in os.environ:
return True
# SLURM_NPROCS is defined by Slurm
if 'SLURM_NPROCS' in os.environ:
nprocs = os.environ['SLURM_NPROCS']
return nprocs != '1'
# no environmental variable found
return False
def is_running_mpi():
if is_running_mpiexec():
try:
from mpi4py import MPI # init!
except ImportError as e:
raise RuntimeError(
'it seems you are running mpiexec/mpirun but mpi4py cannot be '
'imported, maybe you forgot to install it?') from e
else:
MPI = None
return MPI
def get_mpi_size():
MPI = is_running_mpi()
return MPI.COMM_WORLD.Get_size() if MPI else 1
def get_mpi_rank():
MPI = is_running_mpi()
return MPI.COMM_WORLD.Get_rank() if MPI else 0
def call_by_root(f, root=0):
""" Call the callable f only by the root process. """
MPI = is_running_mpi()
if MPI:
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
if rank == root:
return f()
else:
return f()
class MPHandler(logging.StreamHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
MPI = is_running_mpi()
if MPI:
self.rank = MPI.COMM_WORLD.Get_rank()
else:
self.rank = 0
def emit(self, record):
# don't log unless I am the root process
if self.rank == 0:
super().emit(record)
def str_to_seq(data):
data = data.split(',')
out = []
for i in data:
if i:
out.append(int(i))
return out
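# e.g. str_to_seq("0,2,5") -> [0, 2, 5]; empty segments are skipped, so
# str_to_seq("") -> []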
def get_cpu_name():
# This helper avoids the need to install py-cpuinfo. This works
# because we only support Linux.
with open('/proc/cpuinfo', 'r') as f:
cpuinfo = f.read()
m = re.search(r"model name.*\n", cpuinfo)
if m:
return m.group(0).split(':')[-1].strip()
else:
assert False, f"getting cpu info failed"
def get_gpu_driver_version():
# this function will not raise
inited = False
try:
# nvml comes with the driver, so it'd be always available
from ctypes.util import find_library
lib_name = find_library('nvidia-ml')
lib = ctypes.CDLL(lib_name)
init = lib.nvmlInit_v2
func = lib.nvmlSystemGetDriverVersion
shutdown = lib.nvmlShutdown
out = ctypes.create_string_buffer(80)
status = init()
if status != 0:
raise RuntimeError('cannot init nvml')
inited = True
status = func(ctypes.byref(out), 80)
if status != 0:
raise RuntimeError('cannot get driver version')
except:
ver = "N/A"
else:
ver = out.value.decode()
finally:
if inited:
shutdown()
return ver
class RawTextAndDefaultArgFormatter(
argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter):
pass
class HashableDict(dict):
def get_hash(self):
# 1. we want a stable hash scheme, the built-in hash() is not
# 2. but hash() requires __hash__() returning an int, while this is a str
return hashlib.sha256(str(tuple(self.items())).encode()).hexdigest()
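# e.g. HashableDict({'a': 1}).get_hash() yields the same hex digest across runs
# and processes, which the built-in hash() does not guarantee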
@dataclass
class Gate:
"""A data class for holding all gate-related information.
Attributes:
id: The gate identity.
targets: The target qubit(s).
controls: The control qubit(s), if any.
matrix: The gate matrix.
params: The gate parameter(s).
name: The gate name.
"""
id: str = ''
targets: Union[int, Iterable[int]] = None
controls: Optional[Union[int, Iterable[int]]] = None
matrix: Optional[Union[Iterable[float], Iterable[complex]]] = None
params: Optional[Union[float, Iterable[float]]] = None
name: Optional[str] = ''
def __post_init__(self):
if not self.id:
raise ValueError("gate id must be specified")
if self.targets is None:
raise ValueError("targets must be specified")
if self.matrix is not None:
if self.params is not None:
raise ValueError("gate matrix and gate parameters cannot coexist")
try:
n_targets = len(self.targets)
except TypeError:
n_targets = 1 # targets is int
try:
# 1D/2D ndarray-like objects
assert self.matrix.size == (2**n_targets)**2
except AttributeError:
# plain Python objects, must be 2D/nested (otherwise we'd have to
# assume there's a certain memory layout); we're being sloppy
# here and do not check the inner sequence lengths...
try:
assert len(self.matrix) == 2**n_targets
except Exception as e:
raise ValueError("gate matrix size must match targets") from e
def __repr__(self):
s = f"Gate(id={self.id}, targets={self.targets}"
if self.controls is not None:
s += f", controls={self.controls}"
if self.matrix is not None:
s += f", matrix={self.matrix}"
elif self.params is not None:
s += f", params={self.params}"
if self.name:
s += f", name={self.name}"
s += ")"
return s
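# Example constructions (hypothetical gate ids; the actual ids are produced by
# the benchmark generators):
#
#     Gate(id='h', targets=0, name='hadamard')
#     Gate(id='cphase', targets=1, controls=0, params=0.5)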
def gen_run_env(gpu_device_properties):
run_env = HashableDict({
'hostname': platform.node(),
'cpu_name': get_cpu_name(),
'gpu_name': gpu_device_properties['name'].decode('utf-8'),
'gpu_driver_ver': cp.cuda.runtime.driverGetVersion(),
'gpu_runtime_ver': cp.cuda.runtime.runtimeGetVersion(),
'nvml_driver_ver': get_gpu_driver_version(),
})
return run_env
def report(perf_time, cuda_time, post_time, ngpus, run_env, gpu_device_properties, benchmark_data):
hostname = run_env['hostname']
cpu_name = run_env['cpu_name']
cpu_phy_mem = round(psutil.virtual_memory().total/1000000000, 2)
cpu_used_mem = round(psutil.virtual_memory().used/1000000000, 2)
cpu_phy_cores = psutil.cpu_count(logical=False)
cpu_log_cores = psutil.cpu_count(logical=True)
cpu_curr_freq = round(psutil.cpu_freq().current, 2)
cpu_min_freq = psutil.cpu_freq().min
cpu_max_freq = psutil.cpu_freq().max
gpu_name = run_env['gpu_name']
gpu_total_mem = round(gpu_device_properties['totalGlobalMem']/1000000000, 2)
gpu_clock_rate = round(gpu_device_properties['clockRate']/1000, 2)
gpu_multiprocessor_num = gpu_device_properties['multiProcessorCount']
gpu_driver_ver = run_env['gpu_driver_ver']
gpu_runtime_ver = run_env['gpu_runtime_ver']
nvml_driver_ver = run_env['nvml_driver_ver']
logger.debug(f' - hostname: {hostname}')
logger.info(f' - [CPU] Averaged elapsed time: {perf_time:.9f} s')
if post_time is not None:
logger.info(f' - [CPU] Averaged postprocessing Time: {post_time:.6f} s')
benchmark_data['cpu_post_time'] = post_time
logger.info(f' - [CPU] Processor type: {cpu_name}')
logger.debug(f' - [CPU] Total physical memory: {cpu_phy_mem} GB')
logger.debug(f' - [CPU] Total used memory: {cpu_used_mem} GB')
logger.debug(f' - [CPU] Number of physical cores: {cpu_phy_cores}, and logical cores: {cpu_log_cores}')
    logger.debug(f' - [CPU] Frequency current (MHz): {cpu_curr_freq}, min: {cpu_min_freq}, and max: {cpu_max_freq}')
logger.info(' -')
logger.info(f' - [GPU] Averaged elapsed time: {cuda_time:.9f} s {"(unused)" if ngpus == 0 else ""}')
logger.info(f' - [GPU] GPU device name: {gpu_name}')
logger.debug(f' - [GPU] Total global memory: {gpu_total_mem} GB')
    logger.debug(f' - [GPU] Clock frequency (MHz): {gpu_clock_rate}')
logger.debug(f' - [GPU] Multi processor count: {gpu_multiprocessor_num}')
logger.debug(f' - [GPU] CUDA driver version: {gpu_driver_ver} ({nvml_driver_ver})')
logger.debug(f' - [GPU] CUDA runtime version: {gpu_runtime_ver}')
logger.info('')
benchmark_data['cpu_time'] = perf_time
benchmark_data['cpu_phy_mem'] = cpu_phy_mem
benchmark_data['cpu_used_mem'] = cpu_used_mem
benchmark_data['cpu_phy_cores'] = cpu_phy_cores
benchmark_data['cpu_log_cores'] = cpu_log_cores
benchmark_data['cpu_current_freq'] = cpu_curr_freq
benchmark_data['gpu_time'] = cuda_time
benchmark_data['gpu_total_mem'] = gpu_total_mem
benchmark_data['gpu_clock_freq'] = gpu_clock_rate
benchmark_data['gpu_multiprocessor_num'] = gpu_multiprocessor_num
return benchmark_data
def save_benchmark_data(
num_qubits, sim_config_hash, benchmark_data, full_data, filepath, save=True):
try:
full_data[num_qubits][sim_config_hash] = benchmark_data
except KeyError:
if num_qubits not in full_data:
full_data[num_qubits] = {}
if sim_config_hash not in full_data[num_qubits]:
full_data[num_qubits][sim_config_hash] = {}
full_data[num_qubits][sim_config_hash] = benchmark_data
if save:
def dump():
with open(filepath, 'w') as f:
json.dump(full_data, f, indent=2)
call_by_root(dump)
logger.debug(f'Saved {filepath} as JSON')
return full_data
def load_benchmark_data(filepath):
try:
with open(filepath, 'r') as f:
full_data = json.load(f)
logger.debug(f'Loaded {filepath} as JSON')
# If the data file does not exist, we'll create it later
except FileNotFoundError:
full_data = {}
logger.debug(f'{filepath} not found')
return full_data
def create_cache(cache_dir, required_subdirs):
for subdir in required_subdirs:
path = os.path.join(cache_dir, subdir)
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
# TODO: upstream this to cupyx.profiler.benchmark
class L2flush:
""" Handly utility for flushing the current device's L2 cache.
This instance must be created and used on the same (CuPy's) current device/stream
as those used by the target workload.
Reimplementation of the l2flush class from NVBench, see
https://github.com/NVIDIA/nvbench/blob/main/nvbench/detail/l2flush.cuh.
"""
def __init__(self):
self.l2_size = cp.cuda.Device().attributes['L2CacheSize']
self.mem = cp.cuda.alloc(self.l2_size) if self.l2_size > 0 else None
def flush(self):
if self.mem:
self.mem.memset_async(0, self.l2_size)
# TODO: we should convince upstream to allow this use case.
def benchmark_with_prerun(
func, args=(), kwargs={}, *,
n_warmup=10, n_repeat=10000, pre_run=None):
"""A simplified version of cupyx.profiler.benchmark(), with the additional
support for a user-supplied function ("pre_run") that's run every time
before the target "func" is run.
    This is simplified to support single-GPU use only.
"""
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
try:
from cupyx.profiler._time import _PerfCaseResult
except ImportError:
_PerfCaseResult = None
class _Result: pass
cpu_times = []
gpu_times = [[]]
for _ in range(n_warmup):
func(*args, **kwargs)
for _ in range(n_repeat):
if pre_run:
pre_run(*args, **kwargs)
e1.record()
t1 = time.perf_counter()
func(*args, **kwargs)
t2 = time.perf_counter()
e2.record()
e2.synchronize()
cpu_times.append(t2-t1)
gpu_times[0].append(cp.cuda.get_elapsed_time(e1, e2)*1E-3)
if _PerfCaseResult:
result = _PerfCaseResult(
func.__name__,
np.asarray([cpu_times] + gpu_times, dtype=np.float64),
(cp.cuda.Device().id,))
else:
result = _Result()
result.cpu_times = cpu_times
result.gpu_times = gpu_times
return result
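# Usage sketch (illustrative; `my_kernel` and `x` are stand-ins for a real workload):
# flush the L2 cache before every timed repetition so each run starts cold:
#
#   l2 = L2flush()
#   result = benchmark_with_prerun(
#       my_kernel, args=(x,), n_warmup=5, n_repeat=100,
#       pre_run=lambda *args, **kwargs: l2.flush())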
class EarlyReturnError(RuntimeError): pass
is_unique = lambda a: len(set(a)) == len(a)
is_disjoint = lambda a, b: not bool(set(a) & set(b))
def check_targets_controls(targets, controls, n_qubits):
# simple checks for targets and controls
assert len(targets) >= 1, "must have at least 1 target qubit"
assert is_unique(targets), "qubit indices in targets must be unique"
assert is_unique(controls), "qubit indices in controls must be unique"
assert is_disjoint(targets, controls), "qubit indices in targets and controls must be disjoint"
    assert all(0 <= q < n_qubits for q in targets + controls), f"target and control qubit indices must be in range [0, {n_qubits})"
def check_sequence(seq, expected_size=None, max_size=None, name=''):
if expected_size is not None:
assert len(seq) == expected_size, f"the provided {name} must be of length {expected_size}"
size = expected_size
elif max_size is not None:
assert len(seq) <= max_size, f"the provided {name} must have length <= {max_size}"
size = max_size
else:
assert False
    assert is_unique(seq), f"the provided {name} must have non-repetitive entries"
    assert all(0 <= i < size for i in seq), f"entries in the {name} must be in [0, {size})"
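# Example (illustrative): validating a controlled 2-qubit gate on a 5-qubit register
#   check_targets_controls([0, 1], [4], 5)                                # passes
#   check_sequence([2, 0, 1], expected_size=3, name="qubit permutation")  # passes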
| cuQuantum-main | benchmarks/cuquantum_benchmarks/_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import sys
from cmath import pi, exp
try:
import pennylane
except ImportError:
pennylane = None
from .frontend import Frontend
class Pennylane(Frontend):
def __init__(self, nqubits, config):
if pennylane is None:
raise RuntimeError('pennylane is not installed')
self.nqubits = nqubits
self.config = config
def generateCircuit(self, gateSeq):
last_g = gateSeq[-1]
assert last_g.id == "measure" # TODO: relax this?
def circuit():
measured_qs = None
for g in gateSeq:
if g.id =='h':
pennylane.Hadamard(wires=g.targets)
elif g.id =='x':
pennylane.PauliX(wires=g.targets)
elif g.id =='cnot':
pennylane.CNOT(wires=[g.controls, g.targets])
elif g.id =='cz':
pennylane.CZ(wires=[g.controls, g.targets])
elif g.id =='rz':
pennylane.RZ(g.params, g.targets)
elif g.id =='rx':
pennylane.RX(g.params, g.targets)
elif g.id =='ry':
pennylane.RY(g.params, g.targets)
elif g.id =='czpowgate':
CZPow_matrix = [[1,0],[0,exp(1j*pi*g.params)]]
pennylane.ControlledQubitUnitary(CZPow_matrix,control_wires=g.controls, wires=g.targets)
elif g.id =='swap':
pennylane.SWAP(wires=[g.targets[0], g.targets[1]])
elif g.id =='cu':
pennylane.ControlledQubitUnitary(g.matrix, control_wires=g.controls, wires=g.targets)
elif g.id == 'u':
pennylane.QubitUnitary(g.matrix, wires=g.targets)
elif g.id == "measure":
measured_qs = g.targets
else:
raise NotImplementedError(f"The gate type {g.id} is not defined")
return pennylane.sample(wires=measured_qs)
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend_pny.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cmath
import logging
from math import pi
import numpy as np
from .frontend import Frontend
from .._utils import call_by_root
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
class Dumper(Frontend):
"""Special frontend for dumping the gate sequence as pure text to disk.
    Each gate (or operation) is stored as 3 lines, with elements separated by a single space:
1. n_targets n_controls
2. targets controls
3. contiguity actual_matrix_data
Note that the qubit IDs are zero-based. The matrix data is flattened to a 1D contiguous
array of length 2**(2*n_targets). The contiguity is a single character "C" (for C-major,
or row-major) or "F" (for Fortran-major, or column-major) for how to interpret the matrix.
All complex numbers are stored as two real numbers (ex: 0.5-0.1j -> "0.5 -0.1").
As an example, a CCX gate acting on qubit 0 and controlled by qubits 2 & 4 is stored as
'''
1 2\n
0 2 4\n
C 0.0 0.0 1.0 0.0 1.0 0.0 0.0 0.0\n
'''
Currently the measurement operation at the end of the gate sequence is not stored.
An empty line can be used to separate different gates/operations and improve readability,
but it is not required.
"""
def __init__(self, nqubits, config):
precision = config['precision']
self.dtype = np.complex64 if precision == 'single' else np.complex128
self.dtype = np.dtype(self.dtype)
circuit_filename = config['circuit_filename']
self.circuit_filename = circuit_filename.replace('.pickle', '_raw.txt')
self.nqubits = nqubits
self.order = 'C' # TODO
self.digits = 12 # TODO
def _dump_op(self, op, targets, controls=()):
op = np.array2string(
op.astype(self.dtype).reshape(-1, order=self.order).view(self.dtype.char.lower()),
max_line_width=np.inf,
precision=self.digits,
)
if isinstance(targets, int):
targets = (targets,)
if isinstance(controls, int):
controls = (controls,)
op_data = f"{len(targets)} {len(controls)}\n"
for t in targets:
op_data += f"{t} "
for c in controls:
op_data += f"{c} "
op_data += f"\n{self.order} "
op_data += f"{op[1:-1]}\n\n"
return op_data
def _get_rotation_matrix(self, theta, phi, lam):
matrix = np.empty((2, 2), dtype=self.dtype)
theta *= 0.5
matrix[0, 0] = cmath.cos(theta)
matrix[0, 1] = - cmath.sin(theta) * cmath.exp(1j*lam)
matrix[1, 0] = cmath.sin(theta) * cmath.exp(1j*phi)
matrix[1, 1] = cmath.cos(theta) * cmath.exp(1j*(phi+lam))
matrix = np.asarray(matrix)
return matrix
def generateCircuit(self, gateSeq):
circuit = ''
for g in gateSeq:
if g.id == 'h':
circuit += self._dump_op(
np.asarray([[1, 1], [1, -1]])/np.sqrt(2), g.targets)
elif g.id == 'x':
circuit += self._dump_op(
np.asarray([[0, 1], [1, 0]]), g.targets)
elif g.id == 'cnot':
# TODO: use 4*4 version (merge targets & controls)?
circuit += self._dump_op(
np.asarray([[0, 1], [1, 0]]), g.targets, g.controls)
elif g.id == 'cz':
# TODO: use 4*4 version (merge targets & controls)?
circuit += self._dump_op(
np.asarray([[1, 0], [0, -1]]), g.targets, g.controls)
elif g.id == 'rz':
circuit += self._dump_op(
self._get_rotation_matrix(0, g.params, 0), g.targets)
elif g.id == 'rx':
circuit += self._dump_op(
self._get_rotation_matrix(g.params, -pi/2, pi/2), g.targets)
elif g.id == 'ry':
circuit += self._dump_op(
self._get_rotation_matrix(g.params, 0, 0), g.targets)
elif g.id == 'czpowgate':
matrix = np.eye(2, dtype=self.dtype)
matrix[1, 1] = cmath.exp(1j*pi*g.params)
circuit += self._dump_op(matrix, g.targets, g.controls)
elif g.id == 'swap':
assert len(g.targets) == 2
matrix = np.eye(4, dtype=self.dtype)
matrix[1:3, 1:3] = [[0, 1], [1, 0]]
circuit += self._dump_op(matrix, g.targets)
elif g.id == 'cu':
circuit += self._dump_op(g.matrix, g.targets, g.controls)
elif g.id == 'u':
circuit += self._dump_op(g.matrix, g.targets)
elif g.id == 'measure':
pass # treated as no-op for now
else:
raise NotImplementedError(f"the gate type {g.id} is not defined")
def dump():
logger.info(f"dumping (raw) circuit as {self.circuit_filename} ...")
with open(self.circuit_filename, 'w') as f:
f.write(circuit)
call_by_root(dump)
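# A minimal reader sketch for the text format documented above (illustrative and
# not used by the benchmarks; it assumes exactly the layout _dump_op() emits,
# i.e. 3 non-empty lines per operation with interleaved real/imag floats):
def _load_raw_circuit(filename, dtype=np.complex128):
    """Parse a dumped circuit file back into (targets, controls, matrix) tuples."""
    ops = []
    with open(filename) as f:
        lines = [ln for ln in f.read().splitlines() if ln.strip()]
    for i in range(0, len(lines), 3):
        n_targets, n_controls = map(int, lines[i].split())
        qubits = list(map(int, lines[i + 1].split()))
        targets, controls = qubits[:n_targets], qubits[n_targets:]
        fields = lines[i + 2].split()
        order = fields[0]  # "C" (row-major) or "F" (column-major)
        flat = np.asarray(fields[1:], dtype=np.float64)
        dim = 2 ** n_targets
        matrix = (flat[0::2] + 1j * flat[1::2]).astype(dtype).reshape((dim, dim), order=order)
        ops.append((targets, controls, matrix))
    return ops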
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend_dumper.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
class Frontend:
# The current assumptions for measurement:
    # 1. we only measure once throughout a gate sequence, and it's done at the end of the sequence
    # 2. measure is applied to the first x (most likely x=all) qubits in the circuit
# When we introduce benchmarks that do mid-circuit measurement, we must revisit the assumption!
def __init__(self, nqubits, config):
raise NotImplementedError
def generateCircuit(self, gateSeq):
raise NotImplementedError
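# A minimal subclass sketch (illustrative, not part of the package): a concrete
# frontend stores the configuration and translates the Gate sequence into the
# circuit object that its matching backend consumes, e.g.:
#
#   class ListFrontend(Frontend):
#       def __init__(self, nqubits, config):
#           self.nqubits = nqubits
#           self.config = config
#       def generateCircuit(self, gateSeq):
#           return [(g.id, g.targets, g.controls) for g in gateSeq]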
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .frontend_cirq import Cirq
from .frontend_qiskit import Qiskit
from .frontend_pny import Pennylane
from .frontend_qulacs import Qulacs
try:
from .frontend_naive import Naive
except ImportError:
Naive = None
frontends = {
'cirq': Cirq,
'qiskit': Qiskit,
'pennylane': Pennylane,
'qulacs': Qulacs
}
if Naive:
frontends['naive'] = Naive
def createFrontend(frontend_name, nqubits, config):
return frontends[frontend_name](nqubits, config)
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
try:
import cirq
except ImportError:
cirq = None
from .frontend import Frontend
class Cirq(Frontend):
def __init__(self, nqubits, config):
if cirq is None:
raise RuntimeError('cirq is not installed')
self.nqubits = nqubits
self.config = config
def generateCircuit(self, gateSeq):
qubits = cirq.LineQubit.range(self.nqubits)
circuit = cirq.Circuit()
for g in gateSeq:
if g.id == 'h':
circuit.append(cirq.H(qubits[g.targets]))
elif g.id == 'x':
circuit.append(cirq.X(qubits[g.targets]))
elif g.id == 'cnot':
circuit.append(cirq.CNOT(qubits[g.controls], qubits[g.targets]))
elif g.id == 'cz':
circuit.append(cirq.CZ(qubits[g.controls], qubits[g.targets]))
elif g.id == 'rz':
circuit.append(cirq.rz(g.params).on(qubits[g.targets]))
elif g.id == 'rx':
circuit.append(cirq.rx(g.params).on(qubits[g.targets]))
elif g.id == 'ry':
circuit.append(cirq.ry(g.params).on(qubits[g.targets]))
elif g.id == 'czpowgate':
circuit.append(cirq.CZPowGate(exponent=g.params).on(qubits[g.controls], qubits[g.targets]))
elif g.id == 'swap':
assert len(g.targets) == 2
circuit.append(cirq.SWAP(qubits[g.targets[0]], qubits[g.targets[1]]))
elif g.id == 'cu':
U_gate = cirq.MatrixGate(g.matrix, name=g.name)
circuit.append(U_gate.on(*[qubits[i] for i in g.targets]).controlled_by(qubits[g.controls]))
elif g.id == 'u':
U_gate = cirq.MatrixGate(g.matrix, name=g.name)
circuit.append(U_gate.on(*[qubits[i] for i in g.targets]))
elif g.id == 'measure':
circuit.append(cirq.measure(*[qubits[i] for i in g.targets], key='result'))
else:
raise NotImplementedError(f"The gate type {g.id} is not defined")
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend_cirq.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from cmath import pi, exp
try:
import qulacs
except ImportError:
qulacs = None
from .frontend import Frontend
class Qulacs(Frontend):
def __init__(self, nqubits, config):
if qulacs is None:
raise RuntimeError('qulacs is not installed')
self.nqubits = nqubits
self.config = config
def generateCircuit(self, gateSeq):
circuit = qulacs.QuantumCircuit(self.nqubits)
for g in gateSeq:
if g.id == 'h':
circuit.add_H_gate(g.targets)
elif g.id == 'x':
circuit.add_X_gate(g.targets)
elif g.id == 'cnot':
circuit.add_CNOT_gate(g.controls, g.targets)
elif g.id == 'cz':
circuit.add_CZ_gate(g.controls, g.targets)
elif g.id == 'rz':
circuit.add_RZ_gate(g.targets, g.params)
elif g.id == 'rx':
circuit.add_RX_gate(g.targets, g.params)
elif g.id == 'ry':
circuit.add_RY_gate(g.targets, g.params)
elif g.id == 'czpowgate':
CZPow_matrix = [[1, 0], [0, exp(1j*pi*g.params)]]
CZPowgate = qulacs.gate.DenseMatrix(g.targets, CZPow_matrix)
CZPowgate.add_control_qubit(g.controls, 1)
circuit.add_gate(CZPowgate)
elif g.id == 'swap':
assert len(g.targets) == 2
circuit.add_SWAP_gate(g.targets[0], g.targets[1])
elif g.id == 'cu':
gate = qulacs.gate.DenseMatrix(g.targets, g.matrix)
gate.add_control_qubit(g.controls, 1)
circuit.add_gate(gate)
elif g.id == 'u':
gate = qulacs.gate.DenseMatrix(g.targets, g.matrix)
circuit.add_gate(gate)
elif g.id == 'measure':
for i in g.targets:
circuit.add_gate(qulacs.gate.Measurement(i, i))
else:
raise NotImplementedError(f"The gate type {g.id} is not defined")
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend_qulacs.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from math import pi
try:
import qiskit
from qiskit.extensions import UnitaryGate
except ImportError:
qiskit = UnitaryGate = None
from .frontend import Frontend
class Qiskit(Frontend):
def __init__(self, nqubits, config):
if qiskit is None:
raise RuntimeError("qiskit is not installed")
self.nqubits = nqubits
self.config = config
def generateCircuit(self, gateSeq):
last_g = gateSeq[-1]
assert last_g.id == "measure" # TODO: relax this?
circuit = qiskit.QuantumCircuit(self.nqubits, len(last_g.targets))
for g in gateSeq:
if g.id == 'h':
circuit.h(g.targets)
elif g.id == 'x':
circuit.x(g.targets)
elif g.id == 'cnot':
circuit.cnot(g.controls, g.targets)
elif g.id == 'cz':
circuit.cz(g.controls, g.targets)
elif g.id == 'rz':
circuit.rz(g.params, g.targets)
elif g.id == 'rx':
circuit.rx(g.params, g.targets)
elif g.id == 'ry':
circuit.ry(g.params, g.targets)
elif g.id == 'czpowgate':
circuit.cp(pi*g.params, g.controls, g.targets)
elif g.id == 'swap':
circuit.swap(*g.targets)
elif g.id == 'cu':
U_gate = UnitaryGate(g.matrix, g.name).control(1)
circuit.append(U_gate, [g.controls]+g.targets[::-1])
elif g.id == 'u':
# TODO: give the gate a name?
U_gate = UnitaryGate(g.matrix)
circuit.append(U_gate, g.targets[::-1])
elif g.id == 'measure':
circuit.measure(g.targets, g.targets)
else:
raise NotImplementedError(f"The gate type {g.id} is not defined")
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/frontends/frontend_qiskit.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import time
import warnings
import numpy as np
import cupy as cp
from cuquantum import contract, contract_path, CircuitToEinsum
from cuquantum import cutensornet as cutn
from .backend import Backend
from .._utils import convert_einsum_to_txt, generate_size_dict_from_operands, is_running_mpiexec
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
class cuTensorNet(Backend):
def __init__(self, ngpus, ncpu_threads, precision, **kwargs):
if ngpus != 1:
raise ValueError("the cutn backend must be run with --ngpus 1 (regardless if MPI is in use)")
self.ncpu_threads = ncpu_threads
self.precision = precision
self.nqubits = kwargs.pop('nqubits')
self.rank = 0
self.handle = cutn.create()
try:
# cuQuantum Python 22.11+ supports nonblocking & auto-MPI
opts = cutn.NetworkOptions(handle=self.handle, blocking="auto")
if is_running_mpiexec():
from mpi4py import MPI # init should be already done earlier
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
device_id = rank % cp.cuda.runtime.getDeviceCount()
cp.cuda.Device(device_id).use()
cutn.distributed_reset_configuration(
self.handle, *cutn.get_mpi_comm_pointer(comm)
)
logger.debug("enable MPI support for cuTensorNet")
self.rank = rank
except (TypeError, AttributeError):
# cuQuantum Python 22.07 or below
opts = cutn.NetworkOptions(handle=self.handle)
self.network_opts = opts
self.n_samples = kwargs.pop('nhypersamples')
def __del__(self):
cutn.destroy(self.handle)
def preprocess_circuit(self, circuit, *args, **kwargs):
circuit_filename = kwargs.pop('circuit_filename')
target = kwargs.pop('target')
pauli = kwargs.pop('pauli')
preprocess_data = {}
t1 = time.perf_counter()
if self.precision == 'single':
circuit_converter = CircuitToEinsum(circuit, dtype='complex64', backend=cp)
else:
circuit_converter = CircuitToEinsum(circuit, dtype='complex128', backend=cp)
t2 = time.perf_counter()
time_circ2einsum = t2 - t1
logger.info(f'CircuitToEinsum took {time_circ2einsum} s')
t1 = time.perf_counter()
if target == 'amplitude':
# any bitstring would give same TN topology, so let's just pick "000...0"
self.expression, self.operands = circuit_converter.amplitude('0'*self.nqubits)
elif target == 'state_vector':
self.expression, self.operands = circuit_converter.state_vector()
elif target == 'expectation':
# new in cuQuantum Python 22.11
assert pauli is not None
logger.info(f"compute expectation value for Pauli string: {pauli}")
self.expression, self.operands = circuit_converter.expectation(pauli)
else:
# TODO: add other CircuitToEinsum methods?
raise NotImplementedError(f"the target {target} is not supported")
t2 = time.perf_counter()
time_tn = t2 - t1
logger.info(f'{target}() took {time_tn} s')
tn_format = os.environ.get('CUTENSORNET_DUMP_TN')
if tn_format == 'txt':
size_dict = generate_size_dict_from_operands(
self.expression, self.operands)
convert_einsum_to_txt(
self.expression, size_dict, circuit_filename + '.txt')
elif tn_format is not None:
# TODO: dump expression & size_dict as plain unicode?
raise NotImplementedError(f"the TN format {tn_format} is not supported")
self.network = cutn.Network(
self.expression, *self.operands, options=self.network_opts)
t1 = time.perf_counter()
path, opt_info = self.network.contract_path(
# TODO: samples may be too large for small circuits
optimize={'samples': self.n_samples, 'threads': self.ncpu_threads})
t2 = time.perf_counter()
time_path = t2 - t1
logger.info(f'contract_path() took {time_path} s')
logger.debug(f'# samples: {self.n_samples}')
logger.debug(opt_info)
self.path = path
self.opt_info = opt_info
preprocess_data = {
'CircuitToEinsum': time_circ2einsum,
target: time_tn,
'contract_path': time_path,
}
return preprocess_data
def run(self, circuit, nshots=0):
if self.rank == 0 and nshots > 0:
warnings.warn("the cutn backend does not support sampling")
self.network.contract()
# TODO: support these return values?
return {'results': None, 'post_results': None, 'run_data': {}}
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_cutn.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
class Backend:
def __init__(self, ngpus, ncpu_threads, precision, *args, **kwargs):
raise NotImplementedError
def preprocess_circuit(self, circuit, *args, **kwargs):
return {}
def pre_run(self, circuit, *args, **kwargs):
pass
def run(self, circuit, nshots):
raise NotImplementedError
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import os
import time
import warnings
try:
import qulacs
except ImportError:
qulacs = None
from .backend import Backend
class Qulacs(Backend):
def __init__(self, ngpus, ncpu_threads, precision, *args, identifier=None, **kwargs):
if qulacs is None:
raise RuntimeError("qulacs is not installed")
if precision != 'double':
raise ValueError("qulacs backends only support double precision")
self.identifier = identifier
self.ngpus = ngpus
self.ncpu_threads = ncpu_threads
self.nqubits = kwargs.pop('nqubits')
self.state = self.create_qulacs_state()
def create_qulacs_state(self):
if self.identifier == 'qulacs-gpu':
if self.ngpus > 1:
raise ValueError(f"cannot specify --ngpus > 1 for the backend {self.identifier}")
try:
state = qulacs.QuantumStateGpu(self.nqubits)
except AttributeError as e:
raise RuntimeError("please clone Qulacs and build it from source via \"USE_GPU=Yes "
"pip install .\", or follow Qulacs instruction for customized "
"builds") from e
elif self.identifier == 'qulacs-cpu':
if self.ngpus != 0:
raise ValueError(f"cannot specify --ngpus for the backend {self.identifier}")
if self.ncpu_threads > 1 and self.ncpu_threads != (
int(os.environ.get("OMP_NUM_THREADS", "-1")) or int(os.environ.get("QULACS_NUM_THREADS", "-1"))):
warnings.warn(f"--ncputhreads is ignored, for {self.identifier} please set the env var OMP_NUM_THREADS or QULACS_NUM_THREADS instead",
stacklevel=2)
state = qulacs.QuantumState(self.nqubits)
else:
raise ValueError(f"the backend {self.identifier} is not recognized")
return state
def run(self, circuit, nshots=1024):
# init/reset sv
self.state.set_zero_state()
# actual circuit simulation
circuit.update_quantum_state(self.state)
run_data = {}
if nshots > 0:
results = self.state.sampling(nshots)
else:
results = self.state.get_vector() # TODO: too heavyweight?
return {'results': results, 'post_results': None, 'run_data': run_data}
QulacsGpu = functools.partial(Qulacs, identifier='qulacs-gpu')
QulacsCpu = functools.partial(Qulacs, identifier='qulacs-cpu')
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_qulacs.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import logging
import os
import time
import warnings
import sys
import numpy as np
try:
import pennylane
except ImportError:
pennylane = None
from .backend import Backend
from .._utils import call_by_root, EarlyReturnError, is_running_mpi
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
class Pennylane(Backend):
def __init__(self, ngpus, ncpu_threads, precision, *args, identifier=None, **kwargs):
if pennylane is None:
raise RuntimeError("pennylane is not installed")
self.dtype = np.complex64 if precision == "single" else np.complex128
self.identifier = identifier
self.ngpus = ngpus
self.ncpu_threads = ncpu_threads
self.nqubits = kwargs.pop('nqubits')
self.circuit = None
def _make_qnode(self, circuit, nshots=1024, **kwargs):
if self.identifier == "pennylane-lightning-gpu":
if self.ngpus == 1:
try:
import pennylane_lightning_gpu
except ImportError as e:
raise RuntimeError("PennyLane-Lightning-GPU plugin is not installed") from e
else:
raise ValueError(f"cannot specify --ngpus > 1 for the backend {self.identifier}")
dev = pennylane.device("lightning.gpu", wires=self.nqubits, shots=nshots, c_dtype=self.dtype)
elif self.identifier == "pennylane-lightning-kokkos":
            # there's no way for us to query what execution space (=backend) kokkos supports
            # at runtime, so let's just set up Kokkos::InitArguments and hope kokkos does the
            # right thing...
try:
import pennylane_lightning_kokkos
except ImportError as e:
raise RuntimeError("PennyLane-Lightning-Kokkos plugin is not installed") from e
args = pennylane_lightning_kokkos.lightning_kokkos.InitArguments()
args.num_threads = self.ncpu_threads
args.disable_warnings = int(logger.getEffectiveLevel() != logging.DEBUG)
## Disable MPI because it's unclear if pennylane actually supports it (at least it's untested)
# # if we're running MPI, we want to know now and get it init'd before kokkos is
# MPI = is_running_mpi()
# if MPI:
# comm = MPI.COMM_WORLD
# args.ndevices = min(comm.Get_size(), self.ngpus) # note: kokkos uses 1 GPU per process
dev = pennylane.device(
"lightning.kokkos", wires=self.nqubits, shots=nshots, c_dtype=self.dtype,
sync=False,
kokkos_args=args)
elif self.identifier == "pennylane-lightning-qubit":
try:
import pennylane_lightning
except ImportError as e:
raise RuntimeError("PennyLane-Lightning plugin is not installed") from e
if self.ngpus != 0:
raise ValueError(f"cannot specify --ngpus for the backend {self.identifier}")
if self.ncpu_threads > 1 and self.ncpu_threads != int(os.environ.get("OMP_NUM_THREADS", "-1")):
warnings.warn(f"--ncputhreads is ignored, for {self.identifier} please set the env var OMP_NUM_THREADS instead",
stacklevel=2)
dev = pennylane.device("lightning.qubit", wires=self.nqubits, shots=nshots, c_dtype=self.dtype)
elif self.identifier == "pennylane":
if self.ngpus != 0:
raise ValueError(f"cannot specify --ngpus for the backend {self.identifier}")
dev = pennylane.device("default.qubit", wires=self.nqubits, shots=nshots, c_dtype=self.dtype)
elif self.identifier == "pennylane-dumper":
import cloudpickle
import cuquantum_benchmarks
cloudpickle.register_pickle_by_value(cuquantum_benchmarks)
# note: before loading the pickle, one should check if the Python version agrees
# (probably pennylane's version too)
py_major_minor = f'{sys.version_info.major}.{sys.version_info.minor}'
circuit_filename = kwargs.pop('circuit_filename')
circuit_filename += f"_pny_raw_py{py_major_minor}.pickle"
def dump():
logger.info(f"dumping pennylane (raw) circuit as {circuit_filename} ...")
with open(circuit_filename, 'wb') as f:
cloudpickle.dump(circuit, f) # use highest protocol
logger.info("early exiting as the dumper task is completed")
call_by_root(dump)
raise EarlyReturnError
else:
raise ValueError(f"the backend {self.identifier} is not recognized")
qnode = pennylane.QNode(circuit, device=dev)
return qnode
def preprocess_circuit(self, circuit, *args, **kwargs):
nshots = kwargs.get('nshots', 1024)
t1 = time.perf_counter()
self.circuit = self._make_qnode(circuit, nshots, **kwargs)
t2 = time.perf_counter()
time_make_qnode = t2 - t1
logger.info(f'make qnode took {time_make_qnode} s')
return {'make_qnode': time_make_qnode}
def run(self, circuit, nshots=1024):
# both circuit & nshots are set in preprocess_circuit()
results = self.circuit()
post_res = None # TODO
run_data = {}
return {'results': results, 'post_results': post_res, 'run_data': run_data}
PnyLightningGpu = functools.partial(Pennylane, identifier='pennylane-lightning-gpu')
PnyLightningCpu = functools.partial(Pennylane, identifier='pennylane-lightning-qubit')
PnyLightningKokkos = functools.partial(Pennylane, identifier='pennylane-lightning-kokkos')
Pny = functools.partial(Pennylane, identifier='pennylane')
PnyDumper = functools.partial(Pennylane, identifier='pennylane-dumper')
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_pny.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .backend_cirq import Cirq
from .backend_cutn import cuTensorNet
from .backend_pny import (Pny, PnyLightningGpu, PnyLightningCpu,
PnyLightningKokkos, PnyDumper)
from .backend_qsim import Qsim, QsimCuda, QsimCusv, QsimMgpu
from .backend_qiskit import Aer, AerCuda, AerCusv, CusvAer
from .backend_qulacs import QulacsGpu, QulacsCpu
try:
from .backend_naive import Naive
except ImportError:
Naive = None
backends = {
'aer': Aer,
'aer-cuda': AerCuda,
'aer-cusv': AerCusv,
'cusvaer': CusvAer,
'cirq': Cirq,
'cutn': cuTensorNet,
'qsim': Qsim,
'qsim-cuda': QsimCuda,
'qsim-cusv': QsimCusv,
'qsim-mgpu': QsimMgpu,
'pennylane': Pny,
'pennylane-lightning-gpu': PnyLightningGpu,
'pennylane-lightning-qubit': PnyLightningCpu,
'pennylane-lightning-kokkos': PnyLightningKokkos,
'pennylane-dumper': PnyDumper,
'qulacs-cpu': QulacsCpu,
'qulacs-gpu': QulacsGpu,
}
if Naive:
backends['naive'] = Naive
def createBackend(backend_name, ngpus, ncpu_threads, precision, *args, **kwargs):
return backends[backend_name](ngpus, ncpu_threads, precision, *args, **kwargs)
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import math
import functools
import logging
import time
from importlib.metadata import version
import numpy as np
import cupy as cp
try:
import qiskit
except ImportError:
qiskit = None
from .backend import Backend
from .._utils import get_mpi_size, get_mpi_rank
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
class Qiskit(Backend):
def __init__(self, ngpus, ncpu_threads, precision, *args, identifier=None, **kwargs):
if qiskit is None:
raise RuntimeError("qiskit is not installed")
self.precision = precision
self.identifier = identifier
self.nqubits = kwargs.pop('nqubits')
self.backend = self.create_aer_backend(identifier, ngpus, ncpu_threads, *args, **kwargs)
def preprocess_circuit(self, circuit, *args, **kwargs):
t0 = time.perf_counter()
self.transpiled_qc = qiskit.transpile(circuit, self.backend) # (circuit, basis_gates=['u3', 'cx'], backend=self.backend)
t1 = time.perf_counter()
time_transpile = t1 - t0
logger.info(f'transpile took {time_transpile} s')
return {'transpile': time_transpile}
def run(self, circuit, nshots=1024):
run_data = {}
transpiled_qc = self.transpiled_qc
if nshots > 0:
results = self.backend.run(transpiled_qc, shots=nshots, memory=True)
else:
results = self.backend.run(transpiled_qc, shots=0, memory=True)
# workaround for memory allocation failure for cusvaer 22.11/23.03
if self.identifier == 'cusvaer' and self._need_sync():
self._synchronize()
post_res_list = results.result().get_memory()
post_res_list = [list(i) for i in post_res_list]
post_res = np.array(post_res_list)
return {'results': results, 'post_results': post_res, 'run_data': run_data}
def create_aer_backend(self, identifier, ngpus, ncpu_threads, *args, **kwargs):
nfused = kwargs.pop('nfused')
try:
# we defer importing Aer as late as possible, due to a bug it has that
# could init all GPUs prematurely
from qiskit.providers.aer import AerSimulator
except ImportError as e:
raise RuntimeError("qiskit-aer (or qiskit-aer-gpu) is not installed") from e
if identifier == 'cusvaer':
import cusvaer
cusvaer_global_index_bits = kwargs.pop('cusvaer_global_index_bits')
cusvaer_p2p_device_bits = kwargs.pop('cusvaer_p2p_device_bits')
cusvaer_comm_plugin_type = kwargs.pop('cusvaer_comm_plugin_type')
cusvaer_comm_plugin_soname = kwargs.pop('cusvaer_comm_plugin_soname')
# convert comm plugin type to enum
if not cusvaer_comm_plugin_type:
cusvaer_comm_plugin_type = cusvaer.CommPluginType.MPI_AUTO
elif cusvaer_comm_plugin_type == 'self':
cusvaer_comm_plugin_type = cusvaer.CommPluginType.SELF
elif cusvaer_comm_plugin_type == 'mpi_auto':
cusvaer_comm_plugin_type = cusvaer.CommPluginType.MPI_AUTO
elif cusvaer_comm_plugin_type == 'mpi_openmpi':
cusvaer_comm_plugin_type = cusvaer.CommPluginType.MPI_OPENMPI
elif cusvaer_comm_plugin_type == 'mpi_mpich':
cusvaer_comm_plugin_type = cusvaer.CommPluginType.MPI_MPICH
elif cusvaer_comm_plugin_type == 'external':
cusvaer_comm_plugin_type = cusvaer.CommPluginType.EXTERNAL
else:
raise ValueError(f"Unknown cusvaer_comm_plugin_type, {cusvaer_comm_plugin_type}")
if not cusvaer_comm_plugin_soname: # empty string
if cusvaer_comm_plugin_type == cusvaer.CommPluginType.EXTERNAL:
raise ValueError("cusvaer_comm_plugin_soname should be specified "
"if cusvaer_comm_plugin_type=external is specified")
cusvaer_comm_plugin_soname = None
cusvaer_data_transfer_buffer_bits = kwargs.pop('cusvaer_data_transfer_buffer_bits')
if ngpus != 1:
raise ValueError("the cusvaer requires 1 GPU per process (--ngpus 1)")
try:
backend = AerSimulator(
method='statevector', device="GPU", cusvaer_enable=True, noise_model=None,
fusion_max_qubit=nfused,
cusvaer_global_index_bits=cusvaer_global_index_bits,
cusvaer_p2p_device_bits=cusvaer_p2p_device_bits,
precision=self.precision,
cusvaer_data_transfer_buffer_bits=cusvaer_data_transfer_buffer_bits,
cusvaer_comm_plugin_type=cusvaer_comm_plugin_type,
cusvaer_comm_plugin_soname=cusvaer_comm_plugin_soname
)
except: # AerError
raise RuntimeError(
"the cusvaer backend is only available in cuQuantum Appliance "
"container 22.11+")
elif identifier == "aer-cuda":
if ngpus >= 1:
blocking_enable, blocking_qubits = self.get_aer_blocking_setup(ngpus)
try:
# use cuQuantum Appliance interface
backend = AerSimulator(
method='statevector', device="GPU", cusvaer_enable=False, cuStateVec_enable=False,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
except: # AerError
# use public interface
backend = AerSimulator(
method='statevector', device="GPU", cuStateVec_enable=False,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
else:
raise ValueError(f"need to specify --ngpus for the backend {identifier}")
elif identifier == "aer-cusv":
if ngpus >= 1:
blocking_enable, blocking_qubits = self.get_aer_blocking_setup(ngpus)
try:
# use cuQuantum Appliance interface
backend = AerSimulator(
method='statevector', device="GPU", cusvaer_enable=False, cuStateVec_enable=True,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
except: # AerError
# use public interface
backend = AerSimulator(
method='statevector', device="GPU", cuStateVec_enable=True,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
else:
raise ValueError(f"need to specify --ngpus for the backend {identifier}")
elif identifier == 'aer':
if ngpus != 0:
raise ValueError(f"cannot specify --ngpus for the backend {identifier}")
blocking_enable, blocking_qubits = self.get_aer_blocking_setup()
try:
# use cuQuantum Appliance interface
backend = AerSimulator(
method='statevector', device="CPU", max_parallel_threads=ncpu_threads,
cusvaer_enable=False, cuStateVec_enable=False,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
except: # AerError
# use public interface
backend = AerSimulator(
method='statevector', device="CPU", max_parallel_threads=ncpu_threads,
blocking_enable=blocking_enable, blocking_qubits=blocking_qubits,
fusion_max_qubit=nfused, precision=self.precision)
else:
raise ValueError(f"the backend {identifier} is not recognized")
return backend
def get_aer_blocking_setup(self, ngpus=None):
size = get_mpi_size() # check if running MPI
if size > 1:
blocking_enable = True
if self.identifier == 'aer':
blocking_qubits = self.nqubits - int(math.log2(size))
else:
blocking_qubits = self.nqubits - int(math.log2(ngpus))
else:
# use default
blocking_enable = False
blocking_qubits = None
return blocking_enable, blocking_qubits
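    # Worked example (illustrative): with nqubits=30 under 4 MPI processes, the
    # 'aer' backend gets blocking_qubits = 30 - log2(4) = 28, i.e. each cache
    # block holds 2**28 amplitudes; the GPU backends subtract log2(ngpus) instead.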
def _need_sync(self):
ver_str = version('cusvaer')
ver = [int(num) for num in ver_str.split('.')]
return ver[0] == 0 and ver[1] <= 2
def _synchronize(self):
my_rank = get_mpi_rank()
ndevices_in_node = cp.cuda.runtime.getDeviceCount()
# GPU selected in this process
device_id = my_rank % ndevices_in_node
cp.cuda.Device(device_id).synchronize()
CusvAer = functools.partial(Qiskit, identifier="cusvaer")
AerCuda = functools.partial(Qiskit, identifier="aer-cuda")
AerCusv = functools.partial(Qiskit, identifier="aer-cusv")
Aer = functools.partial(Qiskit, identifier="aer")
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_qiskit.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import warnings
try:
import cirq
except ImportError:
cirq = None
from .backend import Backend
class Cirq(Backend):
def __init__(self, ngpus, ncpu_threads, precision, *args, **kwargs):
if cirq is None:
raise RuntimeError("cirq is not installed")
if ngpus > 0:
raise ValueError("the cirq backend only runs on CPU")
if ncpu_threads > 1:
warnings.warn("cannot set the number of CPU threads for the cirq backend")
if precision != 'single':
raise ValueError("the cirq backend only supports single precision")
self.backend = cirq.Simulator()
def run(self, circuit, nshots=1024):
run_data = {}
if nshots > 0:
results = self.backend.run(circuit, repetitions=nshots)
else:
results = self.backend.simulate(circuit)
post_res = results.measurements['result']
return {'results': results, 'post_results': post_res, 'run_data': run_data}
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_cirq.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import cupy as cp
try:
import qsimcirq
except ImportError:
qsimcirq = None
from .backend import Backend
class QsimCirq(Backend):
def __init__(self, ngpus, ncpu_threads, precision, *args, identifier=None, **kwargs):
if qsimcirq is None:
raise RuntimeError("qsimcirq is not installed")
if precision != 'single':
raise ValueError("all qsim backends only support single precision")
self.identifier = identifier
qsim_options = self.create_qsim_options(identifier, ngpus, ncpu_threads, **kwargs)
self.backend = qsimcirq.QSimSimulator(qsim_options=qsim_options)
def run(self, circuit, nshots=1024):
run_data = {}
if self.identifier == "qsim-mgpu":
dev = cp.cuda.Device()
if nshots > 0:
results = self.backend.run(circuit, repetitions=nshots)
else:
results = self.backend.simulate(circuit)
if self.identifier == "qsim-mgpu":
# work around a bug
if dev != cp.cuda.Device():
dev.use()
post_res = None # TODO
return {'results': results, 'post_results': post_res, 'run_data': run_data}
@staticmethod
def create_qsim_options(identifier, ngpus, ncpu_threads, **kwargs):
nfused = kwargs.pop('nfused')
if identifier == "qsim-mgpu":
if ngpus >= 1:
# use cuQuantum Appliance interface
ops = qsimcirq.QSimOptions(gpu_mode=tuple(range(ngpus)), max_fused_gate_size=nfused)
else:
raise ValueError(f"need to specify --ngpus for the backend {identifier}")
elif identifier == "qsim-cuda":
if ngpus == 1:
try:
# use public interface
ops = qsimcirq.QSimOptions(gpu_mode=0, use_gpu=True, max_fused_gate_size=nfused)
except TypeError:
# use cuQuantum Appliance interface
ops = qsimcirq.QSimOptions(gpu_mode=0, disable_gpu=False, use_sampler=False, max_fused_gate_size=nfused)
else:
raise ValueError(f"need to specify --ngpus 1 for the backend {identifier}")
elif identifier == "qsim-cusv":
if ngpus == 1:
try:
# use public interface
ops = qsimcirq.QSimOptions(gpu_mode=1, use_gpu=True, max_fused_gate_size=nfused)
except TypeError:
# use cuQuantum Appliance interface
ops = qsimcirq.QSimOptions(gpu_mode=1, disable_gpu=False, use_sampler=False, max_fused_gate_size=nfused)
else:
raise ValueError(f"need to specify --ngpus 1 for the backend {identifier}")
elif identifier == "qsim":
if ngpus != 0:
raise ValueError(f"cannot specify --ngpus for the backend {identifier}")
try:
# use public interface
ops = qsimcirq.QSimOptions(use_gpu=False, cpu_threads=ncpu_threads, max_fused_gate_size=nfused)
except TypeError:
# use cuQuantum Appliance interface
ops = qsimcirq.QSimOptions(disable_gpu=True, use_sampler=False, cpu_threads=ncpu_threads, max_fused_gate_size=nfused,
gpu_mode=0)
else:
raise ValueError(f"the backend {identifier} is not recognized")
return ops
QsimMgpu = functools.partial(QsimCirq, identifier='qsim-mgpu')
QsimCuda = functools.partial(QsimCirq, identifier='qsim-cuda')
QsimCusv = functools.partial(QsimCirq, identifier='qsim-cusv')
Qsim = functools.partial(QsimCirq, identifier='qsim')
| cuQuantum-main | benchmarks/cuquantum_benchmarks/backends/backend_qsim.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .benchmark import Benchmark
from .._utils import Gate
class QFT(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
circuit = QFT._qft_component(nqubits)
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
def _qft_component(nqubits):
qft = []
for q in range(nqubits):
qft.append(Gate(id='h', targets=q))
for p in range(q+1, nqubits):
qft.append(Gate(id='czpowgate', params=1/(2**(p-q)), controls=q, targets=p))
for q in range(nqubits//2):
qft.append(Gate(id='swap', targets=(q, nqubits-q-1)))
return qft
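    # Example (illustrative): for nqubits=3 the generated sequence is
    #   h(0), czpowgate(1/2, c=0, t=1), czpowgate(1/4, c=0, t=2),
    #   h(1), czpowgate(1/2, c=1, t=2), h(2), swap(0, 2)
    # plus a trailing measure gate when config['measure'] is set.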
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/qft.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
class Benchmark:
@staticmethod
def generateGatesSequence(nqubits, config):
raise NotImplementedError
@staticmethod
def postProcess(nqubits, results):
return False
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/benchmark.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .benchmark import Benchmark
from .._utils import Gate
class GHZ(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
circuit = [Gate(id='h', targets=0)]
circuit += [Gate(id='cnot', controls=idx, targets=idx+1) for idx in range(nqubits-1)]
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
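    # Example (illustrative): for nqubits=3 the sequence is
    #   h(0), cnot(c=0, t=1), cnot(c=1, t=2) [, measure(0, 1, 2)]
    # which prepares the GHZ state (|000> + |111>)/sqrt(2).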
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/ghz.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# Used for classical post-processing:
from collections import Counter
import numpy as np
import scipy as sp
from .benchmark import Benchmark
from .._utils import Gate, reseed
class Simon(Benchmark):
    # Example instantiation of a Simon circuit parameterized by nqubits
@staticmethod
def generateGatesSequence(nqubits, config):
reseed(1234) # TODO: use a global seed?
# "nqubits" here refers to the number of total qubits in the circuit, and we need
# it to be even so we can split the qubits into input/output
        if nqubits % 2:
            raise ValueError("the simon benchmark requires an even number of qubits")
elif nqubits < 4:
# because the oracle needs to apply a swap gate, # output qubits needs to be
# at least 2, so total # of qubits needs to be 4
raise ValueError("the simon benchmark requires at least 4 qubits")
else:
nqubits //= 2
measure = config['measure']
# define a secret string:
secret_string = np.random.randint(2, size=nqubits)
# Choose qubits to use.
input_qubits = [i for i in range(nqubits)] # input x
output_qubits = [j for j in range(nqubits, 2*nqubits)] # output f(x)
# Pick coefficients for the oracle and create a circuit to query it.
oracle = Simon._make_oracle(input_qubits, output_qubits, secret_string)
# Embed oracle into special quantum circuit querying it exactly once
circuit = Simon._make_simon_circuit(input_qubits, output_qubits, oracle, measure)
return circuit
"""Demonstrates Simon's algorithm.
Simon's Algorithm solves the following problem:
    Given a function f:{0,1}^n -> {0,1}^n, such that for some s ∈ {0,1}^n,
    f(x) = f(y) iff x ⊕ y ∈ {0^n, s},
    find the n-bit string s.
    A classical algorithm requires O(2^(n/2)) queries to find s, while Simon's
    algorithm needs only O(n) quantum queries.
    === REFERENCE ===
    D. R. Simon. On the power of quantum computation. In 35th FOCS, pages 116-123,
    Santa Fe, New Mexico, 1994. IEEE Computer Society Press.
=== EXAMPLE OUTPUT ===
Secret string = [1, 0, 0, 1, 0, 0]
Circuit:
    (circuit diagram omitted: H on input qubits 0-5, the oracle's CNOTs from the
    inputs onto output qubits 6-11, a SWAP of two output qubits, then H and
    measurement on the input qubits)
Most common Simon Algorithm answer is: ('[1 0 0 1 0 0]', 100)
***If the input string is s=0^n, no significant answer can be
distinguished (since the null-space of the system of equations
provided by the measurements gives a random vector). This will
    lead to a low frequency count in the output string.
"""
def _make_oracle(input_qubits, output_qubits, secret_string):
"""Gates implementing the function f(a) = f(b) iff a β¨ b = s"""
# Copy contents to output qubits:
for control_qubit, target_qubit in zip(input_qubits, output_qubits):
yield Gate(id='cnot', controls=control_qubit, targets=target_qubit)
# Create mapping:
if sum(secret_string): # check if the secret string is non-zero
# Find significant bit of secret string (first non-zero bit)
significant = list(secret_string).index(1)
# Add secret string to input according to the significant bit:
for j in range(len(secret_string)):
if secret_string[j] > 0:
yield Gate(id='cnot', controls=input_qubits[significant], targets=output_qubits[j])
# Apply a random permutation:
pos = [0, len(secret_string) - 1,]
# Swap some qubits to define oracle. We choose first and last:
yield Gate(id='swap', targets=[output_qubits[pos[0]], output_qubits[pos[1]]])
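    # Example (illustrative): for nqubits=3 and secret_string=[1, 1, 0] the oracle
    # yields cnot(0->3), cnot(1->4), cnot(2->5) (the copy), then cnot(0->3) and
    # cnot(0->4) (the shift with significant bit 0), and finally swap(3, 5).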
def _make_simon_circuit(input_qubits, output_qubits, oracle, measure):
"""Solves for the secret period s of a 2-to-1 function such that
f(x) = f(y) iff x β¨ y = s
"""
circuit = []
# Initialize qubits.
init = [Gate(id='h', targets=idx) for idx in input_qubits]
circuit += init
# Query oracle.
circuit += oracle
if measure:
# Measure in X basis.
circuit += init
circuit.append(Gate(id='measure', targets=input_qubits))
return circuit
def _post_processing(data, results):
"""Solves a system of equations with modulo 2 numbers"""
        flag = False  # stays False when the sampled equations are linearly dependent
        sing_values = sp.linalg.svdvals(results)
        tolerance = 1e-5
        if sum(sing_values < tolerance) == 0:  # the measurements are linearly independent
            flag = True
null_space = sp.linalg.null_space(results).T[0]
solution = np.around(null_space, 3) # chop very small values
minval = abs(min(solution[np.nonzero(solution)], key=abs))
solution = (solution / minval % 2).astype(int) # renormalize vector mod 2
data.append(str(solution))
return flag
@staticmethod
def postProcess(nqubits, results):
if results is None:
return False
# only the input qubits are measured
nqubits //= 2
data = []
flag = False
classical_iter = 0
while not flag:
# Classical Post-Processing:
flag = Simon._post_processing(data, results[classical_iter * (nqubits - 1):(classical_iter + 1) * (nqubits - 1)])
classical_iter += 1
freqs = Counter(data)
return True
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/simon.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import cupy as cp
import numpy as np
from cupyx.profiler import benchmark
from cuquantum import custatevec as cusv
from .._utils import (check_sequence, check_targets_controls, dtype_to_cuda_type,
precision_str_to_dtype, wrap_with_nvtx)
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
def test_apply_generalized_permutation_matrix(
n_qubits, dtype_sv,
targets, controls, adjoint,
diag, dtype_diag, location_diag, # for D
perm_table, location_perm, # for P
n_warmup, n_repeat, *,
benchmark_data=None):
# TODO: allow controlling seed?
if diag is False and not perm_table:
raise ValueError("need to specify at least --has-diag or --has-perm/--perm-table")
logger.debug(f"{n_qubits=}")
logger.debug(f"{dtype_sv=}")
logger.debug(f"{targets=}")
logger.debug(f"{controls=}")
logger.debug(f"{adjoint=}")
logger.debug(f"{diag=}")
logger.debug(f"{dtype_diag=}")
logger.debug(f"{location_diag=}")
if isinstance(perm_table, bool) or len(perm_table) <= 16:
logger.debug(f"{perm_table=}")
else:
logger.debug("perm_table = (omitted due to length)")
logger.debug(f"{location_perm=}")
logger.debug(f"{n_warmup=}")
logger.debug(f"{n_repeat=}")
check_targets_controls(targets, controls, n_qubits)
n_targets = len(targets)
n_controls = len(controls)
# cuStateVec handle initialization
handle = cusv.create()
stream = cp.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
size_sv = (2 ** n_qubits)
dtype_sv = precision_str_to_dtype(dtype_sv)
sv = cp.ones((size_sv,), dtype=dtype_sv)
data_type_sv = dtype_to_cuda_type(dtype_sv)
# the diagonal matrix can live on either host (np) or device (cp)
matrix_dim = (2 ** n_targets)
dtype_diag = precision_str_to_dtype(dtype_diag)
xp_diag = cp if location_diag == 'device' else np
if diag:
# it's better to just call rng.uniform(), but it's not there until CuPy v12.0.0
# rng_diag = xp_diag.random.default_rng(seed=1234)
# diag = rng_diag.uniform(0.7, 1.3, size=matrix_dim).astype(dtype_diag)
diag = 0.6 * xp_diag.random.random(size=matrix_dim).astype(dtype_diag) + 0.7
if isinstance(diag, cp.ndarray):
diag_ptr = diag.data.ptr
elif isinstance(diag, np.ndarray):
diag_ptr = diag.ctypes.data
else:
            raise ValueError("diag must be a NumPy or CuPy ndarray")
else:
diag_ptr = 0
data_type_diag = dtype_to_cuda_type(dtype_diag)
# the permutation table can live on either host (np) or device (cp)
xp_perm = cp if location_perm == 'device' else np
if perm_table:
if perm_table is True:
original_perm_table = xp_perm.arange(0, matrix_dim, dtype=xp_perm.int64)
perm_table = xp_perm.copy(original_perm_table)
# it'd have been nice to seed an rng and call rng.shuffle(), but CuPy does
# not support it yet...
while True:
xp_perm.random.shuffle(perm_table)
# check if the matrix is not diagonal
if not (original_perm_table == perm_table).all():
break
else: # a user-provided list
check_sequence(perm_table, expected_size=matrix_dim, name="perm_table")
perm_table = xp_perm.asarray(perm_table, dtype=xp_perm.int64)
if isinstance(perm_table, cp.ndarray):
perm_table_ptr = perm_table.data.ptr
elif isinstance(perm_table, np.ndarray):
perm_table_ptr = perm_table.ctypes.data
else:
            raise ValueError("perm_table must be a NumPy or CuPy ndarray")
else:
perm_table_ptr = 0
cp.cuda.Device().synchronize() # ensure data prep is done before switching stream
####################################################################################
# manage the workspace
workspace_size = cusv.apply_generalized_permutation_matrix_get_workspace_size(
handle, data_type_sv, n_qubits, perm_table_ptr, diag_ptr,
data_type_diag, targets, n_targets, n_controls)
with stream:
if workspace_size > 0:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply diagonal/permutation gate
apply_generalized_permutation_matrix = wrap_with_nvtx(
cusv.apply_generalized_permutation_matrix,
"apply_generalized_permutation_matrix")
args = (
handle, sv.data.ptr, data_type_sv, n_qubits, perm_table_ptr,
diag_ptr, data_type_diag, adjoint, targets, n_targets,
controls, 0, # TODO: support control bit values
n_controls, workspace_ptr, workspace_size)
result = benchmark(
apply_generalized_permutation_matrix,
args,
n_warmup=n_warmup, n_repeat=n_repeat)
# destroy handle
cusv.destroy(handle)
logger.debug(str(result))
cpu_time = np.average(result.cpu_times)
gpu_time = np.average(result.gpu_times[0])
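    # one read plus one write of every touched amplitude; e.g. (illustrative)
    # n_qubits=30, n_controls=0, complex128: 2**30 * 2 * 16 B ~= 34.4 GB per call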
memory_footprint = (2. ** (n_qubits - n_controls)) * 2. * np.dtype(dtype_sv).itemsize
logger.debug(f"effective bandwidth = {memory_footprint / gpu_time * 1e-9} (GB/s)")
return cpu_time, gpu_time
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/apply_gen_perm_matrix.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import random
from .benchmark import Benchmark
from .._utils import Gate
class Random(Benchmark):
# TODO: this should be frontend's property
gate_types = ('h', 'x', 'rz', 'rx', 'ry', 'cnot', 'cz', 'swap')
@staticmethod
def generateGatesSequence(nqubits, config):
try:
depth = config['depth']
except KeyError:
depth = nqubits
gate_types = Random.gate_types
seed = np.iinfo(np.int32).max
rng = np.random.default_rng(seed)
circuit = []
# apply arbitrary random operations at every depth
for _ in range(depth):
            # choose either 1 or 2 qubits for the operation
qubits_shuffle = list(range(nqubits))
rng.shuffle(qubits_shuffle)
while qubits_shuffle:
max_possible_operands = min(len(qubits_shuffle), 2)
num_operands = rng.choice(range(max_possible_operands)) + 1
operands = [qubits_shuffle.pop() for _ in range(num_operands)]
# TODO: gate_type_num depends on the gate order in gate_types
if num_operands == 1:
gate_type_num = random.randint(0, 4)
if gate_type_num < 2:
circuit.append(Gate(id=gate_types[gate_type_num], targets=operands[0]))
else:
angle = rng.uniform(0, 2 * np.pi)
circuit.append(Gate(id=gate_types[gate_type_num], params=angle, targets=operands[0]))
elif num_operands == 2:
gate_type_num = random.randint(5, 7)
if gate_type_num in (5, 6):
circuit.append(Gate(id=gate_types[gate_type_num], controls=operands[0], targets=operands[1]))
else:
circuit.append(Gate(id=gate_types[gate_type_num], targets=[operands[0], operands[1]]))
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/random.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from .benchmark import Benchmark
from .._utils import Gate
class IQFT(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
circuit = IQFT._iqft_component(nqubits)
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
def _iqft_component(nqubits):
iqft = []
for q in range(nqubits//2):
iqft.append(Gate(id='swap', targets=(q, nqubits-q-1)))
for q in range(nqubits-1, -1, -1):
for p in range(nqubits-1, q, -1):
iqft.append(Gate(id='czpowgate', params=-1/(2**(p-q)), controls=q, targets=p))
iqft.append(Gate(id='h', targets=q))
return iqft
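    # Example (illustrative): for nqubits=2 the generated sequence is
    #   swap(0, 1), h(1), czpowgate(-1/2, c=0, t=1), h(0)
    # i.e. the QFT gates applied in reverse order with negated phases.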
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/iqft.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import random
from .benchmark import Benchmark
from .._utils import Gate, reseed
class HiddenShift(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
reseed(1234) # TODO: use a global seed?
# Define secret shift
shift = [random.randint(0, 1) for _ in range(nqubits)]
# Make oracles (black box)
oracle_f = HiddenShift._make_oracle_f(nqubits)
# Embed oracle into quantum circuit implementing the Hidden Shift Algorithm
circuit = HiddenShift._make_hs_circuit(nqubits, oracle_f, shift)
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
"""Example program that demonstrates a Hidden Shift algorithm.
The Hidden Shift Problem is one of the known problems whose quantum algorithm
solution shows exponential speedup over classical computing. Part of the
advantage lies in the ability to perform Fourier transforms efficiently. This
can be used to extract correlations between certain functions, as we will
demonstrate here:
Let f and g be two functions {0,1}^N -> {0,1} which are the same
up to a hidden bit string s:
g(x) = f(x ⊕ s), for all x in {0,1}^N
The implementation in this example considers the following (so-called "bent")
functions:
f(x) = Σ_i x_(2i) x_(2i+1),
where x_i is the i-th bit of x and i runs from 0 to N/2 - 1.
While a classical algorithm requires 2^(N/2) queries, the Hidden Shift
Algorithm solves the problem in O(N) quantum operations. We describe below the
steps of the algorithm:
(1) Prepare the quantum state in the initial state |0⟩^N
(2) Make a superposition of all inputs |x⟩ with a set of Hadamard gates, which
act as a (Quantum) Fourier Transform.
(3) Compute the shifted function g(x) = f(x ⊕ s) into the phase with a proper
set of gates. This is done first by shifting the state |x⟩ with X gates, then
implementing the bent function as a series of Controlled-Z gates, and finally
recovering the |x⟩ states with another set of X gates.
(4) Apply a Fourier Transform to generate another superposition of states with
an extra phase that is added to f(x ⊕ s).
(5) Query the oracle f into the phase with a proper set of controlled gates.
One can then prove that the phases simplify giving just a superposition with
a phase depending directly on the shift.
(6) Apply another set of Hadamard gates which act now as an Inverse Fourier
Transform to get the state |s⟩
(7) Measure the resulting state to get s.
Note that we only query g and f once to solve the problem.
=== REFERENCES ===
[1] Wim van Dam, Sean Hallgren, Lawrence Ip. Quantum Algorithms for some
Hidden Shift Problems. https://arxiv.org/abs/quant-ph/0211140
[2] K. Wright, et al. Benchmarking an 11-qubit quantum computer.
Nature Communications, 10:5464, 2019. doi:10.1038/s41467-019-13534-2
[3] Rötteler, M. Quantum Algorithms for highly non-linear Boolean functions.
Proceedings of the 21st annual ACM-SIAM Symposium on Discrete Algorithms.
doi: 10.1137/1.9781611973075.37
=== EXAMPLE OUTPUT ===
Secret shift sequence: [1, 0, 0, 1, 0, 1]
Circuit:
(0, 0): ───H───X───@───X───H───@───H───M('result')───
                   │           │       │
(1, 0): ───H───────@───────H───@───H───M─────────────
                                       │
(2, 0): ───H───────@───────H───@───H───M─────────────
                   │           │       │
(3, 0): ───H───X───@───X───H───@───H───M─────────────
                                       │
(4, 0): ───H───────@───────H───@───H───M─────────────
                   │           │       │
(5, 0): ───H───X───@───X───H───@───H───M─────────────
Sampled results:
Counter({'100101': 100})
Most common bitstring: 100101
Found a match: True
"""
def _make_oracle_f(nqubits):
"""Implement function {f(x) = Ξ£_i x_(2i) x_(2i+1)}."""
oracle_circuit = [Gate(id='cz', controls=2*i, targets=2*i+1) for i in range(nqubits//2)]
return oracle_circuit
def _make_hs_circuit(nqubits, oracle_f, shift):
"""Find the shift between two almost equivalent functions."""
circuit = []
apply_h = [Gate(id='h', targets=idx) for idx in range(nqubits)]
apply_shift = [Gate(id='x', targets=k) for k in range(len(shift)) if shift[k]]
# Initialize qubits.
circuit += apply_h
# Query oracle g: It is equivalent to that of f, shifted before and after:
# Apply Shift:
circuit += apply_shift
# Query oracle.
circuit += oracle_f
# Apply Shift:
circuit += apply_shift
# Second Application of Hadamards.
circuit += apply_h
# Query oracle f (this simplifies the phase).
circuit += oracle_f
# Inverse Fourier Transform with Hadamards to go back to the shift state:
circuit += apply_h
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/hidden_shift.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .benchmark import Benchmark
from .._utils import Gate, random_unitary
class QuantumVolume(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
# This routine is roughly equivalent to Cirq's quantum_volume.generate_model_circuit()
# and Qiskit's QuantumVolume(..., classical_permutation=True).
# copied from cusvaer
n_variations = 10 # unused
depth = 30
seed = 1000
width = nqubits // 2
rng = np.random.default_rng(seed)
measure = config['measure']
circuit = []
for _ in range(depth):
perm = rng.permutation(nqubits)
# apply an su4 gate on each pair in the layer
for w in range(width):
su4 = random_unitary(4, rng)
assert su4.shape == (4, 4)
idx = [perm[2*w], perm[2*w+1]]
circuit.append(Gate(id='u', matrix=su4, targets=idx))
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/quantum_volume.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
from .benchmark import Benchmark
from .iqft import IQFT
from .._utils import Gate
class QPE(Benchmark):
@staticmethod
def generateGatesSequence(nqubits, config):
if nqubits < 2:
raise ValueError("for qpe the number of qubits should be >=2")
        # Example instantiation of QPE circuit parameterized by nqubits
phase = 1/3
        U = np.array([[1, 0], [0, np.exp(np.pi * 1j * phase)]])
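        # U|1> = exp(1j*pi*phase)|1>; QPE estimates this eigenphase from the counting register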
in_nqubits = 1
unfold = config['unfold']
measure = config['measure']
circuit = QPE._make_qpe_circuit(in_nqubits, nqubits-in_nqubits, U, unfold, 'P(1/3)')
if measure:
# Measure Counting Qubits
circuit.append(Gate(id='measure', targets=list(range(nqubits-in_nqubits))))
return circuit
def _make_qpe_component(in_qubits, t_qubits, U, unfold, U_name):
component = []
in_nqubits = len(in_qubits)
t_nqubits = len(t_qubits)
# 1. Setup Eigenstate
component.append(Gate(id='x', targets=in_qubits[0]))
# 2. Superposition Counting Qubits
all_h = [Gate(id='h', targets=idx) for idx in t_qubits]
component += all_h
# 3. Controlled-U
        # counting qubit t_qubits[t_nqubits-t-1] controls U^(2^t); using the same
        # control in both branches keeps the folded and unfolded circuits equivalent
        pow_U = U  # U^(2^0)
        for t in range(t_nqubits):
            control = t_qubits[t_nqubits-t-1]
            if unfold:
                for _ in range(2 ** t):
                    component.append(Gate(id='cu', matrix=U, name=f'{U_name}', controls=control, targets=in_qubits))
            else:
                component.append(Gate(id='cu', matrix=pow_U, name=f'{U_name}^(2^{t})', controls=control, targets=in_qubits))
                pow_U = pow_U @ pow_U  # square to obtain U^(2^(t+1))
# 4. Inverse QFT Counting Qubits
iqft_component = IQFT._iqft_component(t_nqubits)
component += iqft_component
return component
    # Input:
    # :- U is a unitary numpy array of shape (2**in_nqubits, 2**in_nqubits)
    # :- unfold implements the powers of U by repeated application of U, rather than creating new power gates
def _make_qpe_circuit(in_nqubits, t_nqubits, U, unfold, U_name):
assert 2 ** in_nqubits == U.shape[0] == U.shape[1], "Mismatched number of qubits between U and in_nqubits"
t_qubits = [idx for idx in range(t_nqubits)]
in_qubits = [idx for idx in range(t_nqubits, t_nqubits + in_nqubits)]
circuit = QPE._make_qpe_component(in_qubits, t_qubits, U, unfold, U_name)
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/qpe.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import os
import sys
import cupy as cp
import numpy as np
from cupyx.profiler import benchmark
import cuquantum.cutensornet as cutn
from cuquantum.cutensornet import tensor
from .._utils import precision_str_to_dtype, wrap_with_nvtx
try:
path = os.environ.get('CUTENSORNET_APPROX_TN_UTILS_PATH', '')
if path and os.path.isfile(path):
sys.path.insert(1, os.path.dirname(path))
from approxTN_utils import tensor_decompose
except ImportError:
tensor_decompose = None
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
def benchmark_tensor_decompose(
expr, shape, precision, is_complex, method, algorithm, n_warmup, n_repeats, check_ref, *,
benchmark_data=None):
logger.debug(f"{expr=}")
logger.debug(f"{shape=}")
logger.debug(f"{precision=}")
logger.debug(f"{is_complex=}")
logger.debug(f"{method=}")
logger.debug(f"{algorithm=}")
logger.debug(f"{n_warmup=}")
logger.debug(f"{n_repeats=}")
logger.debug(f"{check_ref=}")
cp.random.seed(5678) # TODO: set me
handle = cutn.create()
options = {'handle': handle}
decomp_subscripts = expr
# sanity checks
expr_in = expr.split('->')[0]
assert len(expr_in) == len(shape), \
f"the input shape {shape} mismatches with the input modes {expr_in}"
if check_ref and tensor_decompose is None:
raise RuntimeError("--check-reference is not supported")
dtype_r = precision_str_to_dtype(precision, False)
t_in = cp.random.random(shape, dtype=dtype_r)
if is_complex:
dtype = precision_str_to_dtype(precision)
t_in = t_in.astype(dtype)
t_in += 1j*cp.random.random(shape, dtype=dtype_r)
assert t_in.dtype == dtype
t_numpy = t_in.get()
if method == "QR":
kwargs = {'options': options}
if check_ref:
options_ref = {'method':'qr'}
elif method == "SVD":
try:
kwargs = {'options': options, 'method': tensor.SVDMethod(algorithm=algorithm)}
except TypeError as e:
if algorithm != "gesvd":
raise ValueError(f"{algorithm} requires cuQuantum v23.06+") from e
else:
kwargs = {'options': options, 'method': tensor.SVDMethod()}
if check_ref:
options_ref = {'method':'svd'}
else:
assert False
cp.cuda.Device().synchronize() # ensure data prep is done
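    # NVTX ranges make these calls easy to locate in profiler timelines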
decompose = wrap_with_nvtx(tensor.decompose, "decompose")
results = benchmark(decompose,
(decomp_subscripts, t_in), kwargs=kwargs,
n_repeat=n_repeats, n_warmup=n_warmup)
if check_ref:
decompose_ref = wrap_with_nvtx(tensor_decompose, "tensor_decompose")
results_cupy = benchmark(decompose_ref,
(decomp_subscripts, t_in), kwargs=options_ref,
n_repeat=n_repeats, n_warmup=n_warmup)
results_numpy = benchmark(decompose_ref,
(decomp_subscripts, t_numpy), kwargs=options_ref,
n_repeat=n_repeats, n_warmup=n_warmup)
cutn.destroy(handle)
logger.debug(str(results))
if check_ref:
logger.debug("ref (CuPy):")
logger.debug(str(results_cupy))
benchmark_data['cupy_time'] = max(
np.average(results_cupy.cpu_times), np.average(results_cupy.gpu_times[0]))
logger.debug("ref (NumPy):")
logger.debug(str(results_numpy))
benchmark_data['numpy_time'] = np.average(results_numpy.cpu_times)
cpu_time = np.average(results.cpu_times)
gpu_time = np.average(results.gpu_times[0])
return cpu_time, gpu_time
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/tensor_decompose.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import networkx as nx
import numpy as np
from .benchmark import Benchmark
from .._utils import Gate
class QAOA(Benchmark):
    # Example instantiation of QAOA circuit for MaxCut parameterized by nqubits
@staticmethod
def generateGatesSequence(nqubits, config):
p = config['p']
graph = nx.complete_graph(nqubits)
gammas = [np.pi for _ in range(p)]
betas = [np.pi for _ in range(p)]
circuit = QAOA._make_qaoa_maxcut_circuit(nqubits, graph, gammas, betas)
measure = config['measure']
if measure:
circuit.append(Gate(id='measure', targets=list(range(nqubits))))
return circuit
def _make_qaoa_maxcut_mixer_circuit(nqubits, beta):
mixer_circuit = [Gate(id='rx', params=2*beta, targets=q) for q in range(nqubits)]
return mixer_circuit
def _make_qaoa_maxcut_problem_circuit(nqubits, graph, gamma):
problem_circuit = []
for v1, v2 in graph.edges():
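            # CNOT-RZ-CNOT implements the two-qubit ZZ phase rotation for this edge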
problem_circuit.append(Gate(id='cnot', controls=v1, targets=v2))
problem_circuit.append(Gate(id='rz', params=gamma, targets=v2))
problem_circuit.append(Gate(id='cnot', controls=v1, targets=v2))
return problem_circuit
def _make_qaoa_maxcut_circuit(nqubits, graph, gammas, betas):
# Initial circuit
circuit = [Gate(id='h', targets=idx) for idx in range(nqubits)]
for p in range(len(gammas)):
circuit += QAOA._make_qaoa_maxcut_problem_circuit(nqubits, graph, gammas[p])
circuit += QAOA._make_qaoa_maxcut_mixer_circuit(nqubits, betas[p])
return circuit
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/qaoa.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import numpy as np
import cupy as cp
from cupyx.profiler import benchmark
from cuquantum import custatevec as cusv
from .._utils import (check_sequence, dtype_to_cuda_type, precision_str_to_dtype,
wrap_with_nvtx)
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
def test_cusv_sampler(
n_qubits, dtype_sv, bit_ordering, n_shots, output_order, n_warmup, n_repeat, *,
benchmark_data=None):
logger.debug(f"{n_qubits=}")
logger.debug(f"{dtype_sv=}")
logger.debug(f"{bit_ordering=}")
logger.debug(f"{n_shots=}")
logger.debug(f"{output_order}")
logger.debug(f"{n_warmup=}")
logger.debug(f"{n_repeat=}")
check_sequence(bit_ordering, max_size=n_qubits, name="bit_ordering")
dtype_sv = precision_str_to_dtype(dtype_sv)
size_sv = (1 << n_qubits)
# the statevector must reside on device
sv = cp.ones((size_sv,), dtype=dtype_sv)
sv /= np.sqrt(size_sv)
# assert cp.allclose(cp.sum(cp.abs(sv)**2), 1)
data_type_sv = dtype_to_cuda_type(dtype_sv)
# the output bitstrings must reside on host
bit_strings = np.empty((n_shots,), dtype=np.int64)
    # the random numbers used for sampling must reside on host
randnums = np.random.random((n_shots,)).astype(np.float64)
cp.cuda.Device().synchronize() # ensure data prep is done before switching stream
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
stream = cp.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
# create sampler and check the size of external workspace
sampler, workspace_size = cusv.sampler_create(
handle, sv.data.ptr, data_type_sv, n_qubits, n_shots)
with stream:
# manage the workspace
if workspace_size > 0:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# sample preprocess
sampler_preprocess = wrap_with_nvtx(
cusv.sampler_preprocess, "sampler_preprocess")
args = (handle, sampler, workspace_ptr, workspace_size)
result1 = benchmark(
sampler_preprocess,
args,
n_warmup=n_warmup, n_repeat=n_repeat)
logger.debug(str(result1))
# sample bit strings
sampler_sample = wrap_with_nvtx(
cusv.sampler_sample, "sampler_sample")
args = (
handle, sampler, bit_strings.ctypes.data, bit_ordering, len(bit_ordering),
randnums.ctypes.data, n_shots,
cusv.SamplerOutput.RANDNUM_ORDER if output_order == "random" else cusv.SamplerOutput.ASCENDING_ORDER)
result2 = benchmark(
sampler_sample,
args,
n_warmup=n_warmup, n_repeat=n_repeat)
logger.debug(str(result2))
# clean up
cusv.sampler_destroy(sampler)
cusv.destroy(handle)
cpu_time = np.average(result1.cpu_times) + np.average(result2.cpu_times)
gpu_time = np.average(result1.gpu_times[0]) + np.average(result2.gpu_times[0])
return cpu_time, gpu_time
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/cusv_sampler.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import numpy as np
import cupy as cp
from cupyx.profiler import benchmark
from cuquantum import custatevec as cusv
from .._utils import (benchmark_with_prerun, check_targets_controls, dtype_to_cuda_type,
dtype_to_compute_type, L2flush, precision_str_to_dtype,
random_unitary, wrap_with_nvtx)
# set up a logger
logger_name = "cuquantum-benchmarks"
logger = logging.getLogger(logger_name)
def test_apply_matrix(
n_qubits, targets, controls, dtype_sv, dtype_mat, layout, adjoint,
n_warmup, n_repeat, location, *,
flush_l2=False, benchmark_data=None):
logger.debug(f"{n_qubits=}")
logger.debug(f"{targets=}")
logger.debug(f"{controls=}")
logger.debug(f"{dtype_sv=}")
logger.debug(f"{dtype_mat=}")
logger.debug(f"{layout=}")
logger.debug(f"{adjoint=}")
logger.debug(f"{location=}")
logger.debug(f"{n_warmup=}")
logger.debug(f"{n_repeat=}")
logger.debug(f"{flush_l2=}")
dtype_sv = precision_str_to_dtype(dtype_sv)
dtype_mat = precision_str_to_dtype(dtype_mat)
xp = cp if location == 'device' else np
layout = cusv.MatrixLayout.ROW if layout == "row" else cusv.MatrixLayout.COL
check_targets_controls(targets, controls, n_qubits)
size_sv = 2**n_qubits
n_targets = len(targets)
n_controls = len(controls)
# passing data ptr is slightly faster
targets_data = np.asarray(targets, dtype=np.int32)
targets = targets_data.ctypes.data
controls_data = np.asarray(controls, dtype=np.int32)
controls = controls_data.ctypes.data
# the statevector must reside on device
sv = cp.ones((size_sv,), dtype=dtype_sv)
sv /= np.sqrt(size_sv)
# assert cp.allclose(cp.sum(cp.abs(sv)**2), 1)
data_type_sv = dtype_to_cuda_type(dtype_sv)
# the gate matrix can live on either host (np) or device (cp)
matrix_dim = 2**n_targets
matrix = xp.asarray(random_unitary(matrix_dim), dtype=dtype_mat)
data_type_mat = dtype_to_cuda_type(dtype_mat)
if isinstance(matrix, cp.ndarray):
matrix_ptr = matrix.data.ptr
elif isinstance(matrix, np.ndarray):
matrix_ptr = matrix.ctypes.data
else:
raise ValueError
compute_type = dtype_to_compute_type(dtype_mat) # TODO: make this independent?
cp.cuda.Device().synchronize() # ensure data prep is done before switching stream
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
stream = cp.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
# get the workspace size
workspace_size = cusv.apply_matrix_get_workspace_size(
handle,
data_type_sv, n_qubits,
matrix_ptr, data_type_mat, layout, adjoint, n_targets, n_controls,
compute_type)
# apply gate
with stream:
# manage workspace
if workspace_size > 0:
workspace = cp.cuda.memory.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
args = (handle,
sv.data.ptr, data_type_sv, n_qubits,
matrix_ptr, data_type_mat, layout, adjoint,
targets, n_targets,
controls, 0, n_controls, # TODO: support control bit values
compute_type, workspace_ptr, workspace_size)
apply_matrix = wrap_with_nvtx(cusv.apply_matrix, "apply_matrix")
if flush_l2:
l2flusher = L2flush()
def f(*args, **kwargs):
l2flusher.flush() # clear L2 cache
result = benchmark_with_prerun(
apply_matrix,
args,
n_warmup=n_warmup, n_repeat=n_repeat,
pre_run=f)
else:
result = benchmark(
apply_matrix,
args,
n_warmup=n_warmup, n_repeat=n_repeat)
# destroy handle
cusv.destroy(handle)
logger.debug(str(result)) # this is nice-looking, if _PerfCaseResult.__repr__ is there
#logger.debug(f"(CPU times: {result.cpu_times}")
#logger.debug(f"(GPU times: {result.gpu_times[0]}")
cpu_time = np.average(result.cpu_times)
gpu_time = np.average(result.gpu_times[0])
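    # each of the 2^(n_qubits - n_controls) touched amplitudes is read once and written once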
mem_access = (2. ** (n_qubits - n_controls)) * 2. * np.dtype(dtype_sv).itemsize
logger.debug(f"effective bandwidth = {mem_access/gpu_time*1e-9} (GB/s)")
return cpu_time, gpu_time
| cuQuantum-main | benchmarks/cuquantum_benchmarks/benchmarks/apply_matrix.py |
#!python3
# Copyright (C) 2022, Frank Richter. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Split a monolithic sky_clusters.txt file into per-map sky cluster files.
import argparse
import os
arguments = argparse.ArgumentParser()
arguments.add_argument('gamedir', help='path to game directory')
arg_values = arguments.parse_args()
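# Example usage (path is illustrative):
#   python split_sky_clusters.py /path/to/baseq2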
header = """# This file is part of the Q2RTX lighting system.
# For this map, it lists BSP clusters with skybox and lava polygons
# that have to be converted to analytic area lights.
# For more information, see comments in the `path_tracer.h` file
# in Q2RTX source code.
"""
gamedir = arg_values.gamedir
line_iter = iter(open(os.path.join(gamedir, "sky_clusters.txt"), "r"))
def next_line():
try:
line = next(line_iter)
line = line.rstrip()
comment_pos = line.find('#')
if comment_pos != -1:
line_no_comment = line[:comment_pos].rstrip()
else:
line_no_comment = line
return line, line_no_comment
except StopIteration:
return None, None # Indicate EOF
# Track current per-map sky file
current_sky_file = None
line, line_no_comment = next_line()
while line is not None:
    if line_no_comment.isidentifier(): # map names are bare identifiers (letters, digits, underscores)
        # We have a map file name: close any previous sky file and open a new one
        map_name = line_no_comment
        if current_sky_file is not None:
            current_sky_file.close()
        current_sky_file = open(os.path.join(gamedir, "maps", "sky", f"{map_name}.txt"), "w")
        current_sky_file.write(header)
elif current_sky_file is not None:
# Write non-empty lines to sky file
if len(line) > 0:
print(line, file=current_sky_file)
line, line_no_comment = next_line()
# Close the last per-map sky file
if current_sky_file is not None:
    current_sky_file.close()
    current_sky_file = None
| Q2RTX-master | scripts/split_sky_clusters.py |
#!python3
# Copyright (C) 2022, Frank Richter. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# This is a simple IES file parser that outputs a PNG file usable
# as a spotlight emission profile in Q2RTX.
import argparse
import math
import numpngw
import numpy
import scipy.interpolate
arguments = argparse.ArgumentParser()
arguments.add_argument('iesfile', help='input IES file name')
arguments.add_argument('outfile', help='output PNG file name')
arguments.add_argument('--width', '-W', type=int, default=256, help='output image width')
arg_values = arguments.parse_args()
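# Example usage (file names are illustrative):
#   python ies_to_texture.py lamp.ies lamp_profile.png --width 256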
class IesFile:
def __init__(self):
self.keyword_lines = None
self.horz_angles = None
self.vert_angles = None
self.light_values = None
def read(self, f):
# Helper method to get a number of floats from the file
current_values = [] # Hold values from the currently parsed line
def get_floats(num):
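            # read `num` whitespace-separated floats, consuming additional input lines as needed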
nonlocal current_values
result = []
n = 0
while n < num:
if len(current_values) == 0:
line = f.readline().strip()
                    if len(line) == 0:
                        raise Exception("Unexpected end of file or blank line while reading values")
current_values = line.split()
result.append(float(current_values[0]))
current_values = current_values[1:]
n += 1
return result
header_line = f.readline().strip()
        if header_line not in ["IESNA91", "IESNA:LM-63-1995", "IESNA:LM-63-2002"]:
raise Exception(f"Unexpected header line: {header_line}")
# Collect IES keywords for inclusion in .png, to allow identification of source data later
self.keyword_lines = []
TILT = None
while not TILT:
line = f.readline()
if len(line) == 0: break
line = line.strip()
if len(line) > 0:
self.keyword_lines.append(line)
if line.startswith("TILT="):
TILT = line[5:]
break
if TILT is None:
raise Exception("TILT= line missing!")
elif TILT != "NONE":
raise Exception("Unsupported TILT value: {TILT}")
lamps_line = f.readline().strip()
        num_lamps, lumens_per_lamp, candela_mul, num_vert_angles, num_horz_angles, photometric_type, units_type, width, length, height = map(float, lamps_line.split())
self.num_vert_angles = int(num_vert_angles)
self.num_horz_angles = int(num_horz_angles)
if num_lamps != 1:
raise Exception(f"Unsupported number of lamps: {num_lamps}")
if photometric_type != 1:
raise Exception(f"Unsupported photometric type: {photometric_type}")
# "ballast factor", "future use", "input watts": skip
f.readline()
self.vert_angles = get_floats(self.num_vert_angles)
self.horz_angles = get_floats(self.num_horz_angles)
self.light_values = []
for _ in range(0, self.num_horz_angles):
self.light_values.append(get_floats(self.num_vert_angles))
ies_file = IesFile()
ies_file.read(open(arg_values.iesfile, "r"))
if ies_file.num_horz_angles != 1:
raise Exception("Only 1 horizontal angle is currently supported")
# An alternative could be to just average over multiple horizontal angles,
# might work for lights which are almost, but not quite symmetric
max_angle = ies_file.vert_angles[-1]
if max_angle > 180:
raise Exception(f"Unexpected last vertical angle {max_angle}")
elif max_angle > 90:
print(f"Last vertical angle {max_angle} is > 90 deg, values beyond that will be ignored")
if ies_file.horz_angles[0] != 0:
raise Exception(f"Unexpected horizontal angle: {ies_file.horz_angles[0]}")
# Output resolution
res = arg_values.width
# Q2RTX looks up the emission factor by using the cosine of the angle as the coordinate,
# so interpolate for these from the per-angle light values
angle_function = scipy.interpolate.interp1d(ies_file.vert_angles, ies_file.light_values[0], kind='cubic')
output_values = numpy.zeros(res)
for x in range(0, res):
angle = (x / (res - 1)) * 90
if angle <= max_angle:
interp = angle_function(angle)
else:
interp = 0
output_values[x] = interp
# Write image: normalized light values, 16bpc gray scale
max_value = max(output_values)
values_ui16 = ((65535 / max_value) * output_values).astype(numpy.uint16)
values_ui16 = numpy.reshape(values_ui16, (1, -1))
numpngw.write_png(arg_values.outfile, values_ui16, text_list=[('ies_keywords', '\n'.join(ies_file.keyword_lines))])
| Q2RTX-master | scripts/ies_to_texture.py |
#!/usr/bin/python3
import re
import sys
pointers = [
'prethink', 'think', 'blocked', 'touch', 'use', 'pain', 'die',
'moveinfo_endfunc', 'monsterinfo_currentmove', 'monsterinfo_stand',
'monsterinfo_idle', 'monsterinfo_search', 'monsterinfo_walk',
'monsterinfo_run', 'monsterinfo_dodge', 'monsterinfo_attack',
'monsterinfo_melee', 'monsterinfo_sight', 'monsterinfo_checkattack'
]
if __name__ == "__main__":
if len(sys.argv) < 2:
print('Usage: genptr.py <file> [...]')
sys.exit(1)
    exprs = '|'.join(p.replace('_', '\\.') for p in pointers if p != 'moveinfo_endfunc')
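    # matches pointer assignments such as "self->think = SomeFunc" or "ent->monsterinfo.stand = func"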
regex = re.compile(r'->\s*(%s)\s*=\s*&?\s*(\w+)' % exprs, re.ASCII)
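    # matches Move_Calc(..., endfunc) and AngleMove_Calc(..., endfunc) calls that register endfunc pointers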
regex2 = re.compile(r'\b(?:Angle)?Move_Calc\s*\(.+,\s*(\w+)\s*\)', re.ASCII)
types = {}
for p in pointers:
types[p] = []
for a in sys.argv[1:]:
with open(a) as f:
for line in f:
if not line.lstrip().startswith('//'):
match = regex.search(line)
if match:
t = types[match[1].replace('.', '_')]
p = match[2]
if not p in t and p != 'NULL':
t.append(p)
continue
match = regex2.search(line)
if match:
t = types['moveinfo_endfunc']
p = match[1]
if not p in t:
t.append(p)
print('// generated by genptr.py, do not modify')
print('#include "g_ptrs.h"')
decls = []
for k, v in types.items():
for p in v:
if k == 'monsterinfo_currentmove':
decls.append(f'extern int {p};')
else:
decls.append(f'extern void {p}(void);')
for d in sorted(decls, key=str.lower):
print(d)
print('const save_ptr_t save_ptrs[] = {')
for k, v in types.items():
for p in sorted(v, key=str.lower):
amp = '&' if k == 'monsterinfo_currentmove' else ''
print('{ %s, %s%s },' % ('P_' + k, amp, p))
print('};')
print('const int num_save_ptrs = sizeof(save_ptrs) / sizeof(save_ptrs[0]);')
| Q2RTX-master | src/baseq2/genptr.py |
##
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
import os
from pyspark.worker import local_connect_and_auth, main as worker_main
def initialize_gpu_mem():
# CUDA device(s) info
cuda_devices_str = os.environ.get('CUDA_VISIBLE_DEVICES')
python_gpu_disabled = os.environ.get('RAPIDS_PYTHON_ENABLED', 'false').lower() == 'false'
if python_gpu_disabled or not cuda_devices_str:
# Skip gpu initialization due to no CUDA device or python on gpu is disabled.
# One case to come here is the test runs with cpu session in integration tests.
return
print("INFO: Process {} found CUDA visible device(s): {}".format(
os.getpid(), cuda_devices_str))
# Initialize RMM only when requiring to enable pooled or managed memory.
pool_enabled = os.environ.get('RAPIDS_POOLED_MEM_ENABLED', 'false').lower() == 'true'
uvm_enabled = os.environ.get('RAPIDS_UVM_ENABLED', 'false').lower() == 'true'
if pool_enabled:
from cudf import rmm
        '''
        RMM is initialized with its default configuration (pool disabled) when cudf is
        imported as above, so we overwrite that initialization here when pooled memory
        is requested, along with a pool size and max pool size.
        Since the `import` above precedes the `import` in the UDF, our initialization
        will not be overwritten by the UDF's `import`, because Python ignores duplicated
        `import`s.
        '''
import sys
max_size = sys.maxint if sys.version_info.major == 2 else sys.maxsize
pool_size = int(os.environ.get('RAPIDS_POOLED_MEM_SIZE', 0))
pool_max_size = int(os.environ.get('RAPIDS_POOLED_MEM_MAX_SIZE', 0))
if 0 < pool_max_size < pool_size:
raise ValueError("Value of `RAPIDS_POOLED_MEM_MAX_SIZE` should not be less than "
"`RAPIDS_POOLED_MEM_SIZE`.")
if pool_max_size == 0:
pool_max_size = max_size
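        # >> 8 << 8 rounds each size down to a multiple of 256 bytes (RMM pool sizes must be 256-byte aligned)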
pool_max_size = pool_max_size >> 8 << 8
pool_size = pool_size >> 8 << 8
print("DEBUG: Pooled memory, pool size: {} MiB, max size: {} MiB".format(
pool_size / 1024.0 / 1024,
('unlimited' if pool_max_size == max_size else pool_max_size / 1024.0 / 1024)))
base_t = rmm.mr.ManagedMemoryResource if uvm_enabled else rmm.mr.CudaMemoryResource
rmm.mr.set_current_device_resource(rmm.mr.PoolMemoryResource(base_t(), pool_size, pool_max_size))
elif uvm_enabled:
from cudf import rmm
rmm.mr.set_current_device_resource(rmm.mr.ManagedMemoryResource())
else:
# Do nothing, whether to use RMM (default mode) or not depends on UDF definition.
pass
if __name__ == '__main__':
# GPU context setup
initialize_gpu_mem()
# Code below is all copied from Pyspark/worker.py
java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
(sock_file, sock) = local_connect_and_auth(java_port, auth_secret)
# Use the `sock_file` as both input and output will cause EOFException in JVM side,
# So open a new file object on the same socket as output, similar behavior
# with that in `pyspark/daemon.py`.
buffer_size = int(os.environ.get("SPARK_BUFFER_SIZE", 65536))
outfile = os.fdopen(os.dup(sock.fileno()), "wb", buffer_size)
worker_main(sock_file, outfile)
| spark-rapids-branch-23.10 | python/rapids/worker.py |