python_code (stringlengths 0-679k) | repo_name (stringlengths 9-41) | file_path (stringlengths 6-149)
---|---|---
from . import bench
| cub-master | benchmarks/scripts/cub/__init__.py |
class Build:
def __init__(self, code, elapsed):
self.code = code
self.elapsed = elapsed
def __repr__(self):
return "Build(code = {}, elapsed = {:.4f}s)".format(self.code, self.elapsed)
| cub-master | benchmarks/scripts/cub/bench/build.py |
import os
import sys
import itertools
class Range:
def __init__(self, definition, label, low, high, step):
self.definition = definition
self.label = label
self.low = low
self.high = high
self.step = step
class RangePoint:
def __init__(self, definition, label, value):
self.definition = definition
self.label = label
self.value = value
class VariantPoint:
def __init__(self, range_points):
self.range_points = range_points
def label(self):
if self.is_base():
return 'base'
return '.'.join(["{}_{}".format(point.label, point.value) for point in self.range_points])
def is_base(self):
return len(self.range_points) == 0
def tuning(self):
if self.is_base():
return ""
tuning = "#pragma once\n\n"
for point in self.range_points:
tuning += "#define {} {}\n".format(point.definition, point.value)
return tuning
class BasePoint(VariantPoint):
def __init__(self):
VariantPoint.__init__(self, [])
def parse_ranges(columns):
ranges = []
for column in columns:
definition, label_range = column.split('|')
label, range = label_range.split('=')
start, end, step = [int(x) for x in range.split(':')]
ranges.append(Range(definition, label, start, end + 1, step))
return ranges
def parse_meta():
if not os.path.isfile("cub_bench_meta.csv"):
print("cub_bench_meta.csv not found", file=sys.stderr)
print("make sure to run the script from the CUB build directory",
file=sys.stderr)
benchmarks = {}
ctk_version = "0.0.0"
cub_revision = "0.0-0-0000"
with open("cub_bench_meta.csv", "r") as f:
lines = f.readlines()
for line in lines:
columns = line.split(',')
name = columns[0]
if name == "ctk_version":
ctk_version = columns[1].rstrip()
elif name == "cub_revision":
cub_revision = columns[1].rstrip()
else:
benchmarks[name] = parse_ranges(columns[1:])
return ctk_version, cub_revision, benchmarks
class Config:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.ctk, cls._instance.cub, cls._instance.benchmarks = parse_meta()
return cls._instance
def label_to_variant_point(self, algname, label):
if label == "base":
return BasePoint()
label_to_definition = {}
for param_space in self.benchmarks[algname]:
label_to_definition[param_space.label] = param_space.definition
points = []
for point in label.split('.'):
label, value = point.split('_')
points.append(RangePoint(label_to_definition[label], label, int(value)))
return VariantPoint(points)
def variant_space(self, algname):
variants = []
for param_space in self.benchmarks[algname]:
variants.append([])
for value in range(param_space.low, param_space.high, param_space.step):
variants[-1].append(RangePoint(param_space.definition, param_space.label, value))
return (VariantPoint(points) for points in itertools.product(*variants))
def variant_space_size(self, algname):
num_variants = 1
for param_space in self.benchmarks[algname]:
num_variants = num_variants * len(range(param_space.low, param_space.high, param_space.step))
return num_variants
| cub-master | benchmarks/scripts/cub/bench/config.py |
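A minimal sketch, not part of the repository, of how one meta column expands into tuning variants. The column string `TUNE_THREADS_PER_BLOCK|tpb=128:512:128` and the `cub.bench.config` import path are illustrative assumptions (the benchmarks/scripts directory is assumed to be on PYTHONPATH):
import itertools
from cub.bench.config import parse_ranges, RangePoint, VariantPoint

# One compile-time axis in "DEFINITION|label=low:high:step" form (made-up values).
ranges = parse_ranges(["TUNE_THREADS_PER_BLOCK|tpb=128:512:128"])
# Expand each range into its points, then take the cross product, as variant_space does.
axes = [[RangePoint(r.definition, r.label, v)
         for v in range(r.low, r.high, r.step)] for r in ranges]
for variant in (VariantPoint(points) for points in itertools.product(*axes)):
    print(variant.label())   # tpb_128, tpb_256, tpb_384, tpb_512
    print(variant.tuning())  # "#pragma once" followed by one #define per point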
import os
import time
import signal
import subprocess
from .build import Build
from .config import Config
from .storage import Storage
from .logger import *
def create_builds_table(conn):
with conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS builds (
ctk TEXT NOT NULL,
cub TEXT NOT NULL,
bench TEXT NOT NULL,
code TEXT NOT NULL,
elapsed REAL
);
""")
class CMakeCache:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
create_builds_table(Storage().connection())
return cls._instance
def pull_build(self, bench):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
query = "SELECT code, elapsed FROM builds WHERE ctk = ? AND cub = ? AND bench = ?;"
result = conn.execute(query, (ctk, cub, bench.label())).fetchone()
if result:
code, elapsed = result
return Build(int(code), float(elapsed))
return result
def push_build(self, bench, build):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
conn.execute("INSERT INTO builds (ctk, cub, bench, code, elapsed) VALUES (?, ?, ?, ?, ?);",
(ctk, cub, bench.label(), build.code, build.elapsed))
class CMake:
def __init__(self):
pass
def do_build(self, bench, timeout):
logger = Logger()
try:
if not bench.is_base():
with open(bench.exe_name() + ".h", "w") as f:
f.writelines(bench.definitions())
cmd = ["cmake", "--build", ".", "--target", bench.exe_name()]
logger.info("starting build for {}: {}".format(bench.label(), " ".join(cmd)))
begin = time.time()
p = subprocess.Popen(cmd,
start_new_session=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.wait(timeout=timeout)
elapsed = time.time() - begin
logger.info("finished build for {} ({}) in {}s".format(bench.label(), p.returncode, elapsed))
return Build(p.returncode, elapsed)
except subprocess.TimeoutExpired:
logger.info("build for {} reached timeout of {}s".format(bench.label(), timeout))
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
return Build(424242, float('inf'))
def build(self, bench):
logger = Logger()
timeout = None
cache = CMakeCache()
if bench.is_base():
# Only base build can be pulled from cache
build = cache.pull_build(bench)
if build:
logger.info("found cached base build for {}".format(bench.label()))
if bench.is_base():
if not os.path.exists("bin/{}".format(bench.exe_name())):
self.do_build(bench, None)
return build
else:
base_build = self.build(bench.get_base())
if base_build.code != 0:
raise Exception("Base build failed")
timeout = base_build.elapsed * 10
build = self.do_build(bench, timeout)
cache.push_build(bench, build)
return build
def clean(self):
cmd = ["cmake", "--build", ".", "--target", "clean"]
p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.wait()
if p.returncode != 0:
raise Exception("Unable to clean build directory")
| cub-master | benchmarks/scripts/cub/bench/cmake.py |
from .config import *
from .storage import *
from .bench import Bench
from .cmake import CMake
from .score import *
from .search import *
| cub-master | benchmarks/scripts/cub/bench/__init__.py |
import logging
class Logger:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler('cub_bench_meta.log')
file_handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
logger.addHandler(file_handler)
cls._instance.logger = logger
return cls._instance
def info(self, message):
self.logger.info(message)
| cub-master | benchmarks/scripts/cub/bench/logger.py |
import os
import json
import time
import fpzip
import signal
import itertools
import subprocess
import numpy as np
from .cmake import CMake
from .config import *
from .storage import Storage
from .score import *
from .logger import *
class JsonCache:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.bench_cache = {}
cls._instance.device_cache = {}
return cls._instance
def get_bench(self, algname):
if algname not in self.bench_cache:
result = subprocess.check_output(
[os.path.join('.', 'bin', algname + '.base'), "--jsonlist-benches"])
self.bench_cache[algname] = json.loads(result)
return self.bench_cache[algname]
def get_device(self, algname):
if algname not in self.device_cache:
result = subprocess.check_output(
[os.path.join('.', 'bin', algname + '.base'), "--jsonlist-devices"])
devices = json.loads(result)["devices"]
if len(devices) != 1:
raise Exception(
"NVBench doesn't work well with multiple GPUs, use `CUDA_VISIBLE_DEVICES`")
self.device_cache[algname] = devices[0]
return self.device_cache[algname]
def json_benches(algname):
return JsonCache().get_bench(algname)
def create_benches_tables(conn, bench_axes):
for algorithm_name in bench_axes:
axes = bench_axes[algorithm_name]
columns = ", ".join(["\"{}\" TEXT".format(name) for name in axes])
if axes:
columns = ", " + columns
with conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS "{0}" (
ctk TEXT NOT NULL,
cub TEXT NOT NULL,
gpu TEXT NOT NULL,
variant TEXT NOT NULL,
elapsed REAL,
center REAL,
samples BLOB
{1}
);
""".format(algorithm_name, columns))
def read_json(filename):
with open(filename, "r") as f:
file_root = json.load(f)
return file_root
def extract_filename(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "filename", summary_data))
assert (value_data["type"] == "string")
return value_data["value"]
def extract_size(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "size", summary_data))
assert (value_data["type"] == "int64")
return int(value_data["value"])
def parse_samples_meta(state):
summaries = state["summaries"]
if not summaries:
return None, None
summary = next(filter(lambda s: s["tag"] == "nv/json/bin:nv/cold/sample_times",
summaries),
None)
if not summary:
return None, None
sample_filename = extract_filename(summary)
sample_count = extract_size(summary)
return sample_count, sample_filename
def parse_samples(state):
sample_count, samples_filename = parse_samples_meta(state)
if not sample_count or not samples_filename:
return np.array([], dtype=np.float32)
with open(samples_filename, "rb") as f:
samples = np.fromfile(f, "<f4")
samples.sort()
assert (sample_count == len(samples))
return samples
def read_samples(json_path):
result = {}
try:
meta = read_json(json_path)
benches = meta["benchmarks"]
if len(benches) != 1:
raise Exception("Executable should contain exactly one benchmark")
for bench in benches:
bench_name = bench["name"]
result[bench_name] = {}
states = bench["states"]
if len(states) != 1:
return np.array([], dtype=np.float32)
for state in states:
return parse_samples(state)
except Exception:
# Malformed or missing result file; fall through and return empty samples.
pass
return np.array([], dtype=np.float32)
def device_json(algname):
return JsonCache().get_device(algname)
def get_device_name(device):
gpu_name = device["name"]
bw = device["global_memory_bus_width"]
sms = device["number_of_sms"]
ecc = "eccon" if device["ecc_state"] else "eccoff"
name = "{} ({}, {}, {})".format(gpu_name, bw, sms, ecc)
return name.replace('NVIDIA ', '')
class BenchCache:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.existing_tables = set()
return cls._instance
def create_table_if_not_exists(self, conn, bench):
bench_base = bench.get_base()
if bench_base.algorithm_name() not in self.existing_tables:
create_benches_tables(conn, {bench_base.algorithm_name(): bench_base.axes_names()})
self.existing_tables.add(bench_base.algorithm_name())
def push_bench(self, bench, workload_point, elapsed, distribution_samples, distribution_center):
config = Config()
ctk = config.ctk
cub = config.cub
gpu = get_device_name(device_json(bench.algname))
conn = Storage().connection()
self.create_table_if_not_exists(conn, bench)
columns = ""
placeholders = ""
values = []
if workload_point:
for axis in workload_point:
name, value = axis.split('=')
columns = columns + ", \"{}\"".format(name)
placeholders = placeholders + ", ?"
values.append(value)
values = tuple(values)
samples = fpzip.compress(distribution_samples)
to_insert = (ctk, cub, gpu, bench.variant_name(),
elapsed, distribution_center, samples) + values
with conn:
query = """
INSERT INTO "{0}" (ctk, cub, gpu, variant, elapsed, center, samples {1})
VALUES (?, ?, ?, ?, ?, ?, ? {2})
""".format(bench.algorithm_name(), columns, placeholders)
conn.execute(query, to_insert)
def pull_column(self, column, bench, workload_point):
config = Config()
ctk = config.ctk
cub = config.cub
gpu = get_device_name(device_json(bench.algname))
conn = Storage().connection()
self.create_table_if_not_exists(conn, bench)
with conn:
point_checks = ""
if workload_point:
for axis in workload_point:
name, value = axis.split('=')
point_checks = point_checks + \
" AND \"{}\" = \"{}\"".format(name, value)
query = """
SELECT {0} FROM "{1}" WHERE ctk = ? AND cub = ? AND gpu = ? AND variant = ?{2};
""".format(column, bench.algorithm_name(), point_checks)
return conn.execute(query, (ctk, cub, gpu, bench.variant_name())).fetchone()
def pull_center(self, bench, workload_point):
return self.pull_column('center', bench, workload_point)
def pull_elapsed(self, bench, workload_point):
return self.pull_column('elapsed', bench, workload_point)
class Bench:
def __init__(self, algorithm_name, variant, ct_workload):
self.algname = algorithm_name
self.variant = variant
self.ct_workload = ct_workload
def label(self):
return self.algname + '.' + self.variant.label()
def variant_name(self):
return self.variant.label()
def algorithm_name(self):
return self.algname
def is_base(self):
return self.variant.is_base()
def get_base(self):
return BaseBench(self.algorithm_name())
def exe_name(self):
if self.is_base():
return self.algorithm_name() + '.base'
return self.algorithm_name() + '.variant'
def axes_names(self):
result = json_benches(self.algname)
if len(result["benchmarks"]) != 1:
raise Exception("Executable should contain exactly one benchmark")
names = []
for axis in result["benchmarks"][0]["axes"]:
name = axis["name"]
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
names.append(name)
return names
def axes_values(self, sub_space, ct):
result = json_benches(self.algname)
if len(result["benchmarks"]) != 1:
raise Exception("Executable should contain exactly one benchmark")
space = []
for axis in result["benchmarks"][0]["axes"]:
name = axis["name"]
if ct:
if not '{ct}' in name:
continue
else:
if '{ct}' in name:
continue
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
axis_space = []
if name in sub_space:
for value in sub_space[name]:
axis_space.append("{}={}".format(name, value))
else:
for value in axis["values"]:
axis_space.append("{}={}".format(name, value["input_string"]))
space.append(axis_space)
return space
def axes_value_descriptions(self):
result = json_benches(self.algname)
if len(result["benchmarks"]) != 1:
raise Exception("Executable should contain exactly one benchmark")
descriptions = {}
for axis in result["benchmarks"][0]["axes"]:
name = axis["name"]
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
descriptions[name] = {}
for value in axis["values"]:
descriptions[name][value["input_string"]] = value["description"]
return descriptions
def axis_values(self, axis_name):
result = json_benches(self.algname)
if len(result["benchmarks"]) != 1:
raise Exception("Executable should contain exactly one benchmark")
for axis in result["benchmarks"][0]["axes"]:
name = axis["name"]
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
if name != axis_name:
continue
values = []
for value in axis["values"]:
values.append(value["input_string"])
return values
return []
def build(self):
if not self.is_base():
self.get_base().build()
build = CMake().build(self)
return build.code == 0
def definitions(self):
definitions = self.variant.tuning()
definitions = definitions + "\n"
descriptions = self.axes_value_descriptions()
for ct_component in self.ct_workload:
ct_axis_name, ct_value = ct_component.split('=')
description = descriptions[ct_axis_name][ct_value]
ct_axis_name = ct_axis_name.replace('{ct}', '')
definitions = definitions + "#define TUNE_{} {}\n".format(ct_axis_name, description)
return definitions
def do_run(self, point, timeout):
logger = Logger()
try:
result_path = 'result.json'
if os.path.exists(result_path):
os.remove(result_path)
bench_path = os.path.join('.', 'bin', self.exe_name())
cmd = [bench_path]
for value in point:
cmd.append('-a')
cmd.append(value)
cmd.append('--jsonbin')
cmd.append(result_path)
# Allow noise because we rely on min samples
cmd.append("--max-noise")
cmd.append("100")
# Need at least 70 samples
cmd.append("--min-samples")
cmd.append("70")
# NVBench is currently broken for multiple GPUs, use `CUDA_VISIBLE_DEVICES`
cmd.append("-d")
cmd.append("0")
logger.info("starting benchmark {} with {}: {}".format(self.label(), point, " ".join(cmd)))
begin = time.time()
p = subprocess.Popen(cmd,
start_new_session=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.wait(timeout=timeout)
elapsed = time.time() - begin
logger.info("finished benchmark {} with {} ({}) in {}s".format(self.label(), point, p.returncode, elapsed))
return read_samples(result_path), elapsed
except subprocess.TimeoutExpired:
logger.info("benchmark {} with {} reached timeout of {}s".format(self.label(), point, timeout))
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
return np.array([], dtype=np.float32), float('inf')
def ct_workload_space(self, sub_space):
if not self.build():
raise Exception("Unable to build benchmark: " + self.label())
return list(itertools.product(*self.axes_values(sub_space, True)))
def rt_workload_space(self, sub_space):
if not self.build():
raise Exception("Unable to build benchmark: " + self)
return list(itertools.product(*self.axes_values(sub_space, False)))
def elapsed(self, workload_point, estimator):
self.run(workload_point, estimator)
cache = BenchCache()
cached_elapsed = cache.pull_elapsed(self, workload_point)
if cached_elapsed:
return float(cached_elapsed[0])
return float('inf')
def run(self, workload_point, estimator):
logger = Logger()
cache = BenchCache()
cached_center = cache.pull_center(self, workload_point)
if cached_center:
logger.info("found benchmark {} ({}) in cache".format(self.label(), workload_point))
return float(cached_center[0])
timeout = None
if not self.is_base():
base = self.get_base()
base_elapsed = base.elapsed(workload_point, estimator)
timeout = base_elapsed * 50
distribution_samples, elapsed = self.do_run(workload_point, timeout)
distribution_center = estimator(distribution_samples)
cache.push_bench(self, workload_point, elapsed,
distribution_samples, distribution_center)
return distribution_center
def speedup(self, workload_point, base_estimator, variant_estimator):
if self.is_base():
return 1.0
base = self.get_base()
base_center = base.run(workload_point, base_estimator)
self_center = self.run(workload_point, variant_estimator)
return base_center / self_center
def score(self, ct_workload, rt_workload_space, base_estimator, variant_estimator):
if self.is_base():
return 1.0
# Score should not be cached in the database because the number of values
# on a given axis can change between runs, which would affect the score.
rt_axes_values = {}
for rt_workload in rt_workload_space:
for pair in rt_workload:
name, value = pair.split('=')
if not name in rt_axes_values:
rt_axes_values[name] = set()
rt_axes_values[name].add(value)
for rt_axis in rt_axes_values:
filtered_values = rt_axes_values[rt_axis]
rt_axes_values[rt_axis] = []
for value in self.axis_values(rt_axis):
if value in filtered_values:
rt_axes_values[rt_axis].append(value)
rt_axes_ids = compute_axes_ids(rt_axes_values)
weight_matrix = compute_weight_matrix(rt_axes_values, rt_axes_ids)
score = 0
for rt_workload in rt_workload_space:
weight = get_workload_weight(rt_workload, rt_axes_values, rt_axes_ids, weight_matrix)
speedup = self.speedup(ct_workload + rt_workload, base_estimator, variant_estimator)
score = score + weight * speedup
return score
class BaseBench(Bench):
def __init__(self, algname):
super().__init__(algname, BasePoint(), [])
| cub-master | benchmarks/scripts/cub/bench/bench.py |
import os
import fpzip
import sqlite3
import numpy as np
import pandas as pd
db_name = "cub_bench_meta.db"
def blob_to_samples(blob):
return np.squeeze(fpzip.decompress(blob))
class StorageBase:
def __init__(self, db_path):
self.conn = sqlite3.connect(db_path)
def connection(self):
return self.conn
def exists(self):
return os.path.exists(db_name)
def algnames(self):
with self.conn:
result = self.conn.execute("""
SELECT name FROM sqlite_master WHERE type='table' AND name LIKE 'cub.bench.%';
""").fetchall()
algnames = [algname[0] for algname in result]
return algnames
def alg_to_df(self, algname):
with self.conn:
df = pd.read_sql_query("SELECT * FROM \"{}\"".format(algname), self.conn)
df['samples'] = df['samples'].apply(blob_to_samples)
return df
def store_df(self, algname, df):
df['samples'] = df['samples'].apply(fpzip.compress)
df.to_sql(algname, self.conn, if_exists='replace', index=False)
class Storage:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.base = StorageBase(db_name)
return cls._instance
def connection(self):
return self.base.connection()
def exists(self):
return self.base.exists()
def algnames(self):
return self.base.algnames()
def alg_to_df(self, algname):
return self.base.alg_to_df(algname)
| cub-master | benchmarks/scripts/cub/bench/storage.py |
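A short read-back sketch, assuming benchmarks have already been recorded and the script runs from the CUB build directory holding `cub_bench_meta.db`; it only uses the Storage helpers defined above:
from cub.bench.storage import Storage

storage = Storage()
if storage.exists():
    for algname in storage.algnames():
        # The 'samples' blobs are decompressed back into numpy arrays by alg_to_df.
        df = storage.alg_to_df(algname)
        print(algname, len(df), "rows")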
import re
import argparse
import numpy as np
from .bench import Bench, BaseBench
from .config import Config
from .storage import Storage
from .cmake import CMake
def list_axes(benchmarks, sub_space):
print("### Axes")
axes = {}
for algname in benchmarks:
bench = BaseBench(algname)
for axis in bench.axes_values(sub_space, True) + bench.axes_values(sub_space, False):
for point in axis:
axis, value = point.split('=')
if axis in axes:
axes[axis].add(value)
else:
axes[axis] = {value}
for axis in axes:
print(" * `{}`".format(axis))
for value in axes[axis]:
print(" * `{}`".format(value))
def list_benches():
print("### Benchmarks")
config = Config()
for algname in config.benchmarks:
space_size = config.variant_space_size(algname)
print(" * `{}`: {} variants: ".format(algname, space_size))
for param_space in config.benchmarks[algname]:
param_name = param_space.label
param_rng = (param_space.low, param_space.high, param_space.step)
print(" * `{}`: {}".format(param_name, param_rng))
def parse_sub_space(args):
sub_space = {}
for axis in args:
name, value = axis.split('=')
if '[' in value:
value = value.replace('[', '').replace(']', '')
values = value.split(',')
else:
values = [value]
sub_space[name] = values
return sub_space
def parse_arguments():
parser = argparse.ArgumentParser(
description="Runs benchmarks and stores results in a database.")
parser.add_argument('-R', type=str, default='.*',
help="Regex for benchmarks selection.")
parser.add_argument('-a', '--args', action='append',
type=str, help="Parameter in the format `Param=Value`.")
parser.add_argument(
'--list-axes', action=argparse.BooleanOptionalAction, help="Show available parameters.")
parser.add_argument(
'--list-benches', action=argparse.BooleanOptionalAction, help="Show available benchmarks.")
return parser.parse_args()
def run_benches(benchmarks, workload_sub_space, regex, seeker):
pattern = re.compile(regex)
for algname in benchmarks:
if pattern.match(algname):
bench = BaseBench(algname)
ct_workload_space = bench.ct_workload_space(workload_sub_space)
rt_workload_space = bench.rt_workload_space(workload_sub_space)
seeker(algname, ct_workload_space, rt_workload_space)
def search(seeker):
args = parse_arguments()
if not Storage().exists():
CMake().clean()
config = Config()
print("ctk: ", config.ctk)
print("cub: ", config.cub)
workload_sub_space = {}
if args.args:
workload_sub_space = parse_sub_space(args.args)
if args.list_axes:
list_axes(config.benchmarks, workload_sub_space)
return
if args.list_benches:
list_benches()
return
run_benches(config.benchmarks, workload_sub_space, args.R, seeker)
class MedianCenterEstimator:
def __init__(self):
pass
def __call__(self, samples):
if len(samples) == 0:
return float("inf")
return float(np.median(samples))
class BruteForceSeeker:
def __init__(self, base_center_estimator, variant_center_estimator):
self.base_center_estimator = base_center_estimator
self.variant_center_estimator = variant_center_estimator
def __call__(self, algname, ct_workload_space, rt_workload_space):
# Materialize the variant generator so it can be iterated once per ct_workload.
variants = list(Config().variant_space(algname))
for ct_workload in ct_workload_space:
for variant in variants:
bench = Bench(algname, variant, list(ct_workload))
if bench.build():
score = bench.score(ct_workload,
rt_workload_space,
self.base_center_estimator,
self.variant_center_estimator)
print(bench.label(), score)
| cub-master | benchmarks/scripts/cub/bench/search.py |
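A possible driver script, shown here as an assumption rather than a file from the repository; it wires the seeker and estimator above into `search`, and must be launched from the CUB build directory so that `cub_bench_meta.csv` is found:
from cub.bench import search, BruteForceSeeker, MedianCenterEstimator

if __name__ == '__main__':
    estimator = MedianCenterEstimator()
    # Enumerate benchmarks matching -R, build every variant, and print its score.
    search(BruteForceSeeker(estimator, estimator))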
import math
import numpy as np
def importance_function(x):
return 1 - math.exp(-x)
def x_by_importance(y):
return -math.log(1 - y)
def compute_weights(num_values):
least_importance = 0.6
most_importance = 0.999
assert(least_importance < most_importance)
assert(least_importance >= 0 and least_importance < 1)
assert(most_importance > 0 and most_importance < 1)
begin = x_by_importance(least_importance)
end = x_by_importance(most_importance)
rng = end - begin
step = rng / num_values
weights = np.array([begin + x * step for x in range(num_values)])
weights = weights / sum(weights)
return weights
def io_weights(values):
return compute_weights(len(values))
def ei_weights(values):
return np.ones(len(values))
def compute_axes_ids(rt_axes_values):
rt_axes_ids = {}
axis_id = 0
for rt_axis in rt_axes_values:
rt_axes_ids[rt_axis] = axis_id
axis_id = axis_id + 1
return rt_axes_ids
def compute_weight_matrix(rt_axes_values, rt_axes_ids):
rt_axes_weights = {}
first_rt_axis = True
first_rt_axis_name = None
for rt_axis in rt_axes_values:
if first_rt_axis:
first_rt_axis_name = rt_axis
first_rt_axis = False
values = rt_axes_values[rt_axis]
rt_axes_values[rt_axis] = values
if '{io}' in rt_axis:
rt_axes_weights[rt_axis] = io_weights(values)
else:
rt_axes_weights[rt_axis] = ei_weights(values)
num_rt_axes = len(rt_axes_ids)
for rt_axis in rt_axes_weights:
shape = [1] * num_rt_axes
shape[rt_axes_ids[rt_axis]] = -1
rt_axes_weights[rt_axis] = rt_axes_weights[rt_axis].reshape(*shape)
weights_matrix = rt_axes_weights[first_rt_axis_name]
for rt_axis in rt_axes_weights:
if rt_axis == first_rt_axis_name:
continue
weights_matrix = weights_matrix * rt_axes_weights[rt_axis]
return weights_matrix / np.sum(weights_matrix)
def get_workload_coordinates(rt_workload, rt_axes_values, rt_axes_ids):
coordinates = [0] * len(rt_axes_ids)
for point in rt_workload:
rt_axis, rt_value = point.split('=')
coordinates[rt_axes_ids[rt_axis]] = rt_axes_values[rt_axis].index(rt_value)
return coordinates
def get_workload_weight(rt_workload, rt_axes_values, rt_axes_ids, weights_matrix):
coordinates = get_workload_coordinates(rt_workload, rt_axes_values, rt_axes_ids)
return weights_matrix[tuple(coordinates)]
| cub-master | benchmarks/scripts/cub/bench/score.py |
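An illustrative check, with made-up axis names, of how the helpers above combine an importance-weighted `{io}` axis with an evenly weighted one into a normalized weight matrix:
import numpy as np
from cub.bench.score import compute_axes_ids, compute_weight_matrix, get_workload_weight

rt_axes_values = {'Elements{io}[pow2]': ['16', '20', '24', '28'],
                  'Entropy': ['1.000', '0.544']}
ids = compute_axes_ids(rt_axes_values)
weights = compute_weight_matrix(rt_axes_values, ids)
print(weights.shape)  # (4, 2); entries sum to 1 after normalization
# Larger problem sizes on the {io} axis receive more weight in the final score.
print(get_workload_weight(('Elements{io}[pow2]=28', 'Entropy=1.000'),
                          rt_axes_values, ids, weights))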
import pyniNVCategory
import nvstrings as nvs
def to_device(strs):
"""Create a nvcategory object from a list of strings."""
cptr = pyniNVCategory.n_createCategoryFromHostStrings(strs)
return nvcategory(cptr)
def from_strings(*args):
"""Create a nvcategory object from a nvstrings object."""
strs = []
for arg in args:
strs.append(arg)
cptr = pyniNVCategory.n_createCategoryFromNVStrings(strs)
return nvcategory(cptr)
def from_strings_list(list):
"""Create a nvcategory object from a list of nvstrings."""
cptr = pyniNVCategory.n_createCategoryFromNVStrings(list)
return nvcategory(cptr)
class nvcategory:
"""
Instance manages a dictionary of strings (keys) in device memory
and a mapping of indexes (values).
"""
#
m_cptr = 0
def __init__(self, cptr):
"""For internal use only."""
self.m_cptr = cptr
def __del__(self):
pyniNVCategory.n_destroyCategory(self.m_cptr)
def __str__(self):
return str(self.keys())
def __repr__(self):
return "<nvcategory keys={},values={}>".format(
self.keys_size(), self.size())
def size(self):
"""
The number of values.
Returns
-------
int: number of values
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.values())
print(c.size())
Output:
.. code-block:: python
[2, 0, 2, 1]
4
"""
return pyniNVCategory.n_size(self.m_cptr)
def keys_size(self):
"""
The number of keys.
Returns
-------
int: number of keys
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.keys())
print(c.keys_size())
Output:
.. code-block:: python
['aaa','dddd','eee']
3
"""
return pyniNVCategory.n_keys_size(self.m_cptr)
def keys(self):
"""
Return the unique strings for this category as nvstrings instance.
Returns
-------
nvstrings: keys
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.keys())
Output:
.. code-block:: python
['aaa','dddd','eee']
"""
rtn = pyniNVCategory.n_get_keys(self.m_cptr)
if rtn is not None:
rtn = nvs.nvstrings(rtn)
return rtn
def indexes_for_key(self, key, devptr=0):
"""
Return all index values for given key.
Parameters
----------
key : str
key whose values should be returned
devptr : GPU memory pointer
Where index values will be written.
Must be able to hold int32 values for this key.
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.indexes_for_key('aaa'))
print(c.indexes_for_key('eee'))
Output:
.. code-block:: python
[1]
[0, 2]
"""
return pyniNVCategory.n_get_indexes_for_key(self.m_cptr, key, devptr)
def value_for_index(self, idx):
"""
Return the category value for the given index.
Parameters
----------
idx : int
index value to retrieve
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.value_for_index(3))
Output:
.. code-block:: python
1
"""
return pyniNVCategory.n_get_value_for_index(self.m_cptr, idx)
def value(self, str):
"""
Return the category value for the given string.
Parameters
----------
str : str
key to retrieve
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.value('aaa'))
print(c.value('eee'))
Output:
.. code-block:: python
0
2
"""
return pyniNVCategory.n_get_value_for_string(self.m_cptr, str)
def values(self, devptr=0):
"""
Return all values for this instance.
Parameters
----------
devptr : GPU memory pointer
Where index values will be written.
Must be able to hold size() of int32 values.
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.values())
Output:
.. code-block:: python
[2, 0, 2, 1]
"""
return pyniNVCategory.n_get_values(self.m_cptr, devptr)
def add_strings(self, nvs):
"""
Create new category incorporating specified strings.
This will return a new nvcategory with new key values.
The index values will appear as if appended.
Parameters
----------
nvs : nvstrings
New strings to be added.
Examples
--------
.. code-block:: python
import nvcategory, nvstrings
s1 = nvstrings.to_device(["eee","aaa","eee","dddd"])
s2 = nvstrings.to_device(["ggg","eee","aaa"])
c1 = nvcategory.from_strings(s1)
c2 = c1.add_strings(s2)
print(c1.keys())
print(c1.values())
print(c2.keys())
print(c2.values())
Output:
.. code-block:: python
['aaa','dddd','eee']
[2, 0, 2, 1]
['aaa','dddd','eee','ggg']
[2, 0, 2, 1, 3, 2, 0]
"""
rtn = pyniNVCategory.n_add_strings(self.m_cptr, nvs)
if rtn is not None:
rtn = nvcategory(rtn)
return rtn
def remove_strings(self, nvs):
"""
Create new category without the specified strings.
The returned category will have new set of key values and indexes.
Parameters
----------
nvs : nvstrings
strings to be removed.
Examples
--------
.. code-block:: python
import nvcategory, nvstrings
s1 = nvstrings.to_device(["eee","aaa","eee","dddd"])
s2 = nvstrings.to_device(["aaa"])
c1 = nvcategory.from_strings(s1)
c2 = c1.remove_strings(s2)
print(c1.keys())
print(c1.values())
print(c2.keys())
print(c2.values())
Output:
.. code-block:: python
['aaa','dddd','eee']
[2, 0, 2, 1]
['dddd', 'eee']
[1, 1, 0]
"""
rtn = pyniNVCategory.n_remove_strings(self.m_cptr, nvs)
if rtn is not None:
rtn = nvcategory(rtn)
return rtn
def to_strings(self):
"""
Return nvstrings instance represented by the values in this instance.
Returns
-------
nvstrings: full strings list based on values indexes
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.keys())
print(c.values())
print(c.to_strings())
Output:
.. code-block:: python
['aaa','dddd','eee']
[2, 0, 2, 1]
['eee','aaa','eee','dddd']
"""
rtn = pyniNVCategory.n_to_strings(self.m_cptr)
if rtn is not None:
rtn = nvs.nvstrings(rtn)
return rtn
def gather_strings(self, indexes, count=0):
"""
Return nvstrings instance represented using the specified indexes.
Parameters
----------
indexes : List of ints or GPU memory pointer
0-based indexes of keys to return as an nvstrings object
count : int
Number of ints if indexes parm is a device pointer.
Otherwise it is ignored.
Returns
-------
nvstrings: strings list based on indexes
Examples
--------
.. code-block:: python
import nvcategory
c = nvcategory.to_device(["eee","aaa","eee","dddd"])
print(c.keys())
print(c.values())
print(c.gather_strings([0,2,0]))
Output:
.. code-block:: python
['aaa','dddd','eee']
[2, 0, 2, 1]
['aaa','eee','aaa']
"""
rtn = pyniNVCategory.n_gather_strings(self.m_cptr, indexes, count)
if rtn is not None:
rtn = nvs.nvstrings(rtn)
return rtn
| nvstrings-master | nvcategory.py |
import pyniNVStrings
def to_device(strs):
"""Create nvstrings instance from list of Python strings."""
cptr = pyniNVStrings.n_createFromHostStrings(strs)
return nvstrings(cptr)
def from_csv(csv, column, lines=0, flags=0):
"""
Reads a column of values from a CSV file into a new nvstrings instance.
The CSV file must be formatted as UTF-8.
Parameters
----------
csv : str
Path to the csv file from which to load data
column : int
0-based index of the column to read into an nvstrings object
lines : int
maximum number of lines to read from the file
flags : int
values may be combined
1 - sort by length
2 - sort by name
8 - nulls are empty strings
Returns
-------
A new nvstrings instance pointing to strings loaded onto the GPU
Examples
--------
For CSV file (file.csv) containing 2 rows and 3 columns:
header1,header2,header3
r1c1,r1c2,r1c3
r2c1,r2c2,r2c3
.. code-block:: python
import nvstrings
s = nvstrings.from_csv("file.csv",2)
print(s)
Output:
.. code-block:: python
['r1c3','r2c3']
"""
rtn = pyniNVStrings.n_createFromCSV(csv, column, lines, flags)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def free(dstrs):
"""Force free resources for the specified instance."""
pyniNVStrings.n_destroyStrings(dstrs.m_cptr)
dstrs.m_cptr = 0
def bind_cpointer(cptr):
"""Bind an NVStrings C-pointer to a new instance."""
rtn = None
if cptr != 0:
rtn = nvstrings(cptr)
return rtn
# this will be documented with all the public methods
class nvstrings:
"""
Instance manages a list of strings in device memory.
Operations are across all of the strings and their results reside in
device memory. Strings in the list are immutable.
Methods that modify any string will create a new nvstrings instance.
"""
#
m_cptr = 0
def __init__(self, cptr):
"""
Use to_device() to create new instance from Python array of strings.
"""
self.m_cptr = cptr
def __del__(self):
pyniNVStrings.n_destroyStrings(self.m_cptr)
self.m_cptr = 0
def __str__(self):
return str(pyniNVStrings.n_createHostStrings(self.m_cptr))
def __repr__(self):
return "<nvstrings count={}>".format(self.size())
def to_host(self):
"""
Copies strings back to CPU memory into a Python array.
Returns
-------
A list of strings
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
h = s.upper().to_host()
print(h)
Output:
.. code-block:: python
["HELLO","WORLD"]
"""
return pyniNVStrings.n_createHostStrings(self.m_cptr)
def size(self):
"""
The number of strings managed by this instance.
Returns
-------
int: number of strings
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
print(s.size())
Output:
.. code-block:: python
2
"""
return pyniNVStrings.n_size(self.m_cptr)
def len(self, devptr=0):
"""
Returns the number of characters of each string.
Parameters
----------
devptr : GPU memory pointer
Where string length values will be written.
Must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
import numpy as np
from librmm_cffi import librmm
# example passing device memory pointer
s = nvstrings.to_device(["abc","d","ef"])
arr = np.arange(s.size(),dtype=np.int32)
d_arr = librmm.to_device(arr)
s.len(d_arr.device_ctypes_pointer.value)
print(d_arr.copy_to_host())
Output:
.. code-block:: python
[3,1,2]
"""
rtn = pyniNVStrings.n_len(self.m_cptr, devptr)
return rtn
def compare(self, str, devptr=0):
"""
Compare each string to the supplied string.
Returns value of 0 for strings that match str.
Returns < 0 when first different character is lower
than argument string or argument string is shorter.
Returns > 0 when first different character is greater
than the argument string or the argument string is longer.
Parameters
----------
str : str
String to compare all strings in this instance.
devptr : GPU memory pointer
Where string result values will be written.
Must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
print(s.compare('hello'))
Output:
.. code-block:: python
[0,15]
"""
rtn = pyniNVStrings.n_compare(self.m_cptr, str, devptr)
return rtn
def hash(self, devptr=0):
"""
Returns hash values represented by each string.
Parameters
----------
devptr : GPU memory pointer
Where string hash values will be written.
Must be able to hold at least size() of uint32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
s.hash()
Output:
.. code-block:: python
[99162322, 113318802]
"""
rtn = pyniNVStrings.n_hash(self.m_cptr, devptr)
return rtn
def stoi(self, devptr=0):
"""
Returns integer value represented by each string.
Parameters
----------
devptr : GPU memory pointer
Where resulting integer values will be written.
Memory must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
import numpy as np
s = nvstrings.to_device(["1234","-876","543.2","-0.12",".55""])
print(s.stoi())
Output:
.. code-block:: python
[1234, -876, 543, 0, 0]
"""
rtn = pyniNVStrings.n_stoi(self.m_cptr, devptr)
return rtn
def stof(self, devptr=0):
"""
Returns float values represented by each string.
Parameters
----------
devptr : GPU memory pointer
Where resulting float values will be written.
Memory must be able to hold at least size() of float32 values
Examples
--------
.. code-block:: python
import nvstrings
import numpy as np
from librmm_cffi import librmm
s = nvstrings.to_device(["1234","-876","543.2","-0.12",".55"])
print(s.stof())
Output:
.. code-block:: python
[1234.0, -876.0, 543.2000122070312,
-0.11999999731779099, 0.550000011920929]
"""
rtn = pyniNVStrings.n_stof(self.m_cptr, devptr)
return rtn
def cat(self, others=None, sep=None, na_rep=None):
"""
Appends the given strings to this list of strings and
returns as new nvstrings.
Parameters
----------
others : List of str
Strings to be appended.
The number of strings must match size() of this instance.
This must be either a Python array of strings or another
nvstrings instance.
sep : str
If specified, this separator will be appended to each string
before appending the others.
na_rep : char
This character will take the place of any null strings
(not empty strings) in either list.
Examples
--------
.. code-block:: python
import nvstrings
s1 = nvstrings.to_device(['hello', None,'goodbye'])
s2 = nvstrings.to_device(['world','globe', None])
print(s1.cat(s2,sep=':', na_rep='_'))
Output:
.. code-block:: python
["hello:world","_:globe","goodbye:_"]
"""
rtn = pyniNVStrings.n_cat(self.m_cptr, others, sep, na_rep)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def join(self, sep=''):
"""
Concatenate this list of strings into a single string.
Parameters
----------
sep : str
This separator will be appended to each string before
appending the next.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye"])
s.join(sep=':')
Output:
.. code-block:: python
['hello:goodbye']
"""
rtn = pyniNVStrings.n_join(self.m_cptr, sep)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def split(self, delimiter=None, n=-1):
"""
Returns an array of nvstrings each representing the split
of each individual string.
Parameters
----------
delimiter : str
The character used to locate the split points of
each string. Default is space.
n : int
Maximum number of strings to return for each split.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello world","goodbye","well said"])
for result in s.split(' '):
print(result)
Output:
.. code-block:: python
["hello","world"]
["goodbye"]
["well","said"]
"""
strs = pyniNVStrings.n_split(self.m_cptr, delimiter, n)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def rsplit(self, delimiter=None, n=-1):
"""
Returns an array of nvstrings each representing the split of each
individual string. The delimiter is searched for from the end of
each string.
Parameters
----------
delimiter : str
The character used to locate the split points of each
string. Default is space.
n : int
Maximum number of strings to return for each split.
Examples
--------
.. code-block:: python
import nvstrings
strs = nvstrings.to_device(["hello world","goodbye","up in arms"])
for s in strs.rsplit(' ',2):
print(s)
Output:
.. code-block:: python
['hello', 'world']
['goodbye']
['up in', 'arms']
"""
strs = pyniNVStrings.n_rsplit(self.m_cptr, delimiter, n)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def partition(self, delimiter=' '):
"""
Each string is split into two strings on the first delimiter found.
Three strings are returned for each string:
beginning, delimiter, end.
Parameters
----------
delimiter : str
The character used to locate the split points of each
string. Default is space.
Examples
--------
.. code-block:: python
import nvstrings
strs = nvstrings.to_device(["hello world","goodbye","up in arms"])
for s in strs.partition(' '):
print(s)
Output:
.. code-block:: python
['hello', ' ', 'world']
['goodbye', '', '']
['up', ' ', 'in arms']
"""
strs = pyniNVStrings.n_partition(self.m_cptr, delimiter)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def rpartition(self, delimiter=' '):
"""
Each string is split into two strings on the first delimiter found.
Delimiter is searched for from the end.
Three strings are returned for each string: beginning, delimiter, end.
Parameters
----------
delimiter : str
The character used to locate the split points of each string.
Default is space.
Examples
--------
.. code-block:: python
import nvstrings
strs = nvstrings.to_device(["hello world","goodbye","up in arms"])
for s in strs.rpartition(' '):
print(s)
Output:
.. code-block:: python
['hello', ' ', 'world']
['', '', 'goodbye']
['up in', ' ', 'arms']
"""
strs = pyniNVStrings.n_rpartition(self.m_cptr, delimiter)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def split_column(self, delimiter=' ', n=-1):
"""
A new set of columns (nvstrings) is created by splitting
the strings vertically.
Parameters
----------
delimiter : str
The character used to locate the split points of each string.
Default is space.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello world","goodbye","well said"])
for result in s.split_column(' '):
print(result)
Output:
.. code-block:: python
["hello","goodbye","well"]
["world",None,"said"]
"""
strs = pyniNVStrings.n_split_column(self.m_cptr, delimiter, n)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def rsplit_column(self, delimiter=' ', n=-1):
"""
A new set of columns (nvstrings) is created by splitting
the strings vertically. Delimiter is searched from the end.
Parameters
----------
delimiter : str
The character used to locate the split points of each string.
Default is space.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello world","goodbye","well said"])
for result in s.rsplit_column(' '):
print(result)
Output:
.. code-block:: python
["hello","goodbye","well"]
["world",None,"said"]
"""
strs = pyniNVStrings.n_rsplit_column(self.m_cptr, delimiter, n)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def get(self, i):
"""
Returns the character specified in each string as a new string.
The nvstrings returned contains a list of single character strings.
Parameters
----------
i : int
The character position identifying the character
in each string to return.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello world","goodbye","well said"])
print(s.get(0))
Output:
.. code-block:: python
['h', 'g', 'w']
"""
rtn = pyniNVStrings.n_get(self.m_cptr, i)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def repeat(self, repeats):
"""
Appends each string with itself the specified number of times.
This returns a nvstrings instance with the new strings.
Parameters
----------
repeats : int
The number of times each string should be repeated.
Repeat count of 0 or 1 will just return copy of each string.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye","well"])
print(s.repeat(2))
Output:
.. code-block:: python
['hellohello', 'goodbyegoodbye', 'wellwell']
"""
rtn = pyniNVStrings.n_repeat(self.m_cptr, repeats)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def pad(self, width, side='left', fillchar=' '):
"""
Add specified padding to each string.
Side:{'left','right','both'}, default is 'left'.
Parameters
----------
width : int
The minimum width of characters of the new string.
If the width is smaller than the existing string,
no padding is performed.
fillchar : char
The character used to do the padding.
Default is space character. Only the first character is used.
side : str
Either one of "left", "right", "both". The default is "left"
"left" performs a padding on the left – same as rjust()
"right" performs a padding on the right – same as ljust()
"both" performs equal padding on left and right
– same as center()
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye","well"])
print(s.pad(8, side='left'))
Output:
.. code-block:: python
["   hello"," goodbye","    well"]
"""
rtn = pyniNVStrings.n_pad(self.m_cptr, width, side, fillchar)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def ljust(self, width, fillchar=' '):
"""
Pad the end of each string to the minimum width.
Parameters
----------
width : int
The minimum width of characters of the new string.
If the width is smaller than the existing string,
no padding is performed.
fillchar : char
The character used to do the padding.
Default is space character. Only the first character is used.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye","well"])
print(s.ljust(width=6))
Output:
.. code-block:: python
['hello ', 'goodbye', 'well ']
"""
rtn = pyniNVStrings.n_ljust(self.m_cptr, width)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def center(self, width, fillchar=' '):
"""
Pad the beginning and end of each string to the minimum width.
Parameters
----------
width : int
The minimum width of characters of the new string.
If the width is smaller than the existing string,
no padding is performed.
fillchar : char
The character used to do the padding.
Default is space character. Only the first character is used.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye","well"])
for result in s.center(width=6):
print(result)
Output:
.. code-block:: python
['hello ', 'goodbye', ' well ']
"""
rtn = pyniNVStrings.n_center(self.m_cptr, width, fillchar)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def rjust(self, width, fillchar=' '):
"""
Pad the beginning of each string to the minimum width.
Parameters
----------
width : int
The minimum width of characters of the new string.
If the width is smaller than the existing string,
no padding is performed.
fillchar : char
The character used to do the padding.
Default is space character. Only the first character is used.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye","well"])
print(s.rjust(width=6))
Output:
.. code-block:: python
[' hello', 'goodbye', ' well']
"""
rtn = pyniNVStrings.n_rjust(self.m_cptr, width)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def zfill(self, width):
"""
Pads the strings with leading zeros.
It will handle prefix sign characters correctly for strings
containing leading number characters.
Parameters
----------
width : int
The minimum width of characters of the new string.
If the width is smaller than the existing string,
no padding is performed.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","1234","-9876","+5.34"])
print(s.zfill(width=6))
Output:
.. code-block:: python
['0hello', '001234', '-09876', '+05.34']
"""
rtn = pyniNVStrings.n_zfill(self.m_cptr, width)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def wrap(self, width):
"""
This will place new-line characters in whitespace so each line
is no more than width characters. Lines will not be truncated.
Parameters
----------
width : int
The maximum width of characters per newline in the new string.
If the width is smaller than the existing string, no newlines
will be inserted.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello there","goodbye all","well ok"])
print(s.wrap(3))
Output:
.. code-block:: python
['hello\\nthere', 'goodbye\\nall', 'well\\nok']
"""
rtn = pyniNVStrings.n_wrap(self.m_cptr, width)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def slice(self, start, stop=None, step=None):
"""
Returns a substring of each string.
Parameters
----------
start : int
Beginning position of the string to extract.
Default is beginning of the each string.
stop : int
Ending position of the string to extract.
Default is end of each string.
step : int
Stride between characters to capture within the specified section.
Default is every character.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye"])
print(s.slice(2,5))
Output:
.. code-block:: python
['llo', 'odb']
"""
rtn = pyniNVStrings.n_slice(self.m_cptr, start, stop, step)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def slice_from(self, starts=0, stops=0):
"""
Return substring of each string using positions for each string.
The starts and stops parameters are device memory pointers.
If specified, each must contain size() of int32 values.
Parameters
----------
starts : GPU memory pointer
Beginning position of each the string to extract.
Default is beginning of the each string.
stops : GPU memory pointer
Ending position of the each string to extract.
Default is end of each string.
Use -1 to specify to the end of that string.
Examples
--------
.. code-block:: python
import nvstrings
import numpy as np
from numba import cuda
s = nvstrings.to_device(["hello","there"])
darr = cuda.to_device(np.asarray([2,3],dtype=np.int32))
print(s.slice_from(starts=darr.device_ctypes_pointer.value))
Output:
.. code-block:: python
['llo','re']
"""
rtn = pyniNVStrings.n_slice_from(self.m_cptr, starts, stops)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def slice_replace(self, start=None, stop=None, repl=None):
"""
Replace the specified section of each string with a new string.
Parameters
----------
start : int
Beginning position of the string to replace.
Default is beginning of the each string.
stop : int
Ending position of the string to replace.
Default is end of each string.
repl : str
String to insert into the specified position values.
Examples
--------
.. code-block:: python
import nvstrings
strs = nvstrings.to_device(["abcdefghij","0123456789"])
print(strs.slice_replace(2,5,'z'))
Output:
.. code-block:: python
['abzfghij', '01z56789']
"""
rtn = pyniNVStrings.n_slice_replace(self.m_cptr, start, stop, repl)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def replace(self, pat, repl, n=-1, regex=True):
"""
Replace a string (pat) in each string with another string (repl).
Parameters
----------
pat : str
String to be replaced.
This can also be a regex expression -- not a compiled regex.
repl : str
String to replace `pat` with
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","goodbye"])
print(s.replace('e', ''))
Output:
.. code-block:: python
['hllo', 'goodby']
"""
rtn = pyniNVStrings.n_replace(self.m_cptr, pat, repl, n, regex)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def lstrip(self, to_strip=None):
"""
Strip leading characters from each string.
Parameters
----------
to_strip : str
Characters to be removed from leading edge of each string
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["oh","hello","goodbye"])
print(s.lstrip('o'))
Output:
.. code-block:: python
['h', 'hello', 'goodbye']
"""
rtn = pyniNVStrings.n_lstrip(self.m_cptr, to_strip)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def strip(self, to_strip=None):
"""
Strip leading and trailing characters from each string.
Parameters
----------
to_strip : str
Characters to be removed from both ends of each string
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["oh, hello","goodbye"])
print(s.strip('o'))
Output:
.. code-block:: python
['h, hell', 'goodbye']
"""
rtn = pyniNVStrings.n_strip(self.m_cptr, to_strip)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def rstrip(self, to_strip=None):
"""
Strip trailing characters from each string.
Parameters
----------
to_strip : str
Characters to be removed from trailing edge of each string
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["oh","hello","goodbye"])
print(s.rstrip('o'))
Output:
.. code-block:: python
['oh', 'hell', 'goodbye']
"""
rtn = pyniNVStrings.n_rstrip(self.m_cptr, to_strip)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def lower(self):
"""
Convert each string to lowercase.
This only applies to ASCII characters at this time.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["Hello, Friend","Goodbye, Friend"])
print(s.lower())
Output:
.. code-block:: python
['hello, friend', 'goodbye, friend']
"""
rtn = pyniNVStrings.n_lower(self.m_cptr)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def upper(self):
"""
Convert each string to uppercase.
This only applies to ASCII characters at this time.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["Hello, friend","Goodbye, friend"])
print(s.upper())
Output:
.. code-block:: python
['HELLO, FRIEND', 'GOODBYE, FRIEND']
"""
rtn = pyniNVStrings.n_upper(self.m_cptr)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def capitalize(self):
"""
Capitalize first character of each string.
This only applies to ASCII characters at this time.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello, friend","goodbye, friend"])
print(s.capitalize())
Output:
.. code-block:: python
['Hello, friend', 'Goodbye, friend']
"""
rtn = pyniNVStrings.n_capitalize(self.m_cptr)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def swapcase(self):
"""
Change each lowercase character to uppercase and vice versa.
This only applies to ASCII characters at this time.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["Hello, Friend","Goodbye, Friend"])
print(s.swapcase())
Output:
.. code-block:: python
['hELLO, fRIEND', 'gOODBYE, fRIEND']
"""
rtn = pyniNVStrings.n_swapcase(self.m_cptr)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def title(self):
"""
Uppercase the first letter of each word (the first letter
after a space) and lowercase the rest.
This only applies to ASCII characters at this time.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["Hello friend","goodnight moon"])
print(s.title())
Output:
.. code-block:: python
['Hello Friend', 'Goodnight Moon']
"""
rtn = pyniNVStrings.n_title(self.m_cptr)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def index(self, sub, start=0, end=None, devptr=0):
"""
Same as find but throws an error if arg is not found in all strings.
Parameters
----------
sub : str
String to find
start : int
Beginning of the section to search.
Default is beginning of each string.
end : int
End of the section to search. Default is end of each string.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
print(s.index('l'))
Output:
.. code-block:: python
[2,3]
"""
rtn = pyniNVStrings.n_index(self.m_cptr, sub, start, end, devptr)
return rtn
def rindex(self, sub, start=0, end=None, devptr=0):
"""
Same as rfind() except that an error is raised if sub is not found in a string.
Parameters
----------
sub : str
String to find
start : int
Beginning of section to search.
Default is beginning of each string.
end : int
End of section to search. Default is end of each string.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","world"])
print(s.rindex('l'))
Output:
.. code-block:: python
[3,3]
"""
rtn = pyniNVStrings.n_rindex(self.m_cptr, sub, start, end, devptr)
return rtn
def find(self, sub, start=0, end=None, devptr=0):
"""
Find the specified string sub within each string.
Return -1 for those strings where sub is not found.
Parameters
----------
sub : str
String to find
start : int
Beginning of section to search.
Default is beginning of each string.
end : int
End of section to search. Default is end of each string.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.find('o'))
Output:
.. code-block:: python
[4,-1,1]
"""
rtn = pyniNVStrings.n_find(self.m_cptr, sub, start, end, devptr)
return rtn
def find_from(self, sub, starts=0, ends=0, devptr=0):
"""
Find the specified string within each string starting at the
specified character positions.
The starts and ends parameters are device memory pointers.
If specified, each must contain size() of int32 values.
Returns -1 for those strings where sub is not found.
Parameters
----------
sub : str
String to find
starts : GPU memory pointer
Pointer to GPU array of int32 values of beginning of sections to
search, one per string.
ends : GPU memory pointer
Pointer to GPU array of int32 values of end of sections to search.
Use -1 to specify to the end of that string.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
import numpy as np
from numba import cuda
s = nvstrings.to_device(["hello","there"])
darr = cuda.to_device(np.asarray([2,3],dtype=np.int32))
print(s.find_from('e',starts=darr.device_ctypes_pointer.value))
Output:
.. code-block:: python
[-1,4]
"""
rtn = pyniNVStrings.n_find_from(self.m_cptr, sub, starts, ends, devptr)
return rtn
def rfind(self, sub, start=0, end=None, devptr=0):
"""
Find the specified string within each string.
Search from the end of the string.
Return -1 for those strings where sub is not found.
Parameters
----------
sub : str
String to find
start : int
Beginning of section to search.
Default is beginning of each string.
end : int
End of section to search. Default is end of each string.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.rfind('o'))
Output:
.. code-block:: python
[4, -1, 1]
"""
rtn = pyniNVStrings.n_rfind(self.m_cptr, sub, start, end, devptr)
return rtn
def findall(self, pat):
"""
Find all occurrences of regular expression pattern in each string.
A new array of nvstrings is created for each string in this instance.
Parameters
----------
pat : str
The regex pattern used to search for substrings
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hare","bunny","rabbit"])
for result in s.findall('[ab]'):
print(result)
Output:
.. code-block:: python
["a"]
["b"]
["a","b","b"]
"""
strs = pyniNVStrings.n_findall(self.m_cptr, pat)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def findall_column(self, pat):
"""
A new set of nvstrings is created by organizing substring
results vertically.
Parameters
----------
pat : str
The regex pattern to search for substrings
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hare","bunny","rabbit"])
for result in s.findall_column('[ab]'):
print(result)
Output:
.. code-block:: python
["a","b","a"]
[None,None,"b"]
[None,None,"b"]
"""
strs = pyniNVStrings.n_findall_column(self.m_cptr, pat)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def contains(self, pat, regex=True, devptr=0):
"""
Find the specified string within each string.
Default expects regex pattern.
Returns an array of boolean values:
True if `pat` is found, False if not.
Parameters
----------
pat : str
Pattern or string to search for in each string of this instance.
regex : bool
If `True`, pat is interpreted as a regex string.
If `False`, pat is a string to be searched for in each instance.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Must be able to hold at least size() of np.byte values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.contains('o'))
Output:
.. code-block:: python
[True, False, True]
"""
rtn = pyniNVStrings.n_contains(self.m_cptr, pat, regex, devptr)
return rtn
def match(self, pat, devptr=0):
"""
Return array of boolean values where True is set if the specified
pattern matches the beginning of the corresponding string.
Parameters
----------
pat : str
Pattern to find
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size() of
np.byte values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.match('h'))
Output:
.. code-block:: python
[True, False, False]
"""
rtn = pyniNVStrings.n_match(self.m_cptr, pat, devptr)
return rtn
def count(self, pat, devptr=0):
"""
Count occurrences of pattern in each string.
Parameters
----------
pat : str
Pattern to find
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory must be able to hold at least size() of int32 values.
"""
rtn = pyniNVStrings.n_count(self.m_cptr, pat, devptr)
return rtn
def startswith(self, pat, devptr=0):
"""
Return array of boolean values with True for the strings where the
specified string is at the beginning.
Parameters
----------
pat : str
Pattern to find. Regular expressions are not accepted.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory must be able to hold at least size() of np.byte values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.startswith('h'))
Output:
.. code-block:: python
[True, False, False]
"""
rtn = pyniNVStrings.n_startswith(self.m_cptr, pat, devptr)
return rtn
def endswith(self, pat, devptr=0):
"""
Return array of boolean values with True for the strings
where the specified string is at the end.
Parameters
----------
pat : str
Pattern to find. Regular expressions are not accepted.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory must be able to hold at least size() of np.byte values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.endswith('d'))
Output:
.. code-block:: python
[False, False, True]
"""
rtn = pyniNVStrings.n_endswith(self.m_cptr, pat, devptr)
return rtn
def extract(self, pat):
"""
Extract string from the first match of regular expression pat.
A new array of nvstrings is created for each string in this instance.
Parameters
----------
pat : str
The regex pattern with group capture syntax
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["a1","b2","c3"])
for result in s.extract('([ab])(\d)'):
print(result)
Output:
.. code-block:: python
["a","1"]
["b","2"]
[None,None]
"""
strs = pyniNVStrings.n_extract(self.m_cptr, pat)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def extract_column(self, pat):
"""
Extract string from the first match of regular expression pat.
A new array of nvstrings is created by organizing group results
vertically.
Parameters
----------
pat : str
The regex pattern with group capture syntax
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["a1","b2","c3"])
for result in s.extract_column('([ab])(\d)'):
print(result)
Output:
.. code-block:: python
["a","b"]
["1","2"]
[None,None]
"""
strs = pyniNVStrings.n_extract_column(self.m_cptr, pat)
rtn = []
for cptr in strs:
if cptr != 0:
rtn.append(nvstrings(cptr))
else:
rtn.append(None)
return rtn
def isalnum(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only alpha-numeric characters.
Equivalent to: isalpha() or isdigit() or isnumeric() or isdecimal()
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isalnum())
Output:
.. code-block:: python
[True, True, False, False, False, False]
"""
rtn = pyniNVStrings.n_isalnum(self.m_cptr, devptr)
return rtn
def isalpha(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only alphabetic characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isalpha())
Output:
.. code-block:: python
[False, True, False, False, False, False]
"""
rtn = pyniNVStrings.n_isalpha(self.m_cptr, devptr)
return rtn
def isdigit(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only decimal and digit characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isdigit())
Output:
.. code-block:: python
[True, False, False, False, False, False]
"""
rtn = pyniNVStrings.n_isdigit(self.m_cptr, devptr)
return rtn
def isspace(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only whitespace characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isspace())
Output:
.. code-block:: python
[False, False, False, False, False, True]
"""
rtn = pyniNVStrings.n_isspace(self.m_cptr, devptr)
return rtn
def isdecimal(self, devptr=0):
"""
Return array of boolean values with True for strings that contain only
decimal characters -- those that can be used to extract base10 numbers.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isdecimal())
Output:
.. code-block:: python
[True, False, False, False, False, False]
"""
rtn = pyniNVStrings.n_isdecimal(self.m_cptr, devptr)
return rtn
def isnumeric(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only numeric characters. These include digit and numeric characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['1234', 'de', '1.75', '-34', '+9.8', ' '])
print(s.isnumeric())
Output:
.. code-block:: python
[True, False, False, False, False, False]
"""
rtn = pyniNVStrings.n_isnumeric(self.m_cptr, devptr)
return rtn
def islower(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only lowercase characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['hello', 'Goodbye'])
print(s.islower())
Output:
.. code-block:: python
[True, False]
"""
rtn = pyniNVStrings.n_islower(self.m_cptr, devptr)
return rtn
def isupper(self, devptr=0):
"""
Return array of boolean values with True for strings that contain
only uppercase characters.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(['hello', 'GOODBYE'])
print(s.isupper())
Output:
.. code-block:: python
[False, True]
"""
rtn = pyniNVStrings.n_isupper(self.m_cptr, devptr)
return rtn
def translate(self, table):
"""
Translate individual characters to new characters using
the provided table.
Parameters
----------
table : dict
Use str.maketrans() to build the mapping table.
Unspecified characters are unchanged.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.translate(str.maketrans('elh','ELH')))
Output:
.. code-block:: python
['HELLo', 'tHErE', 'worLd']
"""
rtn = pyniNVStrings.n_translate(self.m_cptr, table)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def sort(self, stype, asc=True):
"""
Sort this list by name (2) or length (1) or both (3).
Sorting can help improve performance for other operations.
Parameters
----------
stype : int
Type of sort to use.
If stype is 1, strings will be sorted by length
If stype is 2, strings will be sorted alphabetically by name
If stype is 3, strings will be sorted by length and then
alphabetically
asc : bool
Whether to sort ascending (True) or descending (False)
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["aaa", "bb", "aaaabb"])
print(s.sort(3))
Output:
.. code-block:: python
['bb', 'aaa', 'aaaabb']
"""
rtn = pyniNVStrings.n_sort(self.m_cptr, stype, asc)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def order(self, stype, asc=True, devptr=0):
"""
Sort this list by name (2) or length (1) or both (3).
This sort only provides the new indexes and does not reorder the
managed strings.
Parameters
----------
stype : int
Type of sort to use.
If stype is 1, strings will be sorted by length
If stype is 2, strings will be sorted alphabetically by name
If stype is 3, strings will be sorted by length and then
alphabetically
asc : bool
Whether to sort ascending (True) or descending (False)
devptr : GPU memory pointer
Where index values will be written.
Must be able to hold at least size() of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["aaa", "bb", "aaaabb"])
print(s.order(2))
Output:
.. code-block:: python
[1, 0, 2]
"""
rtn = pyniNVStrings.n_order(self.m_cptr, stype, asc, devptr)
return rtn
def sublist(self, indexes, count=0):
"""
Return a sublist of strings from this instance.
Parameters
----------
indexes : List of ints or GPU memory pointer
0-based indexes of strings to return from an nvstrings object
count : int
Number of ints if indexes parm is a device pointer.
Otherwise it is ignored.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.sublist([0, 2]))
Output:
.. code-block:: python
['hello', 'world']
"""
rtn = pyniNVStrings.n_sublist(self.m_cptr, indexes, count)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def remove_strings(self, indexes, count=0):
"""
Remove the specified strings and return a new instance.
Parameters
----------
indexes : List of ints
0-based indexes of strings to remove from an nvstrings object
If this parameter is pointer to device memory, count parm is
required.
count : int
Number of ints if indexes parm is a device pointer.
Otherwise it is ignored.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hello","there","world"])
print(s.remove_strings([0, 2]))
Output:
.. code-block:: python
['there']
"""
rtn = pyniNVStrings.n_remove_strings(self.m_cptr, indexes, count)
if rtn is not None:
rtn = nvstrings(rtn)
return rtn
def find_multiple(self, strs, devptr=0):
"""
Return a 'matrix' of find results for each of the strings in the
strs parameter.
Each row is an array of integers identifying the first location
of the corresponding provided string.
Parameters
----------
strs : nvstrings
Strings to find in each of the strings in this instance.
devptr : GPU memory pointer
Optional device memory pointer to hold the results.
Memory size must be able to hold at least size()*strs.size()
of int32 values.
Examples
--------
.. code-block:: python
import nvstrings
s = nvstrings.to_device(["hare","bunny","rabbit"])
t = nvstrings.to_device(["a","e","i","o","u"])
print(s.find_multiple(t))
Output:
.. code-block:: python
[[1, 3, -1, -1, -1], [-1, -1, -1, -1, 1], [1, -1, 4, -1, -1]]
"""
rtn = pyniNVStrings.n_find_multiple(self.m_cptr, strs, devptr)
return rtn
| nvstrings-master | nvstrings.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, NVIDIA CORPORATION.
#
# nvstrings documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 3 10:59:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath('../'))
# Support .md files
source_parsers = {'.md': CommonMarkParser}
source_suffix = ['.rst', '.md']
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'numpydoc',
'sphinx_markdown_tables'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# Note: source_suffix is already set above to ['.rst', '.md']; overriding it
# here with '.rst' would silently drop the Markdown support configured above.
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nvstrings'
copyright = '2019, nvidia'
author = 'nvidia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
# only import and set the theme if we're building docs locally
# otherwise, readthedocs.org uses their theme by default,
# so no need to specify it
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'nvstringsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nvstrings.tex', 'nvstrings Documentation',
'nvidia', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nvstrings', 'nvstrings Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nvstrings', 'nvstrings Documentation',
author, 'nvstrings', 'GPU-accelerated string manipulation library.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# Config numpydoc
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
| nvstrings-master | docs/source/conf.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
from Cython.Build import cythonize
from setuptools import setup, Extension, find_packages
# this is tricky: sys.path gets overwritten at different stages of the build
# flow, so we need to hack sys.path ourselves...
source_root = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(source_root, 'builder'))
import utils # this is builder.utils
# Use README for the project long description
with open(os.path.join(source_root, "README.md")) as f:
long_description = f.read()
# Get test requirements
with open(os.path.join(source_root, "tests/requirements.txt")) as f:
tests_require = f.read().split('\n')
# Runtime dependencies
# - cuTENSOR version is constrained in the cutensornet-cuXX package, so we don't
# need to list it
install_requires = [
'numpy>=1.21',
# 'torch', # <-- PyTorch is optional; also, the PyPI version does not support GPU...
f'custatevec-cu{utils.cuda_major_ver}~=1.4', # ">=1.4.0,<2"
f'cutensornet-cu{utils.cuda_major_ver}~=2.2', # ">=2.2.0,<3"
]
if utils.cuda_major_ver == '11':
# CuPy has 3+ wheels for CUDA 11.x, only the cuquantum-python meta package has
# a chance to resolve the ambiguity properly
pass
elif utils.cuda_major_ver == '12':
install_requires.append('cupy-cuda12x>=10.0') # no ambiguity
# Note: the extension attributes are overwritten in build_extension()
ext_modules = [
Extension(
"cuquantum.custatevec.custatevec",
sources=["cuquantum/custatevec/custatevec.pyx"],
),
Extension(
"cuquantum.cutensornet.cutensornet",
sources=["cuquantum/cutensornet/cutensornet.pyx"],
),
Extension(
"cuquantum.utils",
sources=["cuquantum/utils.pyx"],
include_dirs=[os.path.join(utils.cuda_path, 'include')],
),
]
cmdclass = {
'build_ext': utils.build_ext,
'bdist_wheel': utils.bdist_wheel,
}
cuda_classifier = []
if utils.cuda_major_ver == '11':
cuda_classifier.append("Environment :: GPU :: NVIDIA CUDA :: 11")
elif utils.cuda_major_ver == '12':
cuda_classifier.append("Environment :: GPU :: NVIDIA CUDA :: 12")
# TODO: move static metadata to pyproject.toml
setup(
name=f"cuquantum-python-cu{utils.cuda_major_ver}",
version=utils.cuqnt_py_ver,
description="NVIDIA cuQuantum Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://developer.nvidia.com/cuquantum-sdk",
project_urls={
"Bug Tracker": "https://github.com/NVIDIA/cuQuantum/issues",
"User Forum": "https://github.com/NVIDIA/cuQuantum/discussions",
"Documentation": "https://docs.nvidia.com/cuda/cuquantum/latest/python/",
"Source Code": "https://github.com/NVIDIA/cuQuantum",
},
author="NVIDIA Corporation",
author_email="[email protected]",
license="BSD-3-Clause",
license_files = ('LICENSE',),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Topic :: Education",
"Topic :: Scientific/Engineering",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: Implementation :: CPython",
"Environment :: GPU :: NVIDIA CUDA",
] + cuda_classifier,
ext_modules=cythonize(ext_modules,
verbose=True, language_level=3,
compiler_directives={'embedsignature': True}),
packages=find_packages(include=['cuquantum', 'cuquantum.*']),
package_data={"": ["*.pxd", "*.pyx", "*.py"],},
zip_safe=False,
python_requires='>=3.9',
install_requires=install_requires,
tests_require=install_requires+tests_require,
cmdclass=cmdclass,
)
| cuQuantum-main | python/setup.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# The following configs are needed to deselect/ignore collected tests for
# various reasons, see pytest-dev/pytest#3730. In particular, this strategy
# is borrowed from https://github.com/pytest-dev/pytest/issues/3730#issuecomment-567142496.
def pytest_configure(config):
config.addinivalue_line(
"markers", "uncollect_if(*, func): function to unselect tests from parametrization"
)
def pytest_collection_modifyitems(config, items):
removed = []
kept = []
for item in items:
m = item.get_closest_marker('uncollect_if')
if m:
func = m.kwargs['func']
if func(**item.callspec.params):
removed.append(item)
continue
kept.append(item)
if removed:
config.hook.pytest_deselected(items=removed)
items[:] = kept
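# A minimal sketch of how a test module might opt into this hook (the test and
# predicate names below are illustrative, not part of this repository):
#
#     def _uncollect_large_case(n_qubits, dtype):
#         # returning True removes that parametrization from collection
#         return n_qubits > 20 and dtype == 'complex128'
#
#     @pytest.mark.uncollect_if(func=_uncollect_large_case)
#     @pytest.mark.parametrize('dtype', ['complex64', 'complex128'])
#     @pytest.mark.parametrize('n_qubits', [4, 24])
#     def test_something(n_qubits, dtype):
#         ...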
| cuQuantum-main | python/tests/conftest.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import sys
import pytest
class cuQuantumSampleTestError(Exception):
pass
def run_sample(samples_path, filename):
fullpath = os.path.join(samples_path, filename)
with open(fullpath, "r", encoding='utf-8') as f:
script = f.read()
try:
old_argv = sys.argv
sys.argv = [fullpath]
exec(script, {})
except ImportError as e:
if 'torch' not in str(e):
raise
else:
pytest.skip('PyTorch uninstalled, skipping related tests')
except Exception as e:
msg = "\n"
msg += f'Got error ({filename}):\n'
msg += str(e)
raise cuQuantumSampleTestError(msg) from e
finally:
sys.argv = old_argv
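# Typical call site (see the sample test modules in this package), e.g. with a
# hypothetical sample file name:
#     run_sample(samples_path, 'some_sample.py')
# The helper exec()s the script with a patched sys.argv and converts failures
# into cuQuantumSampleTestError so pytest reports the offending sample file.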
| cuQuantum-main | python/tests/samples_tests/test_utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
| cuQuantum-main | python/tests/samples_tests/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import glob
import os
import re
import pytest
from ..test_utils import run_sample
samples_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'samples', 'custatevec')
sample_files = glob.glob(samples_path+'**/*.py', recursive=True)
# Handle MPI tests separately.
mpi_re = r".*_mpi[_]?.*\.py"
sample_files = list(filter(lambda f: not re.search(mpi_re, f), sample_files))
@pytest.mark.parametrize(
'sample', sample_files
)
class TestcuStateVecSamples:
def test_sample(self, sample):
run_sample(samples_path, sample)
| cuQuantum-main | python/tests/samples_tests/custatevec_tests/test_custatevec_samples.py |
| cuQuantum-main | python/tests/samples_tests/custatevec_tests/__init__.py |
| cuQuantum-main | python/tests/samples_tests/cutensornet_tests/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import glob
import os
import re
import sys
try:
import nbmake
except ImportError:
nbmake = None
# we could use packaging.version.Version too, but NumPy is our required
# dependency, packaging is not.
from numpy.lib import NumpyVersion as Version
import pytest
from ..test_utils import cuQuantumSampleTestError, run_sample
circuit_versions = dict()
try:
import cirq
circuit_versions['cirq'] = Version(cirq.__version__)
except ImportError:
circuit_versions['cirq'] = Version('0.0.0') # no cirq
try:
import qiskit
circuit_versions['qiskit'] = Version(qiskit.__qiskit_version__['qiskit']) # meta package version
except ImportError:
circuit_versions['qiskit'] = Version('0.0.0') # no qiskit
# minimal versions to run samples/circuit_converter/cirq/qiskit_advanced.ipynb
# slightly higher than the minimal versions for CircuitToEinsum to work with cirq/qiskit
NOTEBOOK_MIN_VERSIONS = {'cirq': Version('0.7.0'),
'qiskit': Version('0.25.0')}
notebook_skip_messages = dict()
for circuit_type, current_version in circuit_versions.items():
min_version = NOTEBOOK_MIN_VERSIONS[circuit_type]
if current_version < min_version:
notebook_skip_messages[circuit_type] = (
f"testing {circuit_type} notebooks requires "
f"{circuit_type}>={NOTEBOOK_MIN_VERSIONS[circuit_type].version}"
)
samples_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'samples', 'cutensornet')
sample_files = glob.glob(samples_path+'/**/*.py', recursive=True)
# Handle MPI tests separately.
mpi_re = r".*_mpi[_]?.*\.py"
sample_files = list(filter(lambda f: not re.search(mpi_re, f), sample_files))
@pytest.mark.parametrize(
'sample', sample_files
)
class TestcuTensorNetSamples:
def test_sample(self, sample):
run_sample(samples_path, sample)
notebook_files = glob.glob(samples_path+'/**/*.ipynb', recursive=True)
@pytest.mark.skipif(
nbmake is None,
reason="testing Jupyter notebooks requires nbmake"
)
@pytest.mark.parametrize(
'notebook', notebook_files
)
class TestNotebooks:
def test_notebook(self, notebook):
circuit_type = os.path.basename(notebook).split('_')[0]
if circuit_type in notebook_skip_messages:
pytest.skip(notebook_skip_messages[circuit_type])
else:
status = pytest.main(['--nbmake', notebook])
if status != 0:
raise cuQuantumSampleTestError(f'{notebook} failed')
| cuQuantum-main | python/tests/samples_tests/cutensornet_tests/test_cutensornet_samples.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import atexit
import glob
import os
import sys
import tempfile
try:
import cffi
except ImportError:
cffi = None
import cupy
import numpy
import pytest
from cuquantum import ComputeType, cudaDataType
if cffi:
# if the Python binding is not installed in the editable mode (pip install
# -e .), the cffi tests would fail as the modules cannot be imported
sys.path.append(os.getcwd())
def clean_up_cffi_files():
files = glob.glob(os.path.join(os.getcwd(), "cuquantum_test_cffi*"))
for f in files:
try:
os.remove(f)
except FileNotFoundError:
pass
dtype_to_data_type = {
numpy.float16: cudaDataType.CUDA_R_16F,
numpy.float32: cudaDataType.CUDA_R_32F,
numpy.float64: cudaDataType.CUDA_R_64F,
numpy.complex64: cudaDataType.CUDA_C_32F,
numpy.complex128: cudaDataType.CUDA_C_64F,
}
dtype_to_compute_type = {
numpy.float16: ComputeType.COMPUTE_16F,
numpy.float32: ComputeType.COMPUTE_32F,
numpy.float64: ComputeType.COMPUTE_64F,
numpy.complex64: ComputeType.COMPUTE_32F,
numpy.complex128: ComputeType.COMPUTE_64F,
}
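# For example, a test working with numpy.complex128 state vectors would pass
# dtype_to_data_type[numpy.complex128] (CUDA_C_64F) and
# dtype_to_compute_type[numpy.complex128] (COMPUTE_64F) to the C APIs exercised below.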
# we don't wanna recompile for every test case...
_cffi_mod1 = None
_cffi_mod2 = None
def _can_use_cffi():
if cffi is None or os.environ.get('CUDA_PATH') is None:
return False
else:
return True
class MemoryResourceFactory:
def __init__(self, source):
self.source = source
def get_dev_mem_handler(self):
if self.source == "py-callable":
return (*self._get_cuda_callable(), self.source)
elif self.source == "cffi":
# ctx is not needed, so set to NULL
return (0, *self._get_functor_address(), self.source)
elif self.source == "cffi_struct":
return self._get_handler_address()
# TODO: add more different memory sources
else:
raise NotImplementedError
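# A minimal sketch of how this factory is consumed by the tests (the names are
# illustrative; `mod` stands for either the custatevec or cutensornet binding
# and `handle` for the corresponding library handle):
#
#     mr = MemoryResourceFactory("py-callable")
#     handler = mr.get_dev_mem_handler()
#     mod.set_device_mem_handler(handle, handler)
#     assert mod.get_device_mem_handler(handle) == handler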
def _get_cuda_callable(self):
def alloc(size, stream):
return cupy.cuda.runtime.mallocAsync(size, stream)
def free(ptr, size, stream):
cupy.cuda.runtime.freeAsync(ptr, stream)
return alloc, free
def _get_functor_address(self):
if not _can_use_cffi():
raise RuntimeError
global _cffi_mod1
if _cffi_mod1 is None:
import importlib
mod_name = f"cuquantum_test_{self.source}"
ffi = cffi.FFI()
ffi.set_source(mod_name, """
#include <cuda_runtime.h>
// cffi limitation: we can't use the actual type cudaStream_t because
// it's considered an "incomplete" type and we can't get the functor
// address by doing so...
int my_alloc(void* ctx, void** ptr, size_t size, void* stream) {
return (int)cudaMallocAsync(ptr, size, stream);
}
int my_free(void* ctx, void* ptr, size_t size, void* stream) {
return (int)cudaFreeAsync(ptr, stream);
}
""",
include_dirs=[os.environ['CUDA_PATH']+'/include'],
library_dirs=[os.environ['CUDA_PATH']+'/lib64'],
libraries=['cudart'],
)
ffi.cdef("""
int my_alloc(void* ctx, void** ptr, size_t size, void* stream);
int my_free(void* ctx, void* ptr, size_t size, void* stream);
""")
ffi.compile(verbose=True)
self.ffi = ffi
_cffi_mod1 = importlib.import_module(mod_name)
self.ffi_mod = _cffi_mod1
atexit.register(clean_up_cffi_files)
alloc_addr = self._get_address("my_alloc")
free_addr = self._get_address("my_free")
return alloc_addr, free_addr
def _get_handler_address(self):
if not _can_use_cffi():
raise RuntimeError
global _cffi_mod2
if _cffi_mod2 is None:
import importlib
mod_name = f"cuquantum_test_{self.source}"
ffi = cffi.FFI()
ffi.set_source(mod_name, """
#include <cuda_runtime.h>
// cffi limitation: we can't use the actual type cudaStream_t because
// it's considered an "incomplete" type and we can't get the functor
// address by doing so...
int my_alloc(void* ctx, void** ptr, size_t size, void* stream) {
return (int)cudaMallocAsync(ptr, size, stream);
}
int my_free(void* ctx, void* ptr, size_t size, void* stream) {
return (int)cudaFreeAsync(ptr, stream);
}
typedef struct {
void* ctx;
int (*device_alloc)(void* ctx, void** ptr, size_t size, void* stream);
int (*device_free)(void* ctx, void* ptr, size_t size, void* stream);
char name[64];
} myHandler;
myHandler* init_myHandler(myHandler* h, const char* name) {
h->ctx = NULL;
h->device_alloc = my_alloc;
h->device_free = my_free;
memcpy(h->name, name, 64);
return h;
}
""",
include_dirs=[os.environ['CUDA_PATH']+'/include'],
library_dirs=[os.environ['CUDA_PATH']+'/lib64'],
libraries=['cudart'],
)
ffi.cdef("""
typedef struct {
...;
} myHandler;
myHandler* init_myHandler(myHandler* h, const char* name);
""")
ffi.compile(verbose=True)
self.ffi = ffi
_cffi_mod2 = importlib.import_module(mod_name)
self.ffi_mod = _cffi_mod2
atexit.register(clean_up_cffi_files)
h = self.handler = self.ffi_mod.ffi.new("myHandler*")
self.ffi_mod.lib.init_myHandler(h, self.source.encode())
return self._get_address(h)
def _get_address(self, func_name_or_ptr):
if isinstance(func_name_or_ptr, str):
func_name = func_name_or_ptr
data = str(self.ffi_mod.ffi.addressof(self.ffi_mod.lib, func_name))
else:
ptr = func_name_or_ptr # ptr to struct
data = str(self.ffi_mod.ffi.addressof(ptr[0]))
# data has this format: "<cdata 'int(*)(void *, void * *, size_t, void *)' 0x7f6c5da37300>"
return int(data.split()[-1][:-1], base=16)
class MemHandlerTestBase:
mod = None
prefix = None
error = None
def _test_set_get_device_mem_handler(self, source, handle):
if (isinstance(source, str) and source.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
if source is not None:
mr = MemoryResourceFactory(source)
handler = mr.get_dev_mem_handler()
self.mod.set_device_mem_handler(handle, handler)
# round-trip test
queried_handler = self.mod.get_device_mem_handler(handle)
if source == 'cffi_struct':
# I'm lazy, otherwise I'd also fetch the functor addresses here...
assert queried_handler[0] == 0 # ctx is NULL
assert queried_handler[-1] == source
else:
assert queried_handler == handler
else:
with pytest.raises(self.error) as e:
queried_handler = self.mod.get_device_mem_handler(handle)
assert f'{self.prefix.upper()}_STATUS_NO_DEVICE_ALLOCATOR' in str(e.value)
class LoggerTestBase:
mod = None
prefix = None
def test_logger_set_level(self):
self.mod.logger_set_level(6) # on
self.mod.logger_set_level(0) # off
def test_logger_set_mask(self):
self.mod.logger_set_mask(16) # should not raise
def test_logger_set_callback_data(self):
# we also test logger_open_file() here to avoid polluting stdout
def callback(level, name, message, my_data, is_ok=False):
log = f"{level}, {name}, {message} (is_ok={is_ok}) -> logged\n"
my_data.append(log)
handle = None
my_data = []
is_ok = True
with tempfile.TemporaryDirectory() as temp:
file_name = os.path.join(temp, f"{self.prefix}_test")
self.mod.logger_open_file(file_name)
self.mod.logger_set_callback_data(callback, my_data, is_ok=is_ok)
self.mod.logger_set_level(6)
try:
handle = self.mod.create()
self.mod.destroy(handle)
except:
if handle:
self.mod.destroy(handle)
raise
finally:
self.mod.logger_force_disable() # to not affect the rest of tests
with open(file_name) as f:
log_from_f = f.read()
# check the log file
assert f'[{self.prefix}Create]' in log_from_f
assert f'[{self.prefix}Destroy]' in log_from_f
# check the captured data (note we log 2 APIs)
log = ''.join(my_data)
assert log.count("-> logged") >= 2
assert log.count("is_ok=True") >= 2
| cuQuantum-main | python/tests/cuquantum_tests/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import subprocess
import sys
import pytest
# TODO: mark this test as slow and don't run it every time
class TestModuleUtils:
@pytest.mark.parametrize(
'includes', (True, False)
)
@pytest.mark.parametrize(
'libs', (True, False)
)
@pytest.mark.parametrize(
'target', (None, 'custatevec', 'cutensornet', True)
)
def test_cuquantum(self, includes, libs, target):
# We need to launch a subprocess to have a clean ld state
cmd = [sys.executable, '-m', 'cuquantum']
if includes:
cmd.append('--includes')
if libs:
cmd.append('--libs')
if target:
if target is True:
cmd.extend(('--target', 'custatevec'))
cmd.extend(('--target', 'cutensornet'))
else:
cmd.extend(('--target', target))
result = subprocess.run(cmd, capture_output=True, env=os.environ)
if result.returncode:
if includes is False and libs is False and target is None:
assert result.returncode == 1
assert 'usage' in result.stdout.decode()
return
msg = f'Got error:\n'
msg += f'stdout: {result.stdout.decode()}\n'
msg += f'stderr: {result.stderr.decode()}\n'
assert False, msg
out = result.stdout.decode().split()
if includes:
assert any([s.startswith('-I') for s in out])
if libs:
assert any([s.startswith('-L') for s in out])
if target:
assert any([s.startswith('-l') for s in out])
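# For reference, the subprocess above is roughly equivalent to invoking, e.g.:
#     python -m cuquantum --includes --libs --target custatevec
# which is expected to print compiler/linker flags (strings starting with -I,
# -L and -l); the exact paths depend on the local installation.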
| cuQuantum-main | python/tests/cuquantum_tests/test_cuquantum.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import cupy as cp
from cupy import testing
import numpy as np
try:
from mpi4py import MPI # init!
except ImportError:
MPI = None
import pytest
import cuquantum
from cuquantum import ComputeType, cudaDataType
from cuquantum import custatevec as cusv
from .. import (_can_use_cffi, dtype_to_compute_type, dtype_to_data_type,
MemHandlerTestBase, MemoryResourceFactory, LoggerTestBase)
###################################################################
#
# As of beta 2, the test suite for Python bindings is kept minimal.
# The sole goal is to ensure the Python arguments are properly
# passed to the C level. We do not ensure coverage nor correctness.
# This decision will be revisited in the future.
#
###################################################################
@pytest.fixture()
def handle():
h = cusv.create()
yield h
cusv.destroy(h)
@testing.parameterize(*testing.product({
'n_qubits': (3,),
'dtype': (np.complex64, np.complex128),
}))
class TestSV:
# Base class for all statevector tests
def get_sv(self):
arr = cp.zeros((2**self.n_qubits,), dtype=self.dtype)
arr[0] = 1 # initialize in |000...00>
return arr
# TODO: make this a static method
def _return_data(self, data, name, dtype, return_value):
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = np.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.ctypes.data, data.size
elif return_value == 'seq':
# data itself is already a flat sequence
return data, len(data)
else:
assert False
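# For example (illustrative only): with basis_bits = [0, 1, 2], the 'int' form
# returns (<host pointer to an int32 array>, 3) while the 'seq' form simply
# returns ([0, 1, 2], 3); both forms are accepted by the bindings under test.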
@testing.parameterize(*testing.product({
'n_svs': (3,),
'n_qubits': (4,),
'n_extra_qubits': (0, 1), # for padding purpose
'dtype': (np.complex64, np.complex128),
}))
class TestBatchedSV:
# Base class for all batched statevector tests
def get_sv(self):
arr = cp.zeros((self.n_svs, 2**(self.n_qubits + self.n_extra_qubits)), dtype=self.dtype)
arr[:, 0] = 1 # initialize in |000...00>
self.sv_stride = 2 ** (self.n_qubits + self.n_extra_qubits) # in counts, not bytes
return arr
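# Note: with n_qubits=4 and n_extra_qubits=1, each of the n_svs state vectors
# occupies sv_stride = 32 elements of which only the first 16 hold the 4-qubit
# state; the extra elements exercise the padded-stride code paths.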
# TODO: make this a static method
# TODO: refactor this to a helper class?
def _return_data(self, data, name, dtype, return_value):
if return_value == 'int_d':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = cp.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.data.ptr, data.size
if return_value == 'int_h':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = np.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.ctypes.data, data.size
elif return_value == 'seq':
# data itself is already a flat sequence
return data, len(data)
else:
assert False
@pytest.fixture()
def multi_gpu_handles(request):
# TODO: consider making this class more flexible
# (ex: arbitrary number of qubits and/or devices, etc)
n_devices = 2 # should be power of 2
handles = []
p2p_required = request.param
for dev in range(n_devices):
with cp.cuda.Device(dev):
h = cusv.create()
handles.append(h)
if p2p_required:
for peer in range(n_devices):
if dev == peer: continue
try:
cp.cuda.runtime.deviceEnablePeerAccess(peer)
except Exception as e:
if 'PeerAccessUnsupported' in str(e):
pytest.skip("P2P unsupported")
if 'PeerAccessAlreadyEnabled' not in str(e):
raise
yield handles
for dev in range(n_devices):
with cp.cuda.Device(dev):
h = handles.pop(0)
cusv.destroy(h)
if p2p_required:
for peer in range(n_devices):
if dev == peer: continue
try:
cp.cuda.runtime.deviceDisablePeerAccess(peer)
except Exception as e:
if 'PeerAccessNotEnabled' not in str(e):
raise
def get_exponent(n):
assert (n % 2) == 0
exponent = 1
while True:
out = n >> exponent
if out != 1:
exponent += 1
else:
break
return exponent
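# Worked examples: get_exponent(2) -> 1, get_exponent(8) -> 3, get_exponent(16) -> 4.
# The assertion above only checks that n is even; for the power-of-two device
# counts used here this effectively computes log2(n).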
@testing.parameterize(*testing.product({
'n_qubits': (4,),
'dtype': (np.complex64, np.complex128),
}))
class TestMultiGpuSV:
# TODO: consider making this class more flexible
# (ex: arbitrary number of qubits and/or devices, etc)
n_devices = 2 # should be power of 2
def get_sv(self):
self.n_global_bits = get_exponent(self.n_devices)
self.n_local_bits = self.n_qubits - self.n_global_bits
self.sub_sv = []
for dev in range(self.n_devices):
with cp.cuda.Device(dev):
self.sub_sv.append(cp.zeros(
2**self.n_local_bits, dtype=self.dtype))
self.sub_sv[0][0] = 1 # initialize in |000...00>
return self.sub_sv
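# For instance, with n_qubits=4 distributed over n_devices=2, n_global_bits=1
# and n_local_bits=3, so each device holds a sub state vector of 2**3 = 8
# amplitudes; only sub_sv[0][0] is set, so the global state starts in |0000>.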
# TODO: make this a static method
def _return_data(self, data, name, dtype, return_value):
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0, 0
else:
# return int as void*
data = np.asarray(data, dtype=dtype)
setattr(self, name, data) # keep data alive
return data.ctypes.data, data.size
elif return_value == 'seq':
# data itself is already a flat sequence
return data, len(data)
else:
assert False
class TestLibHelper:
def test_get_version(self):
ver = cusv.get_version()
assert ver == (cusv.MAJOR_VER * 1000
+ cusv.MINOR_VER * 100
+ cusv.PATCH_VER)
assert ver == cusv.VERSION
def test_get_property(self):
assert cusv.MAJOR_VER == cusv.get_property(
cuquantum.libraryPropertyType.MAJOR_VERSION)
assert cusv.MINOR_VER == cusv.get_property(
cuquantum.libraryPropertyType.MINOR_VERSION)
assert cusv.PATCH_VER == cusv.get_property(
cuquantum.libraryPropertyType.PATCH_LEVEL)
class TestHandle:
def test_handle_create_destroy(self, handle):
# simple round-trip test
pass
def test_workspace(self, handle):
default_workspace_size = cusv.get_default_workspace_size(handle)
# this is about 18MB as of cuQuantum beta 1
assert default_workspace_size > 0
# cuStateVec does not like a smaller workspace...
size = 24*1024**2
assert size > default_workspace_size
memptr = cp.cuda.alloc(size)
cusv.set_workspace(handle, memptr.ptr, size) # should not fail
def test_stream(self, handle):
# default is on the null stream
assert 0 == cusv.get_stream(handle)
# simple set/get round-trip
stream = cp.cuda.Stream()
cusv.set_stream(handle, stream.ptr)
assert stream.ptr == cusv.get_stream(handle)
class TestInitSV(TestSV):
@pytest.mark.parametrize('sv_type', cusv.StateVectorType)
def test_initialize_state_vector(self, handle, sv_type):
sv = self.get_sv()
data_type = dtype_to_data_type[self.dtype]
if sv_type == cusv.StateVectorType.ZERO:
sv_orig = sv.copy() # already zero-init'd
sv[:] = 1. # reset to something else
cusv.initialize_state_vector(
handle, sv.data.ptr, data_type, self.n_qubits, sv_type)
if sv_type == cusv.StateVectorType.ZERO:
assert (sv == sv_orig).all()
assert cp.allclose(cp.sum(cp.abs(sv)**2), 1.)
class TestAbs2Sum(TestSV):
@pytest.mark.parametrize(
'input_form', (
{'basis_bits': (np.int32, 'int'),},
{'basis_bits': (np.int32, 'seq'),},
)
)
def test_abs2sum_on_z_basis(self, handle, input_form):
sv = self.get_sv()
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
data_type = dtype_to_data_type[self.dtype]
# case 1: both are computed
sum0, sum1 = cusv.abs2sum_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
True, True, basis_bits, basis_bits_len)
assert np.allclose(sum0+sum1, 1)
assert (sum0 is not None) and (sum1 is not None)
# case 2: only sum0 is computed
sum0, sum1 = cusv.abs2sum_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
True, False, basis_bits, basis_bits_len)
assert np.allclose(sum0, 1)
assert (sum0 is not None) and (sum1 is None)
# case 3: only sum1 is computed
sum0, sum1 = cusv.abs2sum_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
False, True, basis_bits, basis_bits_len)
assert np.allclose(sum1, 0)
assert (sum0 is None) and (sum1 is not None)
# case 4: none is computed
with pytest.raises(ValueError):
sum0, sum1 = cusv.abs2sum_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
False, False, basis_bits, basis_bits_len)
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'),},
{'bit_ordering': (np.int32, 'seq'),},
)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_abs2sum_array_no_mask(self, handle, xp, input_form):
# change sv from |000> to 1/\sqrt{2} (|001> + |100>)
sv = self.get_sv()
sv[0] = 0
sv[1] = 1./np.sqrt(2)
sv[4] = 1./np.sqrt(2)
data_type = dtype_to_data_type[self.dtype]
bit_ordering = list(range(self.n_qubits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# test abs2sum on both host and device
abs2sum = xp.zeros((2**bit_ordering_len,), dtype=xp.float64)
abs2sum_ptr = abs2sum.data.ptr if xp is cp else abs2sum.ctypes.data
cusv.abs2sum_array(
handle, sv.data.ptr, data_type, self.n_qubits, abs2sum_ptr,
bit_ordering, bit_ordering_len, 0, 0, 0)
assert xp.allclose(abs2sum.sum(), 1)
assert xp.allclose(abs2sum[1], 0.5)
assert xp.allclose(abs2sum[4], 0.5)
# TODO(leofang): add more tests for abs2sum_array, such as nontrivial masks
class TestBatchedAbs2Sum(TestBatchedSV):
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int_h'),},
{'bit_ordering': (np.int32, 'seq'),},
)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_abs2sum_array_batched_no_mask(self, handle, xp, input_form):
# change sv from |0000> to 1/\sqrt{2} (|0001> + |1000>)
sv = self.get_sv()
sv[..., 0] = 0
sv[..., 1] = 1./np.sqrt(2)
sv[..., 8] = 1./np.sqrt(2)
data_type = dtype_to_data_type[self.dtype]
bit_ordering = list(range(self.n_qubits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# test abs2sum on both host and device
abs2sum = xp.zeros((self.n_svs, 2**bit_ordering_len,),
dtype=xp.float64)
abs2sum_ptr = abs2sum.data.ptr if xp is cp else abs2sum.ctypes.data
cusv.abs2sum_array_batched(
handle, sv.data.ptr, data_type, self.n_qubits,
self.n_svs, self.sv_stride,
abs2sum_ptr, 2**bit_ordering_len,
bit_ordering, bit_ordering_len, 0, 0, 0)
assert xp.allclose(abs2sum.sum(), self.n_svs)
assert xp.allclose(abs2sum[..., 1], 0.5)
assert xp.allclose(abs2sum[..., 8], 0.5)
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int_h'), 'mask_bit_strings': (np.int64, 'int_h'), },
{'bit_ordering': (np.int32, 'int_h'), 'mask_bit_strings': (np.int64, 'int_d'), },
{'bit_ordering': (np.int32, 'seq'), 'mask_bit_strings': (np.int64, 'seq'), },
)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_abs2sum_array_batched_masked(self, handle, xp, input_form):
# change sv from |0000> to 1/\sqrt{2} (|0001> + |1000>)
sv = self.get_sv()
sv[..., 0] = 0
sv[..., 1] = 1./np.sqrt(2)
sv[..., 8] = 1./np.sqrt(2)
data_type = dtype_to_data_type[self.dtype]
bit_ordering = list(range(self.n_qubits - 1)) # exclude the last qubit
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# mask = 0b1
mask_bit_strings = np.ones(self.n_svs)
mask_bit_strings, _ = self._return_data(
mask_bit_strings, 'mask_bit_strings',
*input_form['mask_bit_strings'])
mask_bit_ordering = [self.n_qubits - 1]
mask_bit_ordering, mask_len = self._return_data(
mask_bit_ordering, 'mask_bit_ordering', *input_form['bit_ordering'])
# test abs2sum on both host and device
abs2sum = xp.zeros((self.n_svs, 2**bit_ordering_len,),
dtype=xp.float64)
abs2sum_ptr = abs2sum.data.ptr if xp is cp else abs2sum.ctypes.data
cusv.abs2sum_array_batched(
handle, sv.data.ptr, data_type, self.n_qubits,
self.n_svs, self.sv_stride,
abs2sum_ptr, 2**bit_ordering_len,
bit_ordering, bit_ordering_len,
mask_bit_strings, mask_bit_ordering, mask_len)
# we mask out half of the values
assert xp.allclose(abs2sum.sum(), self.n_svs * 0.5)
assert xp.allclose(abs2sum[..., 0], 0.5)
class TestCollapse(TestSV):
@pytest.mark.parametrize(
'input_form', (
{'basis_bits': (np.int32, 'int'),},
{'basis_bits': (np.int32, 'seq'),},
)
)
@pytest.mark.parametrize(
'parity', (0, 1)
)
def test_collapse_on_z_basis(self, handle, parity, input_form):
sv = self.get_sv()
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
data_type = dtype_to_data_type[self.dtype]
cusv.collapse_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
parity, basis_bits, basis_bits_len, 1)
if parity == 0:
assert cp.allclose(sv.sum(), 1)
elif parity == 1:
assert cp.allclose(sv.sum(), 0)
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'), 'bitstring': (np.int32, 'int')},
{'bit_ordering': (np.int32, 'seq'), 'bitstring': (np.int32, 'seq')},
)
)
def test_collapse_by_bitstring(self, handle, input_form):
# change sv to 1/\sqrt{2} (|000> + |111>)
sv = self.get_sv()
sv[0] = np.sqrt(0.5)
sv[-1] = np.sqrt(0.5)
# collapse to |111>
bitstring = [1] * self.n_qubits
bitstring, bitstring_len = self._return_data(
bitstring, 'bitstring', *input_form['bitstring'])
bit_ordering = list(range(self.n_qubits))
bit_ordering, _ = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
data_type = dtype_to_data_type[self.dtype]
norm = 0.5
# the sv after collapse is normalized as sv -> sv / \sqrt{norm}
cusv.collapse_by_bitstring(
handle, sv.data.ptr, data_type, self.n_qubits,
bitstring, bit_ordering, bitstring_len,
norm)
assert cp.allclose(sv.sum(), 1)
assert cp.allclose(sv[-1], 1)
class TestBatchedCollapse(TestBatchedSV):
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int_h'), 'bitstrings': (np.int64, 'int_d'), 'norms': (np.double, 'int_d')},
{'bit_ordering': (np.int32, 'int_h'), 'bitstrings': (np.int64, 'int_h'), 'norms': (np.double, 'int_h')},
{'bit_ordering': (np.int32, 'seq'), 'bitstrings': (np.int64, 'seq'), 'norms': (np.double, 'seq')},
)
)
def test_collapse_by_bitstring_batched(self, handle, input_form):
# change sv to 1/\sqrt{2} (|00...0> + |11...1>)
sv = self.get_sv()
sv[:, 0] = np.sqrt(0.5)
sv[:, 2**self.n_qubits-1] = np.sqrt(0.5) # Note the padding at the end
bit_ordering = list(range(self.n_qubits))
bit_ordering, _ = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
bitstrings_len = self.n_qubits
data_type = dtype_to_data_type[self.dtype]
# collapse to |11...1>
bitstrings = [2**self.n_qubits-1] * self.n_svs
bitstrings, _ = self._return_data(
bitstrings, 'bitstrings', *input_form['bitstrings'])
# the sv after collapse is normalized as sv -> sv / \sqrt{norm}
norms = [0.5] * self.n_svs
norms, _ = self._return_data(
norms, 'norms', *input_form['norms'])
workspace_size = cusv.collapse_by_bitstring_batched_get_workspace_size(
handle, self.n_svs, bitstrings, norms)
if workspace_size > 0:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
cusv.collapse_by_bitstring_batched(
handle, sv.data.ptr, data_type, self.n_qubits,
self.n_svs, self.sv_stride,
bitstrings, bit_ordering, bitstrings_len,
norms,
workspace_ptr, workspace_size)
cp.cuda.Device().synchronize()
assert cp.allclose(sv[:, 0:2**self.n_qubits].sum(), self.n_svs)
assert cp.allclose(sv[:, 2**self.n_qubits-1], cp.ones(self.n_svs, dtype=self.dtype))
@pytest.mark.parametrize(
'rand',
# the choices here ensure we get either parity
(0, np.nextafter(1, 0))
)
@pytest.mark.parametrize(
'collapse',
(cusv.Collapse.NORMALIZE_AND_ZERO, cusv.Collapse.NONE)
)
class TestMeasure(TestSV):
@pytest.mark.parametrize(
'input_form', (
{'basis_bits': (np.int32, 'int'),},
{'basis_bits': (np.int32, 'seq'),},
)
)
def test_measure_on_z_basis(self, handle, rand, collapse, input_form):
# change the sv to 1/\sqrt{2} (|000> + |010>) to allow 50-50 chance
# of getting either parity
sv = self.get_sv()
sv[0] = np.sqrt(0.5)
sv[2] = np.sqrt(0.5)
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
data_type = dtype_to_data_type[self.dtype]
orig_sv = sv.copy()
parity = cusv.measure_on_z_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
basis_bits, basis_bits_len, rand, collapse)
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
if parity == 0:
# collapse to |000>
assert cp.allclose(sv[0], 1)
elif parity == 1:
# collapse to |010>
assert cp.allclose(sv[2], 1)
# sv is collapsed
assert not (sv == orig_sv).all()
else:
# sv is intact
assert (sv == orig_sv).all()
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'),},
{'bit_ordering': (np.int32, 'seq'),},
)
)
def test_batch_measure(self, handle, rand, collapse, input_form):
# change sv to 1/\sqrt{2} (|000> + |111>)
sv = self.get_sv()
sv[0] = np.sqrt(0.5)
sv[-1] = np.sqrt(0.5)
orig_sv = sv.copy()
data_type = dtype_to_data_type[self.dtype]
bitstring = np.empty(self.n_qubits, dtype=np.int32)
bit_ordering = list(range(self.n_qubits))
bit_ordering, _ = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
cusv.batch_measure(
handle, sv.data.ptr, data_type, self.n_qubits,
bitstring.ctypes.data, bit_ordering, bitstring.size,
rand, collapse)
if bitstring.sum() == 0:
assert rand == 0
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
# collapse to |000>
assert cp.allclose(sv[0], 1)
# sv is collapsed
assert (sv != orig_sv).any()
else:
# sv is intact
assert (sv == orig_sv).all()
elif bitstring.sum() == self.n_qubits:
assert rand == np.nextafter(1, 0)
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
# collapse to |111>
assert cp.allclose(sv[-1], 1)
# sv is collapsed
assert (sv != orig_sv).any()
else:
# sv is intact
assert (sv == orig_sv).all()
else:
assert False, f"unexpected bitstrings: {bitstrings}"
class TestMeasureBatched(TestBatchedSV):
@pytest.mark.parametrize(
'rand',
# the choices here ensure we get either parity
(0, np.nextafter(1, 0))
)
@pytest.mark.parametrize(
'input_form', (
{'bitstrings': (np.int64, 'int_h'), 'bit_ordering': (np.int32, 'int_h'), 'rand_nums': (np.float64, 'int_h')},
{'bitstrings': (np.int64, 'int_d'), 'bit_ordering': (np.int32, 'int_h'), 'rand_nums': (np.float64, 'int_d')},
{'bitstrings': (np.int64, 'int_d'), 'bit_ordering': (np.int32, 'seq'), 'rand_nums': (np.float64, 'seq')},
)
)
@pytest.mark.parametrize('collapse', cusv.Collapse)
@pytest.mark.parametrize('xp', (np, cp))
def test_measure_batched(self, handle, rand, input_form, collapse, xp):
# change sv to 1/\sqrt{2} (|00...0> + |11...1>)
sv = self.get_sv()
sv[:, 0] = np.sqrt(0.5)
sv[:, 2**self.n_qubits-1] = np.sqrt(0.5) # Note the padding at the end
orig_sv = sv.copy()
data_type = dtype_to_data_type[self.dtype]
bitstrings = np.empty(self.n_svs, dtype=np.int32)
bitstrings, _ = self._return_data(
bitstrings, 'bitstrings', *input_form['bitstrings'])
bit_ordering = list(range(self.n_qubits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
rand_nums = [rand] * self.n_svs
rand_nums, _ = self._return_data(
rand_nums, 'rand_nums', *input_form['rand_nums'])
cusv.measure_batched(
handle, sv.data.ptr, data_type, self.n_qubits,
self.n_svs, self.sv_stride,
bitstrings, bit_ordering, bit_ordering_len,
rand_nums, collapse)
bitstrings = self.bitstrings
if bitstrings.sum() == 0:
assert rand == 0
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
# collapse to |00...0>
assert cp.allclose(sv[:, 0], 1)
# sv is collapsed
assert (sv != orig_sv).any()
else:
# sv is intact
assert (sv == orig_sv).all()
elif bitstrings.sum() == (2**self.n_qubits-1)*self.n_svs:
assert rand == np.nextafter(1, 0)
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
# collapse to |11...1>
assert cp.allclose(sv[:, 2**self.n_qubits-1], 1)
# sv is collapsed
assert (sv != orig_sv).any()
else:
# sv is intact
assert (sv == orig_sv).all()
else:
assert False, f"unexpected bitstrings: {bitstrings}"
class TestApply(TestSV):
@pytest.mark.parametrize(
'input_form', (
{'targets': (np.int32, 'int'), 'controls': (np.int32, 'int'),
# sizeof(enum) == sizeof(int)
'paulis': (np.int32, 'int'),},
{'targets': (np.int32, 'seq'), 'controls': (np.int32, 'seq'),
'paulis': (np.int32, 'seq'),},
)
)
def test_apply_pauli_rotation(self, handle, input_form):
# change sv to |100>
sv = self.get_sv()
sv[0] = 0
sv[4] = 1
data_type = dtype_to_data_type[self.dtype]
targets = [0, 1]
targets, targets_len = self._return_data(
targets, 'targets', *input_form['targets'])
controls = [2]
controls, controls_len = self._return_data(
controls, 'controls', *input_form['controls'])
control_values = 0 # set all control bits to 1
paulis = [cusv.Pauli.X, cusv.Pauli.X]
paulis, _ = self._return_data(
paulis, 'paulis', *input_form['paulis'])
cusv.apply_pauli_rotation(
handle, sv.data.ptr, data_type, self.n_qubits,
0.5*np.pi, paulis,
targets, targets_len,
controls, control_values, controls_len)
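        # the controlled XX rotation should take |100> to |111> up to a global
        # phase of i; cancel that phase (the -1j below) before checking the amplitude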
sv *= -1j
# result is |111>
assert cp.allclose(sv[-1], 1)
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'input_form', (
{'targets': (np.int32, 'int'), 'controls': (np.int32, 'int')},
{'targets': (np.int32, 'seq'), 'controls': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_apply_matrix(self, handle, xp, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
sv = self.get_sv()
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
targets = [0, 1, 2]
targets, targets_len = self._return_data(
targets, 'targets', *input_form['targets'])
controls = []
controls, controls_len = self._return_data(
controls, 'controls', *input_form['controls'])
# matrix can live on host or device
matrix = xp.zeros((2**self.n_qubits, 2**self.n_qubits), dtype=sv.dtype)
matrix[-1][0] = 1
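        # only the bottom-left entry of the matrix is set, so it maps |000>
        # onto |111>; applied to the initial |000> state this should give |111>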
matrix_ptr = matrix.ctypes.data if xp is np else matrix.data.ptr
if mempool is None:
workspace_size = cusv.apply_matrix_get_workspace_size(
handle, data_type, self.n_qubits,
matrix_ptr, data_type, cusv.MatrixLayout.ROW, 0,
targets_len, controls_len, compute_type)
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
cusv.apply_matrix(
handle, sv.data.ptr, data_type, self.n_qubits,
matrix_ptr, data_type, cusv.MatrixLayout.ROW, 0,
targets, targets_len,
controls, 0, controls_len,
compute_type, workspace_ptr, workspace_size)
assert sv[-1] == 1 # output state is |111>
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'input_form', (
{'permutation': (np.int64, 'int'), 'basis_bits': (np.int32, 'int'),
'mask_bitstring': (np.int32, 'int'), 'mask_ordering': (np.int32, 'int')},
{'permutation': (np.int64, 'seq'), 'basis_bits': (np.int32, 'seq'),
'mask_bitstring': (np.int32, 'seq'), 'mask_ordering': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_apply_generalized_permutation_matrix(
self, handle, xp, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
sv = self.get_sv()
sv[:] = 1 # invalid sv just to make math checking easier
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
# TODO(leofang): test permutation on either host or device
permutation = list(np.random.permutation(2**self.n_qubits))
permutation_data = permutation
permutation, permutation_len = self._return_data(
permutation, 'permutation', *input_form['permutation'])
# diagonal can live on host or device
diagonal = 10 * xp.ones((2**self.n_qubits, ), dtype=sv.dtype)
diagonal_ptr = diagonal.ctypes.data if xp is np else diagonal.data.ptr
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
# TODO(leofang): test masks
mask_bitstring = 0
mask_ordering = 0
mask_len = 0
if mempool is None:
workspace_size = cusv.apply_generalized_permutation_matrix_get_workspace_size(
handle, data_type, self.n_qubits,
permutation, diagonal_ptr, data_type,
basis_bits, basis_bits_len, mask_len)
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
cusv.apply_generalized_permutation_matrix(
handle, sv.data.ptr, data_type, self.n_qubits,
permutation, diagonal_ptr, data_type, 0,
basis_bits, basis_bits_len,
mask_bitstring, mask_ordering, mask_len,
workspace_ptr, workspace_size)
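        # the input sv is all ones and the diagonal is a constant 10, so the
        # output should equal the diagonal entries gathered at the permuted
        # indices; compare against diagonal[permutation]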
assert cp.allclose(sv, diagonal[xp.asarray(permutation_data)])
class TestBatchedApply(TestBatchedSV):
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'input_form', (
{'matrix_indices': (np.int32, 'int_h'), 'targets': (np.int32, 'int_h'), 'controls': (np.int32, 'int_h')},
{'matrix_indices': (np.int32, 'int_d'), 'targets': (np.int32, 'int_h'), 'controls': (np.int32, 'int_h')},
{'matrix_indices': (np.int32, 'seq'), 'targets': (np.int32, 'seq'), 'controls': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize('xp', (np, cp))
@pytest.mark.parametrize('map_type', cusv.MatrixMapType)
def test_apply_matrix_batched(
self, handle, map_type, xp, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
sv = self.get_sv()
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
targets = list(range(self.n_qubits))
targets, targets_len = self._return_data(
targets, 'targets', *input_form['targets'])
controls = []
controls, controls_len = self._return_data(
controls, 'controls', *input_form['controls'])
if map_type == cusv.MatrixMapType.BROADCAST:
n_matrices = 1
elif map_type == cusv.MatrixMapType.MATRIX_INDEXED:
n_matrices = self.n_svs
# matrices and their indices can live on host or device
matrices = xp.zeros(
(n_matrices, 2**self.n_qubits, 2**self.n_qubits),
dtype=sv.dtype)
matrices[..., -1, 0] = 1
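        # every matrix in the batch maps |00...0> onto |11...1> (only the
        # bottom-left entry is set), so each batched sv should end in |11...1>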
matrices_ptr = matrices.ctypes.data if xp is np else matrices.data.ptr
matrix_indices = list(range(n_matrices))
if len(matrix_indices) > 1:
np.random.shuffle(matrix_indices)
matrix_indices, n_matrices = self._return_data(
matrix_indices, 'matrix_indices', *input_form['matrix_indices'])
if mempool is None:
workspace_size = cusv.apply_matrix_batched_get_workspace_size(
handle, data_type, self.n_qubits, self.n_svs, self.sv_stride,
map_type, matrix_indices, matrices_ptr, data_type,
cusv.MatrixLayout.ROW, 0, n_matrices,
targets_len, controls_len, compute_type)
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
cusv.apply_matrix_batched(
handle, sv.data.ptr, data_type, self.n_qubits,
self.n_svs, self.sv_stride, map_type, matrix_indices,
matrices_ptr, data_type,
cusv.MatrixLayout.ROW, 0, n_matrices,
targets, targets_len,
controls, 0, controls_len,
compute_type, workspace_ptr, workspace_size)
assert (sv[..., 2**self.n_qubits-1] == 1).all() # output state is |11...1>
class TestExpect(TestSV):
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'input_form', (
{'basis_bits': (np.int32, 'int'),},
{'basis_bits': (np.int32, 'seq'),},
)
)
@pytest.mark.parametrize(
'expect_dtype', (np.float64, np.complex128)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_compute_expectation(self, handle, xp, expect_dtype, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
# create a uniform sv
sv = self.get_sv()
sv[:] = np.sqrt(1/(2**self.n_qubits))
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
basis_bits = list(range(self.n_qubits))
basis_bits, basis_bits_len = self._return_data(
basis_bits, 'basis_bits', *input_form['basis_bits'])
# matrix can live on host or device
matrix = xp.ones((2**self.n_qubits, 2**self.n_qubits), dtype=sv.dtype)
matrix_ptr = matrix.ctypes.data if xp is np else matrix.data.ptr
if mempool is None:
workspace_size = cusv.compute_expectation_get_workspace_size(
handle, data_type, self.n_qubits,
matrix_ptr, data_type, cusv.MatrixLayout.ROW,
basis_bits_len, compute_type)
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
expect = np.empty((1,), dtype=expect_dtype)
# TODO(leofang): check if this is relaxed in beta 2
expect_data_type = (
cudaDataType.CUDA_R_64F if expect_dtype == np.float64
else cudaDataType.CUDA_C_64F)
cusv.compute_expectation(
handle, sv.data.ptr, data_type, self.n_qubits,
expect.ctypes.data, expect_data_type,
matrix_ptr, data_type, cusv.MatrixLayout.ROW,
basis_bits, basis_bits_len,
compute_type, workspace_ptr, workspace_size)
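        # for the uniform state (amplitude 1/sqrt(2**n)) and an all-ones
        # observable, <psi|M|psi> = (2**n / sqrt(2**n))**2 = 2**n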
assert xp.allclose(expect, 2**self.n_qubits)
# TODO: test other input forms?
def test_compute_expectations_on_pauli_basis(self, handle):
# create a uniform sv
sv = self.get_sv()
sv[:] = np.sqrt(1/(2**self.n_qubits))
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
# measure XX...X, YY..Y, ZZ...Z
paulis = [[cusv.Pauli.X for i in range(self.n_qubits)],
[cusv.Pauli.Y for i in range(self.n_qubits)],
[cusv.Pauli.Z for i in range(self.n_qubits)],]
basis_bits = [[*range(self.n_qubits)] for i in range(len(paulis))]
n_basis_bits = [len(basis_bits[i]) for i in range(len(paulis))]
expect = np.empty((len(paulis),), dtype=np.float64)
cusv.compute_expectations_on_pauli_basis(
handle, sv.data.ptr, data_type, self.n_qubits,
expect.ctypes.data, paulis, len(paulis),
basis_bits, n_basis_bits)
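        # the uniform superposition is the +1 eigenstate of X on every qubit,
        # so <XX...X> = 1 while <YY...Y> and <ZZ...Z> should vanish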
result = np.zeros_like(expect)
result[0] = 1 # for XX...X
assert np.allclose(expect, result)
class TestSampler(TestSV):
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'),},
{'bit_ordering': (np.int32, 'seq'),},
)
)
def test_sampling(self, handle, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
# create a uniform sv
sv = self.get_sv()
sv[:] = np.sqrt(1/(2**self.n_qubits))
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
shots = 4096
bitstrings = np.empty((shots,), dtype=np.int64)
rand_nums = np.random.random((shots,)).astype(np.float64)
# measure all qubits
bit_ordering = list(range(self.n_qubits))
bit_ordering, _ = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
sampler, workspace_size = cusv.sampler_create(
handle, sv.data.ptr, data_type, self.n_qubits, shots)
if mempool is None:
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
try:
cusv.sampler_preprocess(
handle, sampler, workspace_ptr, workspace_size)
cusv.sampler_sample(
handle, sampler, bitstrings.ctypes.data,
bit_ordering, self.n_qubits,
rand_nums.ctypes.data, shots,
cusv.SamplerOutput.RANDNUM_ORDER)
norm = cusv.sampler_get_squared_norm(handle, sampler)
# TODO: add a multi-GPU test for this API
# We're being sloppy here by checking a trivial case, which is
# effectively a no-op. This is just a call check.
cusv.sampler_apply_sub_sv_offset(
handle, sampler, 0, 1, 0, norm)
finally:
cusv.sampler_destroy(sampler)
keys, counts = np.unique(bitstrings, return_counts=True)
# keys are the returned bitstrings 000, 001, ..., 111
# the sv has all components, and unique() returns a sorted array,
# so the following should hold:
assert (keys == np.arange(2**self.n_qubits)).all()
assert np.allclose(norm, 1)
# TODO: test counts, which should follow a uniform distribution
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
# TODO(leofang): test mask_bitstring & mask_ordering
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'), 'mask_bitstring': (np.int32, 'int'), 'mask_ordering': (np.int32, 'int')},
{'bit_ordering': (np.int32, 'seq'), 'mask_bitstring': (np.int32, 'seq'), 'mask_ordering': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'readonly', (True, False)
)
class TestAccessor(TestSV):
def test_accessor_get(self, handle, readonly, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
# create a monotonically increasing sv
sv = self.get_sv()
data = cp.arange(2**self.n_qubits, dtype=sv.dtype)
        # normalize by the total norm so the sv stays monotonically increasing
        data /= cp.sqrt(cp.sum(cp.abs(data)**2))
sv[:] = data
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
# measure all qubits
bit_ordering = list(range(self.n_qubits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# TODO(leofang): test them
mask_bitstring = 0
mask_ordering = 0
mask_len = 0
if readonly:
accessor_create = cusv.accessor_create_view
else:
accessor_create = cusv.accessor_create
accessor, workspace_size = accessor_create(
handle, sv.data.ptr, data_type, self.n_qubits,
bit_ordering, bit_ordering_len,
mask_bitstring, mask_ordering, mask_len)
try:
if mempool is None:
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
cusv.accessor_set_extra_workspace(
handle, accessor, workspace_ptr, workspace_size)
buf_len = 2**2
buf = cp.empty(buf_len, dtype=sv.dtype)
# copy the last buf_len elements
cusv.accessor_get(
handle, accessor, buf.data.ptr, sv.size-1-buf_len, sv.size-1)
finally:
cusv.accessor_destroy(accessor)
assert (sv[sv.size-1-buf_len: sv.size-1] == buf).all()
def test_accessor_set(self, handle, readonly, input_form, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
# create a monotonically increasing sv
sv = self.get_sv()
data = cp.arange(2**self.n_qubits, dtype=sv.dtype)
        # normalize by the total norm so the sv stays monotonically increasing
        data /= cp.sqrt(cp.sum(cp.abs(data)**2))
sv[:] = data
data_type = dtype_to_data_type[self.dtype]
compute_type = dtype_to_compute_type[self.dtype]
# measure all qubits
bit_ordering = list(range(self.n_qubits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# TODO(leofang): test them
mask_bitstring = 0
mask_ordering = 0
mask_len = 0
if readonly:
accessor_create = cusv.accessor_create_view
else:
accessor_create = cusv.accessor_create
accessor, workspace_size = accessor_create(
handle, sv.data.ptr, data_type, self.n_qubits,
bit_ordering, bit_ordering_len,
mask_bitstring, mask_ordering, mask_len)
try:
if mempool is None:
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
cusv.accessor_set_extra_workspace(
handle, accessor, workspace_ptr, workspace_size)
buf_len = 2**2
buf = cp.zeros(buf_len, dtype=sv.dtype)
if readonly:
# copy the last buf_len elements would fail
with pytest.raises(cusv.cuStateVecError) as e_info:
cusv.accessor_set(
handle, accessor, buf.data.ptr, sv.size-1-buf_len, sv.size-1)
else:
# copy the last buf_len elements
cusv.accessor_set(
handle, accessor, buf.data.ptr, sv.size-1-buf_len, sv.size-1)
finally:
cusv.accessor_destroy(accessor)
if readonly:
# sv unchanged
assert (sv[sv.size-1-buf_len: sv.size-1] == data[sv.size-1-buf_len: sv.size-1]).all()
else:
assert (sv[sv.size-1-buf_len: sv.size-1] == 0).all()
class TestTestMatrixType:
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'matrix_type', (cusv.MatrixType.UNITARY, cusv.MatrixType.HERMITIAN)
)
@pytest.mark.parametrize(
'input_form', (
{'targets': (np.int32, 'int'), },
{'targets': (np.int32, 'seq'), },
)
)
@pytest.mark.parametrize(
'dtype', (np.complex64, np.complex128)
)
@pytest.mark.parametrize(
'xp', (np, cp)
)
def test_apply_matrix_type(
self, handle, xp, dtype, input_form, matrix_type, mempool):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
data_type = dtype_to_data_type[dtype]
compute_type = dtype_to_compute_type[dtype]
n_targets = 4
# matrix can live on host or device
# choose a trivial matrix
data = xp.ones(2**n_targets, dtype=dtype)
matrix = xp.diag(data)
matrix_ptr = matrix.ctypes.data if xp is np else matrix.data.ptr
if mempool is None:
workspace_size = cusv.test_matrix_type_get_workspace_size(
handle, matrix_type,
matrix_ptr, data_type, cusv.MatrixLayout.ROW, n_targets,
0, compute_type)
if workspace_size:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
else:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cusv.set_device_mem_handler(handle, handler)
workspace_ptr = 0
workspace_size = 0
residual = cusv.test_matrix_type(
handle, matrix_type,
matrix_ptr, data_type, cusv.MatrixLayout.ROW, n_targets,
0, compute_type, workspace_ptr, workspace_size)
assert np.isclose(residual, 0)
@pytest.mark.parametrize(
'rand',
# the choices here ensure we get either parity
(0, np.nextafter(1, 0))
)
@pytest.mark.parametrize(
'collapse',
(cusv.Collapse.NORMALIZE_AND_ZERO, cusv.Collapse.NONE)
)
@pytest.mark.skipif(
cp.cuda.runtime.getDeviceCount() < 2, reason='not enough GPUs')
class TestBatchMeasureWithSubSV(TestMultiGpuSV):
@pytest.mark.parametrize(
'input_form', (
{'bit_ordering': (np.int32, 'int'),},
{'bit_ordering': (np.int32, 'seq'),},
)
)
@pytest.mark.parametrize(
'multi_gpu_handles', (False,), indirect=True # no need for P2P
)
def test_batch_measure_with_offset(
self, multi_gpu_handles, rand, collapse, input_form):
handles = multi_gpu_handles
sub_sv = self.get_sv()
data_type = dtype_to_data_type[self.dtype]
bit_ordering = list(range(self.n_local_bits))
bit_ordering, bit_ordering_len = self._return_data(
bit_ordering, 'bit_ordering', *input_form['bit_ordering'])
# change sv to 1/\sqrt{2} (|0000> + |1111>), and compute abs2sum;
# calling abs2sum_array is also OK, but we focus on testing the target API
cumulative_array = np.zeros(self.n_devices+1, dtype=np.float64)
for i_sv in range(self.n_devices):
with cp.cuda.Device(i_sv):
if i_sv == 0:
# |0 000> is on GPU 0
sub_sv[i_sv][0] = np.sqrt(0.5)
elif i_sv == 1:
# |1 111> is on GPU 1
sub_sv[i_sv][-1] = np.sqrt(0.5)
abs2sum = cp.asnumpy(cp.sum(cp.abs(sub_sv[i_sv])**2))
cumulative_array[i_sv+1] = cumulative_array[i_sv] + abs2sum
orig_sub_sv = copy.deepcopy(sub_sv)
bitstring = np.empty(self.n_local_bits, dtype=np.int32)
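        # pick the sub-sv whose cumulative-probability window contains rand and
        # measure on that device, passing the offset and total norm so the
        # local measurement accounts for the full distributed state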
for i_sv in range(self.n_devices):
if (cumulative_array[i_sv] <= rand
and rand < cumulative_array[i_sv+1]):
global_bits = i_sv
norm = cumulative_array[-1]
offset = cumulative_array[i_sv]
with cp.cuda.Device(i_sv) as dev:
cusv.batch_measure_with_offset(
handles[i_sv], sub_sv[i_sv].data.ptr, data_type,
self.n_local_bits, bitstring.ctypes.data,
bit_ordering, bit_ordering_len, rand,
collapse, offset, norm)
dev.synchronize()
break
else:
assert False
if global_bits == 0:
# get |0 000>
assert (bitstring == 0).all()
elif global_bits == 1:
# get |1 111>
assert (bitstring == 1).all()
else:
assert False
if collapse == cusv.Collapse.NORMALIZE_AND_ZERO:
# the measured sub sv is collapsed (those not measured are intact!)
if global_bits == 0:
# collapse to |0 000>
with cp.cuda.Device(0):
assert cp.allclose(sub_sv[0][0], 1)
assert not (sub_sv[0] == orig_sub_sv[0]).all()
with cp.cuda.Device(1):
assert (sub_sv[1] == orig_sub_sv[1]).all()
elif global_bits == 1:
# collapse to |1 111>
with cp.cuda.Device(0):
assert (sub_sv[0] == orig_sub_sv[0]).all()
with cp.cuda.Device(1):
assert cp.allclose(sub_sv[1][-1], 1)
assert not (sub_sv[1] == orig_sub_sv[1]).all()
else:
assert False, f"unexpected bitstring: {bitstring}"
else:
# sv is intact
with cp.cuda.Device(0):
assert (sub_sv[0] == orig_sub_sv[0]).all()
with cp.cuda.Device(1):
assert (sub_sv[1] == orig_sub_sv[1]).all()
class TestSwap:
@pytest.mark.parametrize(
'input_form', (
{'swapped_bits': (np.int32, 'int'),
'mask_bitstring': (np.int32, 'int'), 'mask_ordering': (np.int32, 'int')},
{'swapped_bits': (np.int32, 'seq'),
'mask_bitstring': (np.int32, 'seq'), 'mask_ordering': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'dtype', (np.complex64, np.complex128)
)
def test_swap_index_bits(self, handle, dtype, input_form):
n_qubits = 4
sv = cp.zeros(2**n_qubits, dtype=dtype)
data_type = dtype_to_data_type[dtype]
# set sv to |0110>
sv[6] = 1
orig_sv = sv.copy()
swapped_bits = [(0, 2), (1, 3)]
n_swapped_bits = len(swapped_bits)
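        # |0110> has (q3, q2, q1, q0) = (0, 1, 1, 0); swapping q0<->q2 and
        # q1<->q3 gives (1, 0, 0, 1), i.e. |1001> at index 9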
if input_form['swapped_bits'][1] == 'int':
swapped_bits_data = np.asarray(
swapped_bits, dtype=input_form['swapped_bits'][0])
swapped_bits = swapped_bits_data.ctypes.data
# TODO: test mask
mask_bitstring = 0
mask_ordering = 0
mask_len = 0
cusv.swap_index_bits(
handle, sv.data.ptr, data_type, n_qubits,
swapped_bits, n_swapped_bits,
mask_bitstring, mask_ordering, mask_len)
# now we should get |1001>
assert (sv != orig_sv).any()
assert sv[6] == 0
assert sv[9] == 1
@pytest.mark.parametrize(
'topology', [t for t in cusv.DeviceNetworkType]
)
@pytest.mark.skipif(
cp.cuda.runtime.getDeviceCount() < 2, reason='not enough GPUs')
class TestMultiGPUSwap(TestMultiGpuSV):
@pytest.mark.parametrize(
'input_form', (
{'handles': (np.intp, 'int'), 'sub_svs': (np.intp, 'int'),
'swapped_bits': (np.int32, 'int'), 'mask': (np.int32, 'int')},
{'handles': (np.intp, 'seq'), 'sub_svs': (np.intp, 'seq'),
'swapped_bits': (np.int32, 'seq'), 'mask': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'multi_gpu_handles', (True,), indirect=True # need P2P
)
def test_multi_device_swap_index_bits(
self, multi_gpu_handles, input_form, topology):
# currently the test class sets the following:
# - n_global_qubits = 1
# - n_local_qubits = 3
handles = multi_gpu_handles
n_handles = len(handles)
sub_sv = self.get_sv()
data_type = dtype_to_data_type[self.dtype]
# set sv to |0110> (up to normalization)
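        # the global qubit (bit 3) is 0, so the nonzero amplitude lives on
        # sub-sv 0; the local pattern 110 is index 6, i.e. the second-to-last element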
with cp.cuda.Device(0):
sub_sv[0][0] = 0
sub_sv[0][-2] = 1
if input_form['handles'][1] == 'int':
handles_data = np.asarray(
handles, dtype=input_form['handles'][0])
handles = handles_data.ctypes.data
sub_sv_data = sub_sv
sub_sv_ptr_data = [arr.data.ptr for arr in sub_sv]
sub_sv = sub_sv_ptr_data
if input_form['sub_svs'][1] == 'int':
sub_sv_ptr_data = np.asarray(
sub_sv_ptr_data, dtype=input_form['sub_svs'][0])
sub_sv = sub_sv_ptr_data.ctypes.data
else:
sub_sv = sub_sv_ptr_data
swapped_bits = [(3, 1)]
n_swapped_bits = len(swapped_bits)
if input_form['swapped_bits'][1] == 'int':
swapped_bits_data = np.asarray(
swapped_bits, dtype=input_form['swapped_bits'][0])
swapped_bits = swapped_bits_data.ctypes.data
# TODO: test mask
mask_bitstring = []
mask_ordering = []
mask_len = 0
if input_form['mask'][1] == 'int':
mask_bitstring_data = np.asarray(
mask_bitstring, dtype=input_form['mask'][0])
mask_bitstring = mask_bitstring_data.ctypes.data
mask_ordering_data = np.asarray(
mask_ordering, dtype=input_form['mask'][0])
mask_ordering = mask_ordering_data.ctypes.data
cusv.multi_device_swap_index_bits(
handles, n_handles, sub_sv, data_type,
self.n_global_bits, self.n_local_bits,
swapped_bits, n_swapped_bits,
mask_bitstring, mask_ordering, mask_len,
topology)
# now we should get |1100>
sub_sv = sub_sv_data
with cp.cuda.Device(0):
assert sub_sv[0][-2] == 0
with cp.cuda.Device(1):
assert sub_sv[1][4] == 1
@pytest.mark.skipif(MPI is None, reason="need mpi4py (& MPI)")
class TestCommunicator:
@pytest.mark.parametrize(
"communicator_args", (
(cusv.CommunicatorType.MPICH, 'libmpi.so'), # see NVIDIA/cuQuantum#31
(cusv.CommunicatorType.OPENMPI, ''),
# TODO: can we use cffi to generate the wrapper lib on the fly?
(cusv.CommunicatorType.EXTERNAL, ''),
)
)
def test_communicator(self, handle, communicator_args):
if communicator_args[0] == cusv.CommunicatorType.MPICH:
vendor = "MPICH"
elif communicator_args[0] == cusv.CommunicatorType.OPENMPI:
vendor = "Open MPI"
else:
vendor = "n/a"
comm_name, _ = MPI.get_vendor()
if comm_name != vendor:
pytest.skip(f"Using {comm_name}, which mismatches with the "
f"requested MPI implementation ({vendor})")
c = cusv.communicator_create(handle, *communicator_args)
cusv.communicator_destroy(handle, c)
class TestParameters:
def test_parameters(self):
# test constructor
parameters = cusv.SVSwapParameters()
# test getter/setter
parameters.transfer_size = 42
assert parameters.transfer_size == 42
# test accessing internal data (numpy.ndarray)
parameters_arr = parameters.data
assert parameters_arr.ctypes.data == parameters.ptr
# test reading/writing the underlying ndarray of custom dtype
assert parameters_arr.dtype == cusv.sv_swap_parameters_dtype
assert parameters_arr['transfer_size'] == 42
parameters_arr['transfer_size'] = 24
assert parameters_arr['transfer_size'] == 24
assert parameters.transfer_size == 24
# test all struct members
        assert np.all(parameters.swap_batch_index == parameters_arr['swap_batch_index'])
        assert np.all(parameters.org_sub_sv_index == parameters_arr['org_sub_sv_index'])
        assert np.all(parameters.dst_sub_sv_index == parameters_arr['dst_sub_sv_index'])
        assert np.all(parameters.org_segment_mask_string == parameters_arr['org_segment_mask_string'])
        assert np.all(parameters.dst_segment_mask_string == parameters_arr['dst_segment_mask_string'])
        assert np.all(parameters.segment_mask_ordering == parameters_arr['segment_mask_ordering'])
        assert np.all(parameters.segment_mask_len == parameters_arr['segment_mask_len'])
        assert np.all(parameters.n_segment_bits == parameters_arr['n_segment_bits'])
        assert np.all(parameters.data_transfer_type == parameters_arr['data_transfer_type'])
        assert np.all(parameters.transfer_size == parameters_arr['transfer_size'])
# test alternative constructor & comparison op
new_parameters = cusv.SVSwapParameters.from_data(parameters_arr)
assert parameters.data == new_parameters.data
assert parameters.ptr == new_parameters.ptr
assert parameters == new_parameters
new_parameters_arr = np.empty(
(1,), dtype=cusv.sv_swap_parameters_dtype)
new_parameters_arr['segment_mask_ordering'][:] = 1
new_parameters = cusv.SVSwapParameters.from_data(new_parameters_arr)
assert parameters.data != new_parameters.data
assert parameters.ptr != new_parameters.ptr
assert parameters != new_parameters
# negative tests
parameters_arr = np.empty(
(2,), dtype=cusv.sv_swap_parameters_dtype)
with pytest.raises(ValueError) as e: # wrong size
parameters = cusv.SVSwapParameters.from_data(parameters_arr)
parameters_arr = np.empty(
(1,), dtype=np.float32)
with pytest.raises(ValueError) as e: # wrong dtype
parameters = cusv.SVSwapParameters.from_data(parameters_arr)
parameters_arr = "ABC"
with pytest.raises(ValueError) as e: # wrong type
parameters = cusv.SVSwapParameters.from_data(parameters_arr)
class TestWorker:
event = cp.cuda.Event()
stream = cp.cuda.Stream()
sv = cp.zeros((2**4,), dtype=cp.complex64)
@pytest.mark.parametrize(
"worker_args", ((sv.data.ptr, 0, event.ptr, cudaDataType.CUDA_C_32F, stream.ptr),)
)
@pytest.mark.parametrize(
'input_form', (
{'sv': (np.intp, 'int'), 'indices': (np.int32, 'int'),
'event': (np.intp, 'int')},
{'sv': (np.intp, 'seq'), 'indices': (np.int32, 'seq'),
'event': (np.intp, 'seq')},
)
)
@pytest.mark.parametrize(
'param_form', ('class', 'ndarray', 'int')
)
def test_worker(self, handle, worker_args, input_form, param_form):
worker, extra_size, min_size = cusv.sv_swap_worker_create(
handle,
0, # set the communicator to null, assuming single process
*worker_args)
extra_space = cp.cuda.alloc(extra_size)
cusv.sv_swap_worker_set_extra_workspace(
handle, worker, extra_space.ptr, extra_size)
transfer_space = cp.cuda.alloc(min_size)
cusv.sv_swap_worker_set_transfer_workspace(
handle, worker, transfer_space.ptr, min_size)
sv = [self.sv.data.ptr]
if input_form['sv'][1] == 'int':
sv_data = np.asarray(
sv, dtype=input_form['sv'][0])
sv = sv_data.ctypes.data
indices = [1]
if input_form['indices'][1] == 'int':
indices_data = np.asarray(
indices, dtype=input_form['indices'][0])
indices = indices_data.ctypes.data
dummy = cp.cuda.Event()
event = [dummy.ptr]
if input_form['event'][1] == 'int':
event_data = np.asarray(
event, dtype=input_form['event'][0])
event = event_data.ctypes.data
cusv.sv_swap_worker_set_sub_svs_p2p(
handle, worker,
sv, indices, event, 1)
parameters_data = cusv.SVSwapParameters()
parameters_data.swap_batch_index = 0
parameters_data.org_sub_sv_index = 0
parameters_data.dst_sub_sv_index = 1
parameters_data.n_segment_bits = 0
parameters_data.transfer_size = 1
parameters_data.data_transfer_type = cusv.DataTransferType.NONE
parameters_data.segment_mask_len = 0
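        # a minimal swap plan: move a single element from sub-sv 0 to sub-sv 1
        # with no inter-process data transfer, just to exercise the worker API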
if param_form == "class":
parameters = parameters_data
elif param_form == "ndarray":
parameters = parameters_data.data
elif param_form == "int":
parameters = parameters_data.ptr
cusv.sv_swap_worker_set_parameters(
handle, worker, parameters, 1)
cusv.sv_swap_worker_execute(
handle, worker, 0, 0)
cusv.sv_swap_worker_destroy(handle, worker)
class TestScheduler:
@pytest.mark.parametrize(
"scheduler_args", ((1, 1),),
)
@pytest.mark.parametrize(
'input_form', (
{'swapped_bits': (np.int32, 'int'), 'mask': (np.int32, 'int')},
{'swapped_bits': (np.int32, 'seq'), 'mask': (np.int32, 'seq')},
)
)
@pytest.mark.parametrize(
'param_form', (None, 'class', 'ndarray', 'int')
)
def test_scheduler(self, handle, scheduler_args, input_form, param_form):
scheduler = cusv.dist_index_bit_swap_scheduler_create(
handle, *scheduler_args)
swapped_bits = [(0, 1)]
n_swapped_bits = len(swapped_bits)
if input_form['swapped_bits'][1] == 'int':
swapped_bits_data = np.asarray(
swapped_bits, dtype=input_form['swapped_bits'][0])
swapped_bits = swapped_bits_data.ctypes.data
# TODO: test mask
mask_bitstring = []
mask_ordering = []
mask_len = 0
if input_form['mask'][1] == 'int':
mask_bitstring_data = np.asarray(
mask_bitstring, dtype=input_form['mask'][0])
mask_bitstring = mask_bitstring_data.ctypes.data
mask_ordering_data = np.asarray(
mask_ordering, dtype=input_form['mask'][0])
mask_ordering = mask_ordering_data.ctypes.data
n_swap_batches = cusv.dist_index_bit_swap_scheduler_set_index_bit_swaps(
handle, scheduler,
swapped_bits, n_swapped_bits,
mask_bitstring, mask_ordering, mask_len)
if param_form is None:
params_in = None
elif param_form == "class":
params_in = cusv.SVSwapParameters()
elif param_form == "ndarray":
params_in = np.empty((1,), dtype=cusv.sv_swap_parameters_dtype)
elif param_form == "int":
params = np.empty((1,), dtype=cusv.sv_swap_parameters_dtype)
params_in = params.ctypes.data
else:
assert False
params_out = cusv.dist_index_bit_swap_scheduler_get_parameters(
handle, scheduler, 0, 0, params=params_in)
cusv.dist_index_bit_swap_scheduler_destroy(handle, scheduler)
if param_form != "int":
assert isinstance(params_out, cusv.SVSwapParameters)
else:
assert params_out is None
# params_in should be modified in-place
if param_form == "class":
assert id(params_out) == id(params_in)
assert params_out.data == params_in.data
assert params_out.ptr == params_in.ptr
elif param_form == "ndarray":
assert params_out.data == params_in
assert params_out.ptr == params_in.ctypes.data
elif param_form == "int":
# nothing to compare against...
pass
class TestMemHandler(MemHandlerTestBase):
mod = cusv
prefix = "custatevec"
error = cusv.cuStateVecError
# TODO: add more different memory sources
@pytest.mark.parametrize(
'source', (None, "py-callable", 'cffi', 'cffi_struct')
)
def test_set_get_device_mem_handler(self, source, handle):
self._test_set_get_device_mem_handler(source, handle)
class TestLogger(LoggerTestBase):
mod = cusv
prefix = "custatevec"
| cuQuantum-main | python/tests/cuquantum_tests/custatevec_tests/test_custatevec.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
| cuQuantum-main | python/tests/cuquantum_tests/custatevec_tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import dataclasses
import sys
import cupy
import numpy
import pytest
from cuquantum import tensor
from cuquantum.cutensornet._internal.decomposition_utils import DECOMPOSITION_DTYPE_NAMES
from cuquantum.cutensornet._internal.utils import infer_object_package
from .approxTN_utils import tensor_decompose, verify_split_QR, verify_split_SVD
from .data import backend_names, tensor_decomp_expressions
from .test_options import _OptionsBase, TestNetworkOptions
from .test_utils import DecomposeFactory
from .test_utils import deselect_decompose_tests, gen_rand_svd_method
from .test_utils import get_stream_for_backend
@pytest.mark.uncollect_if(func=deselect_decompose_tests)
@pytest.mark.parametrize(
"stream", (None, True)
)
@pytest.mark.parametrize(
"order", ("C", "F")
)
@pytest.mark.parametrize(
"dtype", DECOMPOSITION_DTYPE_NAMES
)
@pytest.mark.parametrize(
"xp", backend_names
)
@pytest.mark.parametrize(
"decompose_expr", tensor_decomp_expressions
)
@pytest.mark.parametrize(
"blocking", (True, "auto")
)
class TestDecompose:
def _run_decompose(
self, decompose_expr, xp, dtype, order, stream, method, **kwargs):
decompose_expr, shapes = copy.deepcopy(decompose_expr)
factory = DecomposeFactory(decompose_expr, shapes=shapes)
operand = factory.generate_operands(factory.input_shapes, xp, dtype, order)[0]
backend = sys.modules[infer_object_package(operand)]
if stream:
stream = get_stream_for_backend(backend)
return_info = kwargs.pop("return_info", False)
outputs = tensor.decompose(decompose_expr,
operand,
method=method,
options={"blocking": kwargs["blocking"]},
stream=stream,
return_info=return_info)
if stream:
stream.synchronize()
if isinstance(method, tensor.QRMethod):
q, r = outputs
assert type(q) is type(r)
assert type(q) is type(operand)
assert verify_split_QR(decompose_expr, operand, q, r, None, None)
elif isinstance(method, tensor.SVDMethod):
svd_kwargs = dataclasses.asdict(method)
outputs_ref = tensor_decompose(decompose_expr, operand, method="svd", return_info=return_info, **svd_kwargs)
if return_info:
u, s, v, info = outputs
u_ref, s_ref, v_ref, info_ref = outputs_ref
assert isinstance(info, tensor.SVDInfo)
info = dataclasses.asdict(info)
else:
u, s, v = outputs
u_ref, s_ref, v_ref = outputs_ref
info = {'algorithm': method.algorithm}
info_ref = None
assert type(u) is type(v)
assert type(u) is type(operand)
if method.partition is None:
assert type(u) is type(s)
else:
assert s is None
assert verify_split_SVD(decompose_expr,
operand,
u, s, v,
u_ref, s_ref, v_ref,
info=info,
info_ref=info_ref,
**svd_kwargs)
def test_qr(self, decompose_expr, xp, dtype, order, stream, blocking):
self._run_decompose(
decompose_expr, xp, dtype, order, stream, tensor.QRMethod(),
blocking=blocking)
@pytest.mark.parametrize(
"return_info", (False, True)
)
def test_svd(
self, decompose_expr, xp, dtype, order, stream, return_info, blocking):
rng = numpy.random.default_rng(2021)
methods = [tensor.SVDMethod()] + [gen_rand_svd_method(rng) for _ in range(10)]
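        # exercise the default SVDMethod plus several randomly generated
        # configurations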
for method in methods:
self._run_decompose(
decompose_expr, xp, dtype, order, stream, method,
blocking=blocking, return_info=return_info)
class TestDecompositionOptions(TestNetworkOptions):
options_type = tensor.DecompositionOptions
class TestSVDMethod(_OptionsBase):
options_type = tensor.SVDMethod
def test_max_extent(self):
self.create_options({'max_extent': 6})
def test_abs_cutoff(self):
self.create_options({'abs_cutoff': 0.2})
def test_rel_cutoff(self):
self.create_options({'rel_cutoff': 0.1})
@pytest.mark.parametrize(
'partition', [None, 'U', 'V', 'UV']
)
def test_partition(self, partition):
self.create_options({'partition': partition})
@pytest.mark.parametrize(
'normalization', [None, 'L1', 'L2', 'LInf']
)
def test_normalization(self, normalization):
self.create_options({'normalization': normalization})
@pytest.mark.parametrize(
'algorithm', ['gesvd', 'gesvdj', 'gesvdp', 'gesvdr']
)
def test_algorithm(self, algorithm):
options = {'algorithm': algorithm}
if algorithm == 'gesvdj':
options['gesvdj_tol'] = 1e-16
options['gesvdj_max_sweeps'] = 80
elif algorithm == 'gesvdr':
options['gesvdr_oversampling'] = 4
options['gesvdr_niters'] = 8
self.create_options(options)
class TestSVDInfo(_OptionsBase):
options_type = tensor.SVDInfo
# All fields are required. Therefore we test them all at once.
@pytest.mark.parametrize(
'algorithm', ['gesvd', 'gesvdj', 'gesvdp', 'gesvdr']
)
def test_svd_info(self, algorithm):
info = {'reduced_extent': 6, 'full_extent': 8, 'discarded_weight': 0.02, 'algorithm': algorithm}
if algorithm == 'gesvdj':
info['gesvdj_sweeps'] = 12
info['gesvdj_residual'] = 1e-12
elif algorithm == 'gesvdp':
info['gesvdp_err_sigma'] = 1e-8
self.create_options(info)
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_tensor.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import re
import sys
import cupy
from cupy.testing import shaped_random
import numpy
try:
import torch
if not torch.cuda.is_available():
raise ImportError
except ImportError:
torch = None
from cuquantum import OptimizerOptions
from cuquantum import tensor
from cuquantum.cutensornet._internal.circuit_converter_utils import EINSUM_SYMBOLS_BASE
from cuquantum.cutensornet._internal.einsum_parser import infer_output_mode_labels
from .data import dtype_names
machine_epsilon_values = [numpy.finfo(dtype).eps for dtype in dtype_names]
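# per-dtype comparison tolerances: rtol = sqrt(machine eps), atol = 10 * machine eps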
rtol_mapper = dict(zip(
dtype_names,
[numpy.sqrt(m_eps) for m_eps in machine_epsilon_values]
))
atol_mapper = dict(zip(
dtype_names,
[10 * m_eps for m_eps in machine_epsilon_values]
))
def set_path_to_optimizer_options(optimizer_opts, path):
if optimizer_opts is None:
optimizer_opts = {"path": path}
elif isinstance(optimizer_opts, dict):
optimizer_opts["path"] = path
else:
assert isinstance(optimizer_opts, OptimizerOptions)
optimizer_opts.path = path
return optimizer_opts
def compute_and_normalize_numpy_path(data, num_operands):
try:
# this can fail if the TN is too large (ex: containing unicode)
path, _ = numpy.einsum_path(*data, optimize=True)
except:
raise NotImplementedError
path = path[1:]
# now we need to normalize the NumPy path, because NumPy supports
# contracting a group of tensors at once whereas we only support
# pairwise contraction
num_operands -= 1
norm_path = []
for indices in path:
assert all(idx >= 0 for idx in indices)
if len(indices) >= 2:
indices = sorted(indices, reverse=True)
norm_path.append((indices[0], indices[1]))
num_operands -= 1
for idx in indices[2:]:
# keep contracting with the latest intermediate
norm_path.append((num_operands, idx))
num_operands -= 1
else:
# single TN reduction is supported by NumPy, but we can't handle
# that, just raise to avoid testing against NumPy path
assert len(indices) > 0
raise NotImplementedError
return norm_path
def convert_linear_to_ssa(path):
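    # convert a linear (pairwise) contraction path to SSA form: input operands
    # keep their original ids and each intermediate gets the next unused id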
n_inputs = len(path)+1
remaining = [*range(n_inputs)]
ssa_path = []
counter = n_inputs
for first, second in path:
idx1 = remaining[first]
idx2 = remaining[second]
ssa_path.append((idx1, idx2))
remaining.remove(idx1)
remaining.remove(idx2)
remaining.append(counter)
counter += 1
return ssa_path
def check_ellipsis(modes):
# find ellipsis, record the position, remove it, and modify the modes
if isinstance(modes, str):
ellipsis = modes.find("...")
if ellipsis >= 0:
modes = modes.replace("...", "")
else:
try:
ellipsis = modes.index(Ellipsis)
except ValueError:
ellipsis = -1
if ellipsis >= 0:
modes = modes[:ellipsis] + modes[ellipsis+1:]
return ellipsis, modes
def check_intermediate_modes(
intermediate_modes, input_modes, output_modes, path):
# remove ellipsis, if any, since it's singleton
input_modes = list(map(
lambda modes: (lambda modes: check_ellipsis(modes))(modes)[1],
input_modes
))
_, output_modes = check_ellipsis(output_modes)
# peek at the very first element
if (isinstance(intermediate_modes[0], tuple)
and isinstance(intermediate_modes[0][0], str)):
# this is our internal mode label for ellipsis
custom_label = re.compile(r'\b__\d+__\b')
intermediate_modes = list(map(
lambda modes: list(filter(lambda mode: not custom_label.match(mode), modes)),
intermediate_modes
))
ssa_path = convert_linear_to_ssa(path)
contraction_list = input_modes
contraction_list += intermediate_modes
for k, (i, j) in enumerate(ssa_path):
modesA = set(contraction_list[i])
modesB = set(contraction_list[j])
modesOut = set(intermediate_modes[k])
assert modesOut.issubset(modesA.union(modesB))
assert set(output_modes) == set(intermediate_modes[-1])
class ExpressionFactory:
"""Take a valid einsum expression and compute shapes, modes, etc for testing."""
size_dict = dict(zip(EINSUM_SYMBOLS_BASE, (2, 3, 4)*18))
def __init__(self, expression):
self.expr = expression
if isinstance(expression, str):
self.expr_format = "subscript"
elif isinstance(expression, tuple):
self.expr_format = "interleaved"
else:
assert False
self._modes = None
self._num_inputs = 0
self._num_outputs = 0
def _gen_shape(self, modes):
shape = []
# find ellipsis, record the position, and remove it
ellipsis, modes = check_ellipsis(modes)
# generate extents for remaining modes
for mode in modes:
if mode in self.size_dict:
extent = self.size_dict[mode]
else:
# exotic mode label, let's assign an extent to it
if isinstance(mode, str):
extent = ord(mode) % 3 + 2
else:
extent = abs(hash(mode)) % 3 + 2
self.size_dict[mode] = extent
shape.append(extent)
        # put back ellipsis, assuming it has a single axis of extent 5
if ellipsis >= 0:
shape.insert(ellipsis, 5)
return shape
@property
def num_inputs(self):
return self._num_inputs
@property
def num_outputs(self):
return self._num_outputs
@property
def input_shapes(self):
out = []
for modes in self.input_modes:
shape = self._gen_shape(modes)
out.append(shape)
return out
@property
def output_shape(self):
raise NotImplementedError # TODO
@property
def modes(self):
raise NotImplementedError
@property
def input_modes(self):
return self.modes[:self.num_inputs]
@property
def output_modes(self):
return self.modes[self.num_inputs:]
def generate_operands(self, shapes, xp, dtype, order):
# we always generate data from shaped_random as CuPy fixes
# the RNG seed for us
if xp == "torch-cpu":
_xp = numpy
elif xp == "torch-gpu":
_xp = cupy
else:
_xp = sys.modules[xp]
operands = [
shaped_random(shape, xp=_xp, dtype=dtype, order=order)
for shape in shapes
]
if xp == "torch-cpu":
operands = [torch.as_tensor(op, device="cpu") for op in operands]
elif xp == "torch-gpu":
operands = [torch.as_tensor(op, device="cuda") for op in operands]
return operands
class EinsumFactory(ExpressionFactory):
"""Take a valid einsum expression and compute shapes, modes, etc for testing."""
@property
def modes(self):
if self._modes is None:
if self.expr_format == "subscript":
if "->" in self.expr:
inputs, output = self.expr.split("->")
inputs = inputs.split(",")
else:
inputs = self.expr.split(",")
output = infer_output_mode_labels(inputs)
else:
# output could be a placeholder
inputs = self.expr[:-1]
if self.expr[-1] is None:
output = infer_output_mode_labels(inputs)
else:
output = self.expr[-1]
self._num_inputs = len(inputs)
self._num_outputs = 1
self._modes = tuple(inputs) + tuple([output])
return self._modes
def convert_by_format(self, operands, *, dummy=False):
if dummy:
# create dummy NumPy arrays to bypass the __array_function__
# dispatcher, see numpy/numpy#21379 for discussion
operands = [numpy.broadcast_to(0, arr.shape) for arr in operands]
if self.expr_format == "subscript":
data = [self.expr, *operands]
elif self.expr_format == "interleaved":
modes = [tuple(modes) for modes in self.input_modes]
data = [i for pair in zip(operands, modes) for i in pair]
data.append(tuple(self.output_modes[0]))
return data
class DecomposeFactory(ExpressionFactory):
def __init__(self, expression, *, shapes=None):
super().__init__(expression)
if shapes is not None:
# overwrite the base class's dict
inputs, _ = self.expr.split("->")
inputs = inputs.split(",")
self.size_dict = dict((m, e) for k, v in zip(inputs, shapes) for m, e in zip(k, v))
@property
def modes(self):
if self._modes is None:
if self.expr_format == "subscript":
if "->" in self.expr:
inputs, outputs = self.expr.split("->")
inputs = inputs.split(",")
outputs = outputs.split(",")
self._num_inputs = len(inputs)
self._num_outputs = len(outputs)
self._modes = tuple(inputs) + tuple(outputs)
else:
raise ValueError("output tensor must be explicitly specified for decomposition")
else:
raise ValueError("decomposition does not support interleave format")
return self._modes
def gen_rand_svd_method(rng):
method = {"max_extent": rng.choice(range(1, 7)),
"abs_cutoff": rng.random() / 2.0, # [0, 0.5)
"rel_cutoff": 0.1 + rng.random() / 2.5 , # [0.1, 0.5)
"normalization": rng.choice([None, "L1", "L2", "LInf"]),
"partition": rng.choice([None, "U", "V", "UV"]),
"algorithm": rng.choice(['gesvd', 'gesvdj', 'gesvdp', 'gesvdr'])}
if method["algorithm"] == 'gesvdj':
method["gesvdj_tol"] = rng.choice([0, 1e-14])
method["gesvdj_max_sweeps"] = rng.choice([0, 100])
elif method["algorithm"] == 'gesvdr':
method["gesvdr_niters"] = rng.choice([0, 40])
# we can't set oversampling as it depends on matrix size here
return tensor.SVDMethod(**method)
# We want to avoid fragmenting the stream-ordered mempools
_predefined_streams = {
numpy: cupy.cuda.Stream(), # implementation detail
cupy: cupy.cuda.Stream(),
}
if torch is not None:
_predefined_streams[torch] = torch.cuda.Stream()
def get_stream_for_backend(backend):
return _predefined_streams[backend]
# We use the pytest marker hook to deselect/ignore collected tests
# that we do not want to run. This is better than showing a ton of
# tests as "skipped" at the end, since technically they never get
# tested.
#
# Note the arguments here must be named and ordered in exactly the
# same way as the tests being marked by @pytest.mark.uncollect_if().
def deselect_contract_tests(
einsum_expr_pack, xp, dtype, *args, **kwargs):
if xp.startswith('torch') and torch is None:
return True
if xp == 'torch-cpu' and dtype == 'float16':
# float16 only implemented for gpu
return True
if isinstance(einsum_expr_pack, list):
_, _, _, overwrite_dtype = einsum_expr_pack
if dtype != overwrite_dtype:
return True
return False
def deselect_decompose_tests(
decompose_expr, xp, dtype, *args, **kwargs):
if xp.startswith('torch') and torch is None:
return True
return False
def deselect_contract_decompose_algorithm_tests(qr_method, svd_method, *args, **kwargs):
if qr_method is False and svd_method is False: # not a valid algorithm
return True
return False
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import pytest
import cuquantum
from .data import einsum_expressions
from .test_utils import compute_and_normalize_numpy_path
from .test_utils import EinsumFactory
from .test_utils import set_path_to_optimizer_options
@pytest.mark.parametrize(
"einsum_expr_pack", einsum_expressions
)
class TestContractPath:
def _test_runner(
self, func, einsum_expr_pack, use_numpy_path, **kwargs):
einsum_expr = copy.deepcopy(einsum_expr_pack)
if isinstance(einsum_expr_pack, list):
einsum_expr, network_opts, optimizer_opts, overwrite_dtype = einsum_expr
dtype = overwrite_dtype
else:
network_opts = optimizer_opts = None
dtype = "float32"
assert isinstance(einsum_expr, (str, tuple))
factory = EinsumFactory(einsum_expr)
# backend/dtype/order do not matter, so we just pick one here
operands = factory.generate_operands(
factory.input_shapes, "cupy", dtype, "C")
path = None
if use_numpy_path:
try:
path = compute_and_normalize_numpy_path(
factory.convert_by_format(operands, dummy=True),
len(operands))
except NotImplementedError:
# we can't support the returned NumPy path, just skip
pytest.skip("NumPy path is either not found or invalid")
data = factory.convert_by_format(operands)
if func is cuquantum.contract_path:
if path is not None:
optimizer_opts = set_path_to_optimizer_options(
optimizer_opts, path)
path, info = func(
*data, options=network_opts, optimize=optimizer_opts)
assert isinstance(path, list)
assert isinstance(info, cuquantum.OptimizerInfo)
else: # cuquantum.einsum_path()
optimize = kwargs.pop('optimize')
assert optimize == True
path, info = func(*data, optimize=optimize)
assert path[0] == "einsum_path"
path = path[1:]
# sanity checks; the correctness checks are done in the contract() tests
assert len(path) == len(operands)-1
operand_ids = list(range(len(operands)))
for i, j in path:
op_i, op_j = operand_ids[i], operand_ids[j]
operand_ids.remove(op_i)
operand_ids.remove(op_j)
operand_ids.append(-1) # placeholder for intermediate
# all input tensors are contracted
assert len(operand_ids) == 1
assert operand_ids[0] == -1
@pytest.mark.parametrize(
"use_numpy_path", (False, True)
)
def test_contract_path(self, einsum_expr_pack, use_numpy_path):
self._test_runner(
cuquantum.contract_path, einsum_expr_pack, use_numpy_path)
def test_einsum_path(self, einsum_expr_pack):
# We only support optimize=True and don't allow setting the path
self._test_runner(
cuquantum.einsum_path, einsum_expr_pack, False, optimize=True)
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_contract_path.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import logging
import pytest
from cuquantum import cutensornet as cutn
from cuquantum import BaseCUDAMemoryManager
from cuquantum import ComputeType
from cuquantum import MemoryPointer
from cuquantum import NetworkOptions
from cuquantum import OptimizerInfo
from cuquantum import OptimizerOptions
from cuquantum import PathFinderOptions
from cuquantum import ReconfigOptions
from cuquantum import SlicerOptions
class _OptionsBase:
__slots__ = ('options_type',)
def create_options(self, options):
return self.options_type(**options)
class TestNetworkOptions(_OptionsBase):
options_type = NetworkOptions
@pytest.mark.parametrize(
'compute_type', [t for t in ComputeType]
)
def test_compute_type(self, compute_type):
self.create_options({'compute_type': compute_type})
def test_device_id(self):
self.create_options({'device_id': 0})
def test_handle(self):
handle = 10000
self.create_options({'handle': handle})
def test_logger(self):
self.create_options({"logger": logging.getLogger()})
@pytest.mark.parametrize(
'memory_limit', (int(1e8), "100 MiB", "80%")
)
def test_memory_limit(self, memory_limit):
self.create_options({'memory_limit': memory_limit})
# since BaseCUDAMemoryManager is a protocol, as long as the method
# is there it doesn't matter if it's used as the base class or not
@pytest.mark.parametrize(
"base", (object, BaseCUDAMemoryManager)
)
def test_allocator(self, base):
class MyAllocator(base):
def memalloc(self, size):
return MemoryPointer(0, size, None)
allocator = MyAllocator()
self.create_options({'allocator': allocator})
class TestOptimizerOptions(_OptionsBase):
options_type = OptimizerOptions
def test_samples(self):
self.create_options({'samples': 100})
def test_threads(self):
self.create_options({'threads': 8})
def test_path(self):
self.create_options({'path': {"num_partitions": 100}})
self.create_options({
'path': PathFinderOptions(**{"num_partitions": 100}),
})
def test_slicing(self):
self.create_options({'slicing': {"disable_slicing": 1}})
self.create_options({
'slicing': SlicerOptions(**{"disable_slicing": 1}),
})
def test_reconfiguration(self):
self.create_options({'reconfiguration': {"num_leaves": 100}})
self.create_options({
'reconfiguration': ReconfigOptions(**{"num_leaves": 100}),
})
def test_seed(self):
self.create_options({'seed': 100})
class TestOptimizerInfo(_OptionsBase):
options_type = OptimizerInfo
# All fields in OptimizerInfo are required, so we must test
# them at once
def test_optimizer_info(self):
self.create_options({
"largest_intermediate": 100.0,
"opt_cost": 100.0,
"path": [(0, 1), (0, 1)],
"slices": [("a", 4), ("b", 3)],
"num_slices": 10,
"intermediate_modes": [(1, 3), (2, 4)],
})
class TestPathFinderOptions(_OptionsBase):
options_type = PathFinderOptions
def test_num_partitions(self):
self.create_options({"num_partitions": 100})
def test_cutoff_size(self):
self.create_options({"cutoff_size": 100})
@pytest.mark.parametrize(
"algorithm", [algo for algo in cutn.GraphAlgo]
)
def test_algorithm(self, algorithm):
self.create_options({"algorithm": algorithm})
def test_imbalance_factor(self):
self.create_options({"imbalance_factor": 1000})
def test_num_iterations(self):
self.create_options({"num_iterations": 100})
def test_num_cuts(self):
self.create_options({"num_cuts": 100})
class TestReconfigOptions(_OptionsBase):
options_type = ReconfigOptions
def test_num_iterations(self):
self.create_options({"num_iterations": 100})
def test_num_leaves(self):
self.create_options({"num_leaves": 100})
class TestSlicerOptions(_OptionsBase):
options_type = SlicerOptions
def test_disable_slicing(self):
self.create_options({"disable_slicing": 1})
@pytest.mark.parametrize(
"memory_model", [m for m in cutn.MemoryModel]
)
def test_memory_model(self, memory_model):
self.create_options({"memory_model": memory_model})
def test_memory_factor(self):
self.create_options({"memory_factor": 20})
def test_min_slices(self):
self.create_options({"min_slices": 10})
def test_slice_factor(self):
self.create_options({"slice_factor": 5})
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_options.py |
# Copyright (c) 2022-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import re
import sys
import threading
import cupy as cp
from cupy.cuda.runtime import getDevice, setDevice
import pytest
from cuquantum.cutensornet import _internal
from cuquantum.cutensornet._internal import utils
from cuquantum.utils import WHITESPACE_UNICODE
class TestDeviceCtx:
@pytest.mark.skipif(
cp.cuda.runtime.getDeviceCount() < 2, reason='not enough GPUs')
def test_device_ctx(self):
assert getDevice() == 0
with utils.device_ctx(0):
assert getDevice() == 0
with utils.device_ctx(1):
assert getDevice() == 1
with utils.device_ctx(0):
assert getDevice() == 0
assert getDevice() == 1
assert getDevice() == 0
assert getDevice() == 0
with utils.device_ctx(1):
assert getDevice() == 1
setDevice(0)
with utils.device_ctx(1):
assert getDevice() == 1
assert getDevice() == 0
assert getDevice() == 0
@pytest.mark.skipif(
cp.cuda.runtime.getDeviceCount() < 2, reason='not enough GPUs')
def test_thread_safe(self):
# adapted from https://github.com/cupy/cupy/blob/master/tests/cupy_tests/cuda_tests/test_device.py
# recall that the CUDA context is maintained per-thread, so when each thread
# starts it is on the default device (=device 0).
t0_setup = threading.Event()
t1_setup = threading.Event()
t0_first_exit = threading.Event()
t0_exit_device = []
t1_exit_device = []
def t0_seq():
with utils.device_ctx(0):
with utils.device_ctx(1):
t0_setup.set()
t1_setup.wait()
t0_exit_device.append(getDevice())
t0_exit_device.append(getDevice())
t0_first_exit.set()
assert getDevice() == 0
def t1_seq():
t0_setup.wait()
with utils.device_ctx(1):
with utils.device_ctx(0):
t1_setup.set()
t0_first_exit.wait()
t1_exit_device.append(getDevice())
t1_exit_device.append(getDevice())
assert getDevice() == 0
try:
cp.cuda.runtime.setDevice(1)
t0 = threading.Thread(target=t0_seq)
t1 = threading.Thread(target=t1_seq)
t1.start()
t0.start()
t0.join()
t1.join()
assert t0_exit_device == [1, 0]
assert t1_exit_device == [0, 1]
finally:
cp.cuda.runtime.setDevice(0)
def test_one_shot(self):
dev = utils.device_ctx(0)
with dev:
pass
# CPython raises AttributeError, but we should not care here
with pytest.raises(Exception):
with dev:
pass
class TestGetSymbol:
def test_no_whitespace(self):
# Note: max(whitespace_s) = 12288
out = []
for i in range(0, 30000):
s = _internal.circuit_converter_utils._get_symbol(i)
assert not s.isspace()
out.append(s)
# check the mapping is unique
assert len(set(out)) == 30000
def test_whitespace_unicode_consistency(self):
all_s = ''.join(chr(c) for c in range(sys.maxunicode+1))
whitespace_s = ''.join(re.findall(r'\s', all_s))
assert WHITESPACE_UNICODE == whitespace_s
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_internal.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
try:
import torch
if not torch.cuda.is_available():
raise ImportError
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
except ImportError:
pass
# This is future-proofing: when CuPy enables cuQuantum Python as an optional
# backend in the future, we don't want to create a circular dependency that
# ultimately tests against ourselves. Here we enable CUB as the only optional
# backend and exclude cuTENSOR/cuQuantum Python/etc., using CuPy's
# private API (for development/testing).
cp._core.set_reduction_accelerators(['cub'])
cp._core.set_routine_accelerators(['cub'])
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/__init__.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import pytest
from .circuit_utils import backends
from .circuit_utils import cirq_circuits, CirqTester
from .circuit_utils import qiskit_circuits, QiskitTester
class TestCircuitToEinsum:
# If PyTorch/Qiskit/Cirq is not installed, the corresponding tests are silently
# skipped.
@pytest.mark.parametrize("circuit", cirq_circuits)
@pytest.mark.parametrize("dtype", ('complex64', 'complex128',))
@pytest.mark.parametrize("backend", backends)
def test_cirq(self, circuit, dtype, backend, nsample=3, nsite_max=3, nfix_max=3):
cirq_tests = CirqTester(circuit, dtype, backend, nsample, nsite_max, nfix_max)
cirq_tests.run_tests()
@pytest.mark.parametrize("circuit", qiskit_circuits)
@pytest.mark.parametrize("dtype", ('complex64', 'complex128',))
@pytest.mark.parametrize("backend", backends)
def test_qiskit(self, circuit, dtype, backend, nsample=3, nsite_max=3, nfix_max=3):
qiskit_tests = QiskitTester(circuit, dtype, backend, nsample, nsite_max, nfix_max)
qiskit_tests.run_tests()
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_circuit_converter.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import copy
import sys
import cupy
import numpy
import opt_einsum
import pytest
import cuquantum
from cuquantum import cutensornet as cutn
from cuquantum.cutensornet._internal.utils import infer_object_package
from .data import backend_names, dtype_names, einsum_expressions
from .test_utils import atol_mapper, EinsumFactory, rtol_mapper
from .test_utils import compute_and_normalize_numpy_path
from .test_utils import deselect_contract_tests
from .test_utils import get_stream_for_backend
from .test_utils import set_path_to_optimizer_options
# TODO: parametrize compute type?
@pytest.mark.uncollect_if(func=deselect_contract_tests)
@pytest.mark.parametrize(
"use_numpy_path", (False, True)
)
@pytest.mark.parametrize(
"stream", (None, True)
)
@pytest.mark.parametrize(
"order", ("C", "F")
)
@pytest.mark.parametrize(
"dtype", dtype_names
)
@pytest.mark.parametrize(
"xp", backend_names
)
@pytest.mark.parametrize(
"einsum_expr_pack", einsum_expressions
)
class TestContract:
def _test_runner(
self, func, einsum_expr_pack, xp, dtype, order,
stream, use_numpy_path, **kwargs):
einsum_expr = copy.deepcopy(einsum_expr_pack)
if isinstance(einsum_expr, list):
einsum_expr, network_opts, optimizer_opts, _ = einsum_expr
else:
network_opts = optimizer_opts = None
assert isinstance(einsum_expr, (str, tuple))
factory = EinsumFactory(einsum_expr)
operands = factory.generate_operands(
factory.input_shapes, xp, dtype, order)
backend = sys.modules[infer_object_package(operands[0])]
if stream:
stream = get_stream_for_backend(backend)
path = None
if use_numpy_path:
try:
path = compute_and_normalize_numpy_path(
factory.convert_by_format(operands, dummy=True),
len(operands))
except NotImplementedError:
# we can't support the returned NumPy path, just skip
pytest.skip("NumPy path is either not found or invalid")
data = factory.convert_by_format(operands)
if func is cuquantum.contract:
return_info = kwargs.pop('return_info')
if path is not None:
optimizer_opts = set_path_to_optimizer_options(
optimizer_opts, path)
out = func(
*data, options=network_opts, optimize=optimizer_opts,
stream=stream, return_info=return_info)
if return_info:
out, (path, info) = out
assert isinstance(path, list)
assert isinstance(info, cuquantum.OptimizerInfo)
else: # cuquantum.einsum()
optimize = kwargs.pop('optimize')
if optimize == 'path':
optimize = path if path is not None else False
try:
out = func(*data, optimize=optimize)
except cutn.cuTensorNetError as e:
if (optimize is not True
and "CUTENSORNET_STATUS_NOT_SUPPORTED" in str(e)):
pytest.skip("cuquantum.einsum() fail -- TN too large?")
else:
raise
if stream:
stream.synchronize()
backend_out = sys.modules[infer_object_package(out)]
assert backend_out is backend
assert out.dtype == operands[0].dtype
out_ref = opt_einsum.contract(
*data, backend="torch" if "torch" in xp else xp)
assert backend.allclose(
out, out_ref, atol=atol_mapper[dtype], rtol=rtol_mapper[dtype])
@pytest.mark.parametrize(
"return_info", (False, True)
)
def test_contract(
self, einsum_expr_pack, xp, dtype, order,
stream, use_numpy_path, return_info):
self._test_runner(
cuquantum.contract, einsum_expr_pack, xp, dtype, order,
stream, use_numpy_path, return_info=return_info)
@pytest.mark.parametrize(
"optimize", (False, True, "path")
)
def test_einsum(
self, einsum_expr_pack, xp, dtype, order,
stream, use_numpy_path, optimize):
self._test_runner(
cuquantum.einsum, einsum_expr_pack, xp, dtype, order,
stream, use_numpy_path, optimize=optimize)
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_contract.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import functools
import copy
import re
import sys
import cupy
import numpy
import opt_einsum
import pytest
from cuquantum import Network
from cuquantum.cutensornet._internal.utils import infer_object_package
from .data import backend_names, dtype_names, einsum_expressions
from .test_utils import atol_mapper, EinsumFactory, rtol_mapper
from .test_utils import check_intermediate_modes
from .test_utils import compute_and_normalize_numpy_path
from .test_utils import deselect_contract_tests
from .test_utils import get_stream_for_backend
from .test_utils import set_path_to_optimizer_options
# TODO: parametrize compute type?
@pytest.mark.uncollect_if(func=deselect_contract_tests)
@pytest.mark.parametrize(
"use_numpy_path", (False, True)
)
@pytest.mark.parametrize(
"stream", (None, True)
)
@pytest.mark.parametrize(
"autotune", (False, 5)
)
@pytest.mark.parametrize(
"order", ("C", "F")
)
@pytest.mark.parametrize(
"dtype", dtype_names
)
@pytest.mark.parametrize(
"xp", backend_names
)
@pytest.mark.parametrize(
"einsum_expr_pack", einsum_expressions
)
class TestNetwork:
def test_network(
self, einsum_expr_pack, xp, dtype, order, autotune,
stream, use_numpy_path):
einsum_expr = copy.deepcopy(einsum_expr_pack)
if isinstance(einsum_expr, list):
einsum_expr, network_opts, optimizer_opts, _ = einsum_expr
else:
network_opts = optimizer_opts = None
assert isinstance(einsum_expr, (str, tuple))
factory = EinsumFactory(einsum_expr)
operands = factory.generate_operands(
factory.input_shapes, xp, dtype, order)
backend = sys.modules[infer_object_package(operands[0])]
data = factory.convert_by_format(operands)
if stream:
stream = get_stream_for_backend(backend)
tn = Network(*data, options=network_opts)
# We already test tn as a context manager in the samples, so let's test
# explicitly calling tn.free() here.
try:
if not use_numpy_path:
path, info = tn.contract_path(optimize=optimizer_opts)
uninit_f_str = re.compile("{.*}")
assert uninit_f_str.search(str(info)) is None
check_intermediate_modes(
info.intermediate_modes, factory.input_modes,
factory.output_modes[0], path)
else:
try:
path_ref = compute_and_normalize_numpy_path(
factory.convert_by_format(operands, dummy=True),
len(operands))
except NotImplementedError:
# we can't support the returned NumPy path, just skip
pytest.skip("NumPy path is either not found or invalid")
else:
optimizer_opts = set_path_to_optimizer_options(
optimizer_opts, path_ref)
path, _ = tn.contract_path(optimizer_opts)
# round-trip test
# note that within each pair it could have different order
assert all(map(lambda x, y: sorted(x) == sorted(y), path, path_ref))
if autotune:
tn.autotune(iterations=autotune, stream=stream)
# check the result
self._verify_contract(
tn, operands, backend, data, xp, dtype, stream)
# generate new data and bind them to the TN
operands = factory.generate_operands(
factory.input_shapes, xp, dtype, order)
data = factory.convert_by_format(operands)
tn.reset_operands(*operands)
# check the result
self._verify_contract(
tn, operands, backend, data, xp, dtype, stream)
finally:
tn.free()
def _verify_contract(
self, tn, operands, backend, data, xp, dtype, stream):
out = tn.contract(stream=stream)
if stream:
stream.synchronize()
backend_out = sys.modules[infer_object_package(out)]
assert backend_out is backend
assert out.dtype == operands[0].dtype
out_ref = opt_einsum.contract(
*data, backend="torch" if "torch" in xp else xp)
assert backend.allclose(
out, out_ref, atol=atol_mapper[dtype], rtol=rtol_mapper[dtype])
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_network.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from collections import Counter
import itertools
from types import MappingProxyType
try:
import cirq
except ImportError:
cirq = None
import cupy as cp
import numpy as np
import pytest
try:
import torch
if not torch.cuda.is_available():
raise ImportError
except ImportError:
torch = None
try:
import qiskit
except ImportError:
qiskit = None
from cuquantum import contract, CircuitToEinsum
from cuquantum import cutensornet as cutn
from cuquantum.cutensornet._internal import circuit_parser_utils_cirq, circuit_parser_utils_qiskit
from cuquantum.cutensornet._internal.circuit_converter_utils import convert_mode_labels_to_expression
from cuquantum.cutensornet._internal.circuit_converter_utils import EINSUM_SYMBOLS_BASE
from cuquantum.cutensornet._internal.circuit_converter_utils import get_pauli_gates
from cuquantum.cutensornet._internal.circuit_converter_utils import parse_gates_to_mode_labels_operands
from cuquantum.cutensornet._internal.utils import infer_object_package
from .test_utils import atol_mapper, get_stream_for_backend, rtol_mapper
from .test_cutensornet import manage_resource
# note: this implementation causes PyTorch tests to be silently skipped if
# PyTorch is not available, which is the desired effect since otherwise
# it'd be too noisy
backends = [np, cp]
if torch:
backends.append(torch)
cirq_circuits = []
qiskit_circuits = []
EMPTY_DICT = MappingProxyType(dict())
def gen_qubits_map(qubits):
n_qubits = len(qubits)
if n_qubits > len(EINSUM_SYMBOLS_BASE):
raise NotImplementedError(f'test suite only supports up to {len(EINSUM_SYMBOLS_BASE)} qubits')
qubits_map = dict(zip(qubits, EINSUM_SYMBOLS_BASE[:n_qubits]))
return qubits_map
def bitstring_generator(n_qubits, nsample=1):
for _ in range(nsample):
bitstring = ''.join(np.random.choice(('0', '1'), n_qubits))
yield bitstring
def where_fixed_generator(qubits, nfix_max, nsite_max=None):
indices = np.arange(len(qubits))
for nfix in range(nfix_max):
for _ in range(2):
np.random.shuffle(indices)
fixed_sites = [qubits[indices[ix]] for ix in range(nfix)]
bitstring = ''.join(np.random.choice(('0', '1'), nfix))
fixed = dict(zip(fixed_sites, bitstring))
if nsite_max is None:
yield fixed
else:
for nsite in range(1, nsite_max+1):
where = [qubits[indices[ix]] for ix in range(nfix, nfix+nsite)]
yield where, fixed
def random_pauli_string_generator(n_qubits, num_strings=4):
for _ in range(num_strings):
yield ''.join(np.random.choice(['I','X', 'Y', 'Z'], n_qubits))
def get_partial_indices(qubits, fixed):
partial_indices = [slice(None)] * len(qubits)
index_map = {'0': slice(0, 1),
'1': slice(1, 2)}
for ix, q in enumerate(qubits):
if q in fixed:
partial_indices[ix] = index_map[fixed[q]]
return partial_indices
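# For example (hypothetical 3-qubit case): with qubits = [q0, q1, q2] and fixed = {q1: '1'},
# this returns [slice(None), slice(1, 2), slice(None)], so indexing the state vector keeps
# the fixed qubit as a singleton axis rather than dropping it.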
################################################
# functions to generate cirq.Circuit for testing
################################################
def get_cirq_qft_circuit(n_qubits):
qubits = cirq.LineQubit.range(n_qubits)
qreg = list(qubits)[::-1]
operations = []
while len(qreg) > 0:
q_head = qreg.pop(0)
operations.append(cirq.H(q_head))
for i, qubit in enumerate(qreg):
operations.append((cirq.CZ ** (1 / 2 ** (i + 1)))(qubit, q_head))
circuit = cirq.Circuit(operations)
return circuit
def get_cirq_random_circuit(n_qubits, n_moments, op_density=0.9, seed=3):
return cirq.testing.random_circuit(n_qubits, n_moments, op_density, random_state=seed)
N_QUBITS_RANGE = range(7, 9)
N_MOMENTS_RANGE = DEPTH_RANGE = range(5, 7)
if cirq:
for n_qubits in N_QUBITS_RANGE:
cirq_circuits.append(get_cirq_qft_circuit(n_qubits))
for n_moments in N_MOMENTS_RANGE:
cirq_circuits.append(get_cirq_random_circuit(n_qubits, n_moments))
try:
from cuquantum_benchmarks.frontends.frontend_cirq import Cirq as cuqnt_cirq
from cuquantum_benchmarks.benchmarks import qpe, quantum_volume, qaoa
cirq_generators = [qpe.QPE, quantum_volume.QuantumVolume, qaoa.QAOA]
config = {'measure': True, 'unfold': True, 'p': 4}
for generator in cirq_generators:
for n_qubits in (5, 6):
seq = generator.generateGatesSequence(n_qubits, config)
circuit = cuqnt_cirq(n_qubits, config).generateCircuit(seq)
cirq_circuits.append(circuit)
except:
pass
#########################################################
# functions to generate qiskit.QuantumCircuit for testing
#########################################################
def get_qiskit_qft_circuit(n_qubits):
return qiskit.circuit.library.QFT(n_qubits, do_swaps=False).decompose()
def get_qiskit_random_circuit(n_qubits, depth):
from qiskit.circuit.random import random_circuit
circuit = random_circuit(n_qubits, depth, max_operands=3)
return circuit
def get_qiskit_composite_circuit():
sub_q = qiskit.QuantumRegister(2)
sub_circ = qiskit.QuantumCircuit(sub_q, name='sub_circ')
sub_circ.h(sub_q[0])
sub_circ.crz(1, sub_q[0], sub_q[1])
sub_circ.barrier()
sub_circ.id(sub_q[1])
sub_circ.u(1, 2, -2, sub_q[0])
# Convert to a gate and stick it into an arbitrary place in the bigger circuit
sub_inst = sub_circ.to_instruction()
qr = qiskit.QuantumRegister(3, 'q')
circ = qiskit.QuantumCircuit(qr)
circ.h(qr[0])
circ.cx(qr[0], qr[1])
circ.cx(qr[1], qr[2])
circ.append(sub_inst, [qr[1], qr[2]])
circ.append(sub_inst, [qr[0], qr[2]])
circ.append(sub_inst, [qr[0], qr[1]])
return circ
def get_qiskit_nested_circuit():
qr = qiskit.QuantumRegister(6, 'q')
circ = qiskit.QuantumCircuit(qr)
sub_ins = get_qiskit_composite_circuit().to_instruction()
circ.append(sub_ins, [qr[0], qr[2], qr[4]])
circ.append(sub_ins, [qr[1], qr[3], qr[5]])
circ.cx(qr[0], qr[3])
circ.cx(qr[1], qr[4])
circ.cx(qr[2], qr[5])
return circ
def get_cc_unitary_gate(seed=None):
# random unitary two qubit gate
from qiskit.extensions import UnitaryGate
if seed is None:
seed = 1234
rng = np.random.default_rng(seed)
m = rng.standard_normal(size=(4, 4)) + 1j*rng.standard_normal(size=(4, 4))
q, r = np.linalg.qr(m)
d = np.diag(r)
q *= d/abs(d)
gate = UnitaryGate(q).control(2)
return gate
def get_qiskit_multi_control_circuit():
qubits = qiskit.QuantumRegister(5)
circuit = qiskit.QuantumCircuit(qubits)
for q in qubits:
circuit.h(q)
qs = list(qubits)
# 2 layers of multi-controlled unitary gates
np.random.seed(0)
for i in range(2):
np.random.shuffle(qs)
ccu_gate = get_cc_unitary_gate(i)
circuit.append(ccu_gate, qs[:4])
for q in qubits:
if i % 2 == 1:
circuit.h(q)
else:
circuit.x(q)
circuit.global_phase = 0.5
circuit.p(0.1, qubits[0])
return circuit
if qiskit:
circuit = get_qiskit_composite_circuit()
qiskit_circuits.append(circuit.copy())
circuit.global_phase = 0.5
qiskit_circuits.append(circuit)
qiskit_circuits.append(get_qiskit_nested_circuit())
qiskit_circuits.append(get_qiskit_multi_control_circuit())
for n_qubits in N_QUBITS_RANGE:
qiskit_circuits.append(get_qiskit_qft_circuit(n_qubits))
for depth in DEPTH_RANGE:
qiskit_circuits.append(get_qiskit_random_circuit(n_qubits, depth))
try:
from cuquantum_benchmarks.frontends.frontend_qiskit import Qiskit as cuqnt_qiskit
from cuquantum_benchmarks.benchmarks import qpe, quantum_volume, qaoa
qiskit_generators = [qpe.QPE, quantum_volume.QuantumVolume, qaoa.QAOA]
config = {'measure': True, 'unfold': True, 'p': 4}
for generator in qiskit_generators:
for n_qubits in (5, 6):
seq = generator.generateGatesSequence(n_qubits, config)
circuit = cuqnt_qiskit(n_qubits, config).generateCircuit(seq)
qiskit_circuits.append(circuit)
except:
pass
def compute_histogram_overlap(hist1, hist2, nshots):
# assuming hist1 & hist2 have the same sample size (=nshots)
overlap = 0
for val, count in hist1.items():
if val not in hist2:
continue
overlap += min(hist1[val], hist2[val])
overlap /= nshots
return overlap
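# For example, with nshots = 5000, hist1 = {0: 3000, 1: 2000} and hist2 = {0: 2500, 1: 2500},
# the overlap is (2500 + 2000) / 5000 = 0.9.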
###################################################################
#
# Simulator APIs inside cirq and qiskit may be subject to change.
# Version tests are needed. In case a simulator API changes,
# the implementations to be modified are:
# `CirqTester._get_state_vector_from_simulator` and
# `QiskitTester._get_state_vector_from_simulator`
#
###################################################################
class BaseTester:
def __init__(self, circuit, dtype, backend, nsample, nsite_max, nfix_max, nshots=5000, seed=1024):
self.circuit = circuit
self.converter = CircuitToEinsum(circuit, dtype=dtype, backend=backend)
self.backend = backend
self.qubits = list(self.converter.qubits)
self.n_qubits = self.converter.n_qubits
self.dtype = dtype
self.sv = None
self.nsample = nsample
self.nsite_max = max(1, min(nsite_max, self.n_qubits-1))
self.nfix_max = max(min(nfix_max, self.n_qubits-nsite_max-1), 0)
self.nshots = nshots
self.seed = seed
self.state_purity = cutn.StatePurity.PURE
self.state_prepared = False
def get_state_vector_from_simulator(self):
if self.sv is None:
self.sv = self._get_state_vector_from_simulator()
return self.sv
def get_amplitude_from_simulator(self, bitstring):
sv = self.get_state_vector_from_simulator()
index = [int(ibit) for ibit in bitstring]
return sv[tuple(index)]
def get_batched_amplitudes_from_simulator(self, fixed):
sv = self.get_state_vector_from_simulator()
partial_indices = get_partial_indices(self.qubits, fixed)
batched_amplitudes = sv[tuple(partial_indices)]
return batched_amplitudes.reshape((2,)*(self.n_qubits-len(fixed)))
def get_reduced_density_matrix_from_simulator(self, where, fixed=EMPTY_DICT):
r"""
For where = (a, b), the reduced density matrix is formulated as:
:math:`\rho_{a,b,a^{\prime},b^{\prime}} = \sum_{c,d,e,...} SV^{\star}_{a^{\prime}, b^{\prime}, c, d, e, ...} SV_{a, b, c, d, e, ...}`
"""
sv = self.get_state_vector_from_simulator()
partial_indices = get_partial_indices(self.qubits, fixed)
sv = sv[tuple(partial_indices)]
qubits_map = gen_qubits_map(self.qubits)
output_inds = ''.join([qubits_map[q] for q in where])
output_inds += output_inds.upper()
left_inds = ''.join([qubits_map[q] for q in self.qubits])
right_inds = ''
for q in self.qubits:
if q in where:
right_inds += qubits_map[q].upper()
else:
right_inds += qubits_map[q]
expression = left_inds + ',' + right_inds + '->' + output_inds
if self.backend is torch:
rdm = contract(expression, sv, sv.conj().resolve_conj())
else:
rdm = contract(expression, sv, sv.conj())
return rdm
def get_expectation_from_sv(self, pauli_string):
input_mode_labels = [[*range(self.n_qubits)]]
qubits_frontier = dict(zip(self.qubits, itertools.count()))
next_frontier = max(qubits_frontier.values()) + 1
pauli_map = dict(zip(self.qubits, pauli_string))
dtype = getattr(self.backend, self.dtype)
pauli_gates = get_pauli_gates(pauli_map, dtype=dtype, backend=self.backend)
gate_mode_labels, gate_operands = parse_gates_to_mode_labels_operands(pauli_gates,
qubits_frontier,
next_frontier)
mode_labels = input_mode_labels + gate_mode_labels + [[qubits_frontier[ix] for ix in self.qubits]]
output_mode_labels = []
expression = convert_mode_labels_to_expression(mode_labels, output_mode_labels)
sv = self.get_state_vector_from_simulator()
if self.backend is torch:
operands = [sv] + gate_operands + [sv.conj().resolve_conj()]
else:
operands = [sv] + gate_operands + [sv.conj()]
expec = contract(expression, *operands)
return expec
def _get_state_vector_from_simulator(self):
raise NotImplementedError
def _get_sampling_from_simulator(self, qubits_to_sample=None, seed=None):
raise NotImplementedError
def get_sampling_from_sv(self, qubits_to_sample=None, seed=None):
sv = self.get_state_vector_from_simulator()
p = abs(sv) ** 2
# convert p to double type in case probs does not add up to 1
if self.backend is np:
p = p.astype('float64')
elif self.backend is cp:
p = cp.asnumpy(p).astype('float64')
elif self.backend is torch:
if p.device.type == 'cpu':
p = p.numpy().astype('float64')
else:
p = p.cpu().numpy().astype('float64')
if qubits_to_sample is not None:
sorted_qubits_to_sample = [q for q in self.qubits if q in qubits_to_sample]
axis = [i for (i, q) in enumerate(self.qubits) if q not in qubits_to_sample]
if axis:
p = p.sum(tuple(axis))
# potential transpose to match the order of qubits_to_sample
transpose_order = [sorted_qubits_to_sample.index(q) for q in qubits_to_sample]
p = p.transpose(*transpose_order)
# normalize
p /= p.sum()
if seed is not None:
np.random.seed(seed)
samples = np.random.choice(np.arange(p.size), p=p.flat, size=self.nshots)
hist_sv = np.unique(samples, return_counts=True)
return dict(zip(*hist_sv))
def maybe_prepare_state(self):
if not self.state_prepared:
if not hasattr(self, 'state'):
raise RuntimeError("state not initialized")
if self.backend is not cp:
raise RuntimeError("This func is only expected to be executed for cupy backend")
gates = self.converter.gates
immutable = 0
adjoint = 0
unitary = 1 # assuming all gates unitary
self.operands = []
for (operand, qubits) in gates:
n_state_modes = len(qubits)
state_modes = [self.qubits.index(q) for q in qubits]
# keep operand alive otherwise cupy will re-use the memory space
operand = operand.T.astype(operand.dtype, order=np.random.choice(['C', 'F']))
self.operands.append(operand)
tensor_mode_strides = [stride_in_bytes//operand.itemsize for stride_in_bytes in operand.strides]
update_tensor = np.random.choice([True, False], p=[0.1, 0.9])
if update_tensor:
tmp = cp.empty_like(operand)
tensor_id = cutn.state_apply_tensor(self.handle, self.state, n_state_modes,
state_modes, tmp.data.ptr, tensor_mode_strides,
immutable, adjoint, unitary)
cutn.state_update_tensor(self.handle, self.state, tensor_id, operand.data.ptr, unitary)
else:
cutn.state_apply_tensor(self.handle, self.state, n_state_modes,
state_modes, operand.data.ptr, tensor_mode_strides,
immutable, adjoint, unitary)
self.state_prepared = True
def _run_cutensornet_sampling_marginal(self, task, create_args, execute_args, stream):
self.maybe_prepare_state()
if task == 'marginal':
create_func = cutn.create_marginal
configure_func = cutn.marginal_configure
hyper_sample_attr = cutn.MarginalAttribute.OPT_NUM_HYPER_SAMPLES
num_hyper_samples_dtype = cutn.marginal_get_attribute_dtype(hyper_sample_attr)
prepare_func = cutn.marginal_prepare
execute_func = cutn.marginal_compute
destroy_func = cutn.destroy_marginal
elif task == 'sampler':
create_func = cutn.create_sampler
configure_func = cutn.sampler_configure
hyper_sample_attr = cutn.SamplerAttribute.OPT_NUM_HYPER_SAMPLES
num_hyper_samples_dtype = cutn.sampler_get_attribute_dtype(hyper_sample_attr)
prepare_func = cutn.sampler_prepare
execute_func = cutn.sampler_sample
destroy_func = cutn.destroy_sampler
else:
raise ValueError("only supports marginal and sampler")
dev = cp.cuda.Device()
free_mem = dev.mem_info[0]
scratch_size = free_mem // 2 # maximal usage of 50% device memory
task_obj = create_func(self.handle, self.state, *create_args)
num_hyper_samples = np.asarray(8, dtype=num_hyper_samples_dtype)
configure_func(self.handle, task_obj, hyper_sample_attr,
num_hyper_samples.ctypes.data, num_hyper_samples.dtype.itemsize)
prepare_func(self.handle, task_obj, scratch_size, self.workspace, stream.ptr) # similar args for marginal and sampler
workspace_size_d = cutn.workspace_get_memory_size(self.handle,
self.workspace, cutn.WorksizePref.RECOMMENDED, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if workspace_size_d >= scratch_size:
destroy_func(task_obj)
return None
scratch_space = cp.cuda.alloc(workspace_size_d)
cutn.workspace_set_memory(self.handle,
self.workspace, cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH, scratch_space.ptr, workspace_size_d)
execute_func(self.handle, task_obj, *execute_args, stream.ptr)
stream.synchronize()
destroy_func(task_obj)
return True
def get_reduced_density_matrix_from_cutn(self, where, fixed=EMPTY_DICT):
n_marginal_modes = len(where)
marginal_modes = [self.qubits.index(q) for q in where]
if fixed:
n_projected_modes = len(fixed)
projected_modes = []
projected_mode_values = []
for q, bit in fixed.items():
projected_modes.append(self.qubits.index(q))
projected_mode_values.append(int(bit))
else:
n_projected_modes = projected_modes = projected_mode_values = 0
rdm = cp.empty((2,2)*n_marginal_modes, dtype=self.dtype, order=np.random.choice(['C', 'F']))
rdm_strides = [s // rdm.itemsize for s in rdm.strides]
stream = cp.cuda.get_current_stream()
create_args = (n_marginal_modes, marginal_modes, n_projected_modes, projected_modes, rdm_strides)
execute_args = (projected_mode_values, self.workspace, rdm.data.ptr)
if self._run_cutensornet_sampling_marginal('marginal', create_args, execute_args, stream):
return rdm
else:
return None
def get_sampling_from_cutensornet(self, qubits_to_sample=None, seed=None):
if qubits_to_sample is None:
qubits_to_sample = self.qubits
n_modes_to_sample = len(qubits_to_sample)
modes_to_sample = [self.qubits.index(q) for q in qubits_to_sample]
samples = np.empty((self.nshots, n_modes_to_sample), dtype='int64', order='C') # equivalent to (n_modes, nshots) in F order
stream = cp.cuda.get_current_stream()
create_args = (n_modes_to_sample, modes_to_sample)
execute_args = (self.nshots, self.workspace, samples.ctypes.data)
if self._run_cutensornet_sampling_marginal('sampler', create_args, execute_args, stream):
sampling = {}
for bitstring, n_sampling in zip(*np.unique(samples, axis=0, return_counts=True)):
bitstring = np.array2string(bitstring, separator='')[1:-1]
sampling[int(bitstring, 2)] = n_sampling
return sampling
else:
return None
def test_qubits(self):
assert len(self.qubits) == self.n_qubits
def test_gates(self):
for (gate_operand, qubits) in self.converter.gates:
assert gate_operand.ndim == len(qubits) * 2
assert infer_object_package(gate_operand) == self.backend.__name__
def test_state_vector(self):
expression, operands = self.converter.state_vector()
sv1 = contract(expression, *operands)
sv2 = self.get_state_vector_from_simulator()
assert self.backend.allclose(
sv1, sv2, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
def test_amplitude(self):
for bitstring in bitstring_generator(self.n_qubits, self.nsample):
expression, operands = self.converter.amplitude(bitstring)
amp1 = contract(expression, *operands)
amp2 = self.get_amplitude_from_simulator(bitstring)
assert self.backend.allclose(
amp1, amp2, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
def test_batched_amplitudes(self):
for fixed in where_fixed_generator(self.qubits, self.nfix_max):
expression, operands = self.converter.batched_amplitudes(fixed)
batched_amps1 = contract(expression, *operands)
batched_amps2 = self.get_batched_amplitudes_from_simulator(fixed)
assert self.backend.allclose(
batched_amps1, batched_amps2, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
def test_reduced_density_matrix(self):
for where, fixed in where_fixed_generator(self.qubits, self.nfix_max, nsite_max=self.nsite_max):
expression1, operands1 = self.converter.reduced_density_matrix(where, fixed=fixed, lightcone=True)
expression2, operands2 = self.converter.reduced_density_matrix(where, fixed=fixed, lightcone=False)
assert len(operands1) <= len(operands2) + 2 # potential phase handling for qiskit Circuit
rdm1 = contract(expression1, *operands1)
rdm2 = contract(expression2, *operands2)
rdm3 = self.get_reduced_density_matrix_from_simulator(where, fixed=fixed)
assert self.backend.allclose(
rdm1, rdm2, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
assert self.backend.allclose(
rdm1, rdm3, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
if self.backend is cp:
rdm4 = self.get_reduced_density_matrix_from_cutn(where, fixed=fixed)
if rdm4 is not None:
assert self.backend.allclose(
rdm1, rdm4, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
def test_expectation(self):
for pauli_string in random_pauli_string_generator(self.n_qubits, 2):
expression1, operands1 = self.converter.expectation(pauli_string, lightcone=True)
expression2, operands2 = self.converter.expectation(pauli_string, lightcone=False)
assert len(operands1) <= len(operands2) + 2 # potential phase handling for qiskit Circuit
expec1 = contract(expression1, *operands1)
expec2 = contract(expression2, *operands2)
expec3 = self.get_expectation_from_sv(pauli_string)
assert self.backend.allclose(
expec1, expec2, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
assert self.backend.allclose(
expec1, expec3, atol=atol_mapper[self.dtype], rtol=rtol_mapper[self.dtype])
def test_sampling(self):
full_qubits = list(self.qubits)
np.random.shuffle(full_qubits)
selected_qubits = full_qubits[:len(full_qubits)//2]
for qubits_to_sample in (None, selected_qubits):
seed = self.seed
nshots = self.nshots
max_try = 3
overlap_best = 0.
for counter in range(1, max_try+1):
# build a histogram for the reference impl
hist_ref = self._get_sampling_from_simulator(qubits_to_sample=qubits_to_sample, seed=seed)
# do the same for cutensornet sampling
hist_cutn = self.get_sampling_from_cutensornet(qubits_to_sample=qubits_to_sample, seed=seed)
# compute overlap of the histograms (cutn vs ref)
overlap = compute_histogram_overlap(hist_cutn, hist_ref, self.nshots)
if overlap > overlap_best:
overlap_best = overlap
else:
print("WARNING: overlap not improving as nshots increases!")
# do the same for sampling from the (exactly computed) SV
hist_sv = self.get_sampling_from_sv(qubits_to_sample=qubits_to_sample, seed=seed)
# compute overlap of the histograms (sv vs ref)
overlap_check = compute_histogram_overlap(hist_sv, hist_ref, self.nshots)
print(f"with nshots = {self.nshots}, {overlap_best = }, {overlap_check = }")
# to reduce test time we set 95% here, but 99% will also work
if np.round(overlap, decimals=2) < 0.95:
self.nshots *= 10
print(f"retry with nshots = {self.nshots} ...")
else:
self.nshots = nshots # restore
break
else:
self.nshots = nshots # restore
assert False, f"{overlap_best=} after {counter} retries..."
@manage_resource("handle")
@manage_resource("state")
@manage_resource("workspace")
def run_tests(self):
self.test_state_vector()
self.test_amplitude()
self.test_batched_amplitudes()
self.test_reduced_density_matrix()
self.test_expectation()
self.test_gates()
self.test_qubits()
if self.backend is cp:
# sampling only needs to be tested for the cupy backend
self.test_sampling()
class CirqTester(BaseTester):
def _get_state_vector_from_simulator(self):
qubits = self.qubits
simulator = cirq.Simulator(dtype=self.dtype)
circuit = circuit_parser_utils_cirq.remove_measurements(self.circuit)
result = simulator.simulate(circuit, qubit_order=qubits)
statevector = result.state_vector().reshape((2,)*self.n_qubits)
if self.backend is torch:
statevector = torch.as_tensor(statevector, dtype=getattr(torch, self.dtype), device='cuda')
else:
statevector = self.backend.asarray(statevector, dtype=self.dtype)
return statevector
def _get_sampling_from_simulator(self, qubits_to_sample=None, seed=None):
if qubits_to_sample is None:
qubits_to_sample = list(self.qubits)
circuit = circuit_parser_utils_cirq.remove_measurements(self.circuit)
circuit.append(cirq.measure_each(qubits_to_sample))
circuit.append(cirq.measure(*qubits_to_sample, key='meas'))
result = cirq.sample(
circuit, repetitions=self.nshots, seed=seed, dtype=getattr(np, self.dtype))
result = result.histogram(key='meas')
sampling = {}
nsamples = 0
for bitstring, nsample in result.items():
sampling[int(bitstring)] = nsample
nsamples += nsample
assert nsamples == self.nshots
return sampling
class QiskitTester(BaseTester):
def _get_precision(self):
precision = {'complex64': 'single',
'complex128': 'double'}[self.dtype]
return precision
def _get_state_vector_from_simulator(self):
# requires qiskit >= 0.24.0
precision = self._get_precision()
circuit = circuit_parser_utils_qiskit.remove_measurements(self.circuit)
try:
# for qiskit >= 0.25.0
simulator = qiskit.Aer.get_backend('aer_simulator_statevector', precision=precision)
circuit = qiskit.transpile(circuit, simulator)
circuit.save_statevector()
result = simulator.run(circuit).result()
except:
# for qiskit 0.24.*
simulator = qiskit.Aer.get_backend('statevector_simulator', precision=precision)
result = qiskit.execute(circuit, simulator).result()
sv = np.asarray(result.get_statevector()).reshape((2,)*circuit.num_qubits)
# the statevector returned by qiskit's simulator is labelled in the reverse order of :attr:`qiskit.QuantumCircuit.qubits`
# this is different from `cirq` and different from the implementation in :class:`CircuitToEinsum`
sv = sv.transpose(list(range(circuit.num_qubits))[::-1])
if self.backend is torch:
sv = torch.as_tensor(sv, dtype=getattr(torch, self.dtype), device='cuda')
else:
sv = self.backend.asarray(sv, dtype=self.dtype)
return sv
def _get_sampling_from_simulator(self, qubits_to_sample=None, seed=None):
if qubits_to_sample is None:
qubits_to_sample = list(self.qubits)
circuit = self.circuit.remove_final_measurements(inplace=False)
new_creg = circuit._create_creg(len(qubits_to_sample), "meas")
circuit.add_register(new_creg)
circuit.measure(qubits_to_sample, new_creg)
precision = self._get_precision()
backend = qiskit.Aer.get_backend('qasm_simulator', precision=precision)
result = backend.run(qiskit.transpile(circuit, backend), shots=self.nshots, seed=seed).result()
counts = result.get_counts(circuit)
sampling = {}
nsamples = 0
for bitstring, nsample in counts.items():
# little endian from qiskit
value = int(bitstring[::-1], 2)
sampling[value] = nsample
nsamples += nsample
assert nsamples == self.nshots
return sampling
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/circuit_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
# Note: This file must be self-contained and not import private helpers!
import importlib
import logging
try:
import cupy as cp
except ImportError:
cp = None
import numpy as np
####################################################
################# Helper functions #################
####################################################
def get_stream_pointer(backend):
if backend == "numpy":
return 0
elif backend == "cupy":
return cp.cuda.get_current_stream().ptr
elif backend == "torch":
import torch
return torch.cuda.current_stream().cuda_stream
else:
raise NotImplementedError(f"{backend} not supported")
def infer_backend(obj):
module = obj.__class__.__module__.split(".")[0]
return importlib.import_module(module)
def parse_split_expression(split_expression):
modes_in, modes_out = split_expression.split("->")
left_modes, right_modes = modes_out.split(",")
shared_modes = set(left_modes) & set(right_modes)
# only allow one shared mode in the output
assert len(shared_modes) == 1, f"the split expr \"{split_expression}\" does not have a unique shared mode"
shared_mode = list(shared_modes)[0]
return modes_in, left_modes, right_modes, shared_mode
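# For example, parse_split_expression("abcd->axc,xdb") returns ("abcd", "axc", "xdb", "x"):
# the input modes, the two output mode strings, and the single mode shared between the outputs.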
def get_new_modes(used_modes, num):
# Note: cannot use _internal.circuit_converter_utils._get_symbol() here, as this
# module needs to be standalone. We don't need that many symbols here, anyway.
base_modes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
new_modes = ""
for mode in base_modes:
if mode not in used_modes:
new_modes += mode
if len(new_modes) == num:
break
else:
raise RuntimeError(f"can't find {num} new modes")
return new_modes
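# For example, get_new_modes("abcd->axc,xdb", 2) returns "ef", the first two single-letter
# modes not already used anywhere in the split expression.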
def prepare_reduced_qr_modes(modes_in, modes_out, new_mode, shared_modes_in):
"""Given the input modes and output modes in a gate problem, generate the modes for QR for the reduced algorithm"""
modes_q = ""
modes_r = new_mode
for mode in modes_in:
if mode in modes_out and mode not in shared_modes_in: # in case the same mode is used as shared modes in both input and output
modes_q += mode
else:
modes_r += mode
modes_q += new_mode
return modes_q, modes_r
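# Illustrative example with hypothetical modes: for a left input tensor "abc" whose mode "c"
# is shared with the right input, a left output "afc", and new mode "x", this returns
# ("ax", "xbc"), i.e. the QR split "abc->ax,xbc" used by the reduced gate algorithm:
# Q keeps the open mode untouched by the gate, R keeps the gate-facing and shared-bond modes.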
# used by cutensornetApprox
def parse_modes_extents(extent_map, split_expression):
modes_in, left_modes_out, right_modes_out, shared_mode_out = parse_split_expression(split_expression)
shared_modes_duplicated = False
input_modes = modes_in.split(",")
if len(input_modes) == 1:
left_extent = compute_size(extent_map, left_modes_out.replace(shared_mode_out, ""))
right_extent = compute_size(extent_map, right_modes_out.replace(shared_mode_out, ""))
elif len(input_modes) == 3:
new_modes = get_new_modes(split_expression, 2)
left_modes_in, right_modes_in, gate_modes = input_modes
shared_modes_in = set(left_modes_in) & set(right_modes_in)
modes_qa, modes_ra = prepare_reduced_qr_modes(left_modes_in, left_modes_out, new_modes[0], shared_modes_in)
modes_qb, modes_rb = prepare_reduced_qr_modes(right_modes_in, right_modes_out, new_modes[1], shared_modes_in)
# extent for shared mode between qa and ra
intm_open_extent_a = min(compute_size(extent_map, modes_qa.replace(new_modes[0], "")),
compute_size(extent_map, modes_ra.replace(new_modes[0], "")))
# extent for shared mode between qb and rb
intm_open_extent_b = min(compute_size(extent_map, modes_qb.replace(new_modes[1], "")),
compute_size(extent_map, modes_rb.replace(new_modes[1], "")))
intm_modes_out = infer_contracted_output_modes(modes_ra+modes_rb+gate_modes)
intm_modes_left = "".join([mode for mode in intm_modes_out if mode in left_modes_out and mode != shared_mode_out]) # excluding new_modes[0] (shared mode between qa and ra) and the shared mode between intm_left and intm_right
intm_modes_right = "".join([mode for mode in intm_modes_out if mode in right_modes_out and mode != shared_mode_out]) # excluding new_modes[1] (shared) and the shared mode between intm_left and intm_right
assert set(infer_contracted_output_modes(intm_modes_left+intm_modes_right+new_modes)) == set(intm_modes_out)
left_extent = compute_size(extent_map, intm_modes_left) * intm_open_extent_a # multiply by intm_open_extent_a to add back the contribution from the shared mode between qa and ra
right_extent = compute_size(extent_map, intm_modes_right) * intm_open_extent_b # multiply by intm_open_extent_b to add back the contribution from the shared mode between qb and rb
shared_modes_duplicated = shared_mode_out in shared_modes_in
else:
raise ValueError("Split_expression must be a valid SVD/QR or Gate expression")
return modes_in, left_modes_out, right_modes_out, shared_mode_out, shared_modes_duplicated, min(left_extent, right_extent)
def infer_contracted_output_modes(modes_in):
modes_in = modes_in.replace(",","")
modes_out = "".join([mode for mode in modes_in if modes_in.count(mode)==1])
return modes_out
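# For example, infer_contracted_output_modes("abx,xcd") returns "abcd": modes that appear
# exactly once survive, repeated (contracted) modes such as "x" are dropped.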
def compute_size(size_dict, modes):
"""Given the modes, compute the product of all extents that are recorded in size_dict. Note modes not in size_dict will be neglected."""
size = 1
for mode in modes:
if mode in size_dict:
size *= size_dict[mode]
return size
def get_tensordot_axes(modes, shared_mode):
axes = []
for ax, mode in enumerate(modes):
if mode != shared_mode:
axes.append(ax)
return [axes, axes]
def reverse_einsum(split_expression, array_left, array_mid, array_right):
backend = infer_backend(array_left)
einsum_kwargs = get_einsum_kwargs(backend)
modes_in, left_modes, right_modes, shared_mode = parse_split_expression(split_expression)
if modes_in.count(",") == 0:
modes_out = modes_in
else:
modes_out = infer_contracted_output_modes(modes_in)
if array_mid is None:
# For QR or SVD with S partitioned onto U or V or both.
einsum_string = f"{left_modes},{right_modes}->{modes_out}"
out = backend.einsum(einsum_string, array_left, array_right)
else:
# For SVD with singular values explicitly returned
einsum_string = f"{left_modes},{shared_mode},{right_modes}->{modes_out}"
out = backend.einsum(einsum_string, array_left, array_mid, array_right, **einsum_kwargs)
return out
def is_exact_split(**split_options):
max_extent = split_options.get("max_extent", 0)
abs_cutoff = split_options.get("abs_cutoff", 0)
rel_cutoff = split_options.get("rel_cutoff", 0)
normalization = split_options.get("normalization", None)
return (max_extent == 0 or max_extent is None) and abs_cutoff == 0 and rel_cutoff == 0 and normalization is None
def split_contract_decompose(subscripts):
if '.' in subscripts:
raise ValueError("This function does not support ellipses notation.")
inputs, outputs = subscripts.split('->')
tmp_subscripts = outputs.replace(",", "")
intm_modes = "".join(s for s in tmp_subscripts if tmp_subscripts.count(s) == 1)
contract_subscripts = f"{inputs}->{intm_modes}"
decompose_subscripts = f"{intm_modes}->{outputs}"
return contract_subscripts, decompose_subscripts
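# For example, split_contract_decompose("ab,bcd->ax,xcd") returns
# ("ab,bcd->acd", "acd->ax,xcd"): a full contraction to the intermediate tensor
# followed by its decomposition into the requested outputs.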
# NOTE: torch does not have native support for F order.
# We get around this by converting to CuPy/NumPy ndarrays as a workaround;
# the overhead for torch tensors on GPU should be minimal as torch tensors support __cuda_array_interface__
def torch_support_wrapper(func):
def new_func(T, *args, **kwargs):
backend = infer_backend(T)
if backend not in (cp, np): # torch
if T.device.type == 'cpu':
out = func(T.numpy(), *args, **kwargs)
else:
out = func(cp.asarray(T), *args, **kwargs)
return backend.as_tensor(out, device=T.device)
return func(T, *args, **kwargs)
return new_func
def get_einsum_kwargs(backend):
if backend in (cp, np):
return {'optimize': True}
else:
return {} # optimize not supported in torch.einsum
####################################
############ Execution #############
####################################
@torch_support_wrapper
def tensor_permute(T, input_modes, output_modes):
axes = [input_modes.index(i) for i in output_modes]
return T.transpose(*axes).astype(T.dtype, order="F")
@torch_support_wrapper
def tensor_reshape_fortran_order(T, shape):
return T.reshape(shape, order='F')
def matrix_qr(T):
backend = infer_backend(T)
return backend.linalg.qr(T)
def matrix_svd(
T,
max_extent=0,
abs_cutoff=0,
rel_cutoff=0,
partition=None,
normalization=None,
return_info=True,
**kwargs,
):
info = dict()
backend = infer_backend(T)
if backend not in (cp, np) and T.device.type != 'cpu':
u, s, v = backend.linalg.svd(T, full_matrices=False, driver='gesvd')
if v.is_conj(): # VH from torch.linalg.svd is a view, we need to materialize it
v = v.resolve_conj()
else:
u, s, v = backend.linalg.svd(T, full_matrices=False)
info["full_extent"] = len(s)
cutoff = max(abs_cutoff, rel_cutoff*s[0])
if max_extent == 0 or max_extent is None:
max_extent = len(s)
reduced_extent = min(max_extent, int((s>cutoff).sum()))
reduced_extent = max(reduced_extent, 1)
info["reduced_extent"] = reduced_extent
if reduced_extent != len(s):
sqrt_sum = backend.linalg.norm(s).item() ** 2
u = u[:, :reduced_extent]
s = s[:reduced_extent]
v = v[:reduced_extent]
reduced_sqrt_sum = backend.linalg.norm(s).item() ** 2
info["discarded_weight"] = 1 - reduced_sqrt_sum / sqrt_sum
else:
info["discarded_weight"] = 0.
if normalization == "L1":
s /= s.sum()
elif normalization == "L2":
s /= backend.linalg.norm(s)
elif normalization == "LInf":
s = s / s[0]
elif normalization is not None:
raise ValueError
if partition == "U":
u = backend.einsum("ab,b->ab", u, s)
s = None
elif partition == "V":
v = backend.einsum("ab,a->ab", v, s)
s = None
elif partition == "UV":
s_sqrt = backend.sqrt(s)
u = backend.einsum("ab,b->ab", u, s_sqrt)
v = backend.einsum("ab,a->ab", v, s_sqrt)
s = None
elif partition is not None:
raise ValueError
if return_info:
return u, s, v, info
else:
return u, s, v
def tensor_decompose(
split_expression,
T,
method='qr',
return_info=False,
**kwargs
):
modes_in, left_modes, right_modes, shared_mode = parse_split_expression(split_expression)
left_modes_intm = left_modes.replace(shared_mode, '') + shared_mode
right_modes_intm = shared_mode + right_modes.replace(shared_mode, '')
modes_in_intm = left_modes_intm[:-1] + right_modes_intm[1:]
T_intm = tensor_permute(T, modes_in, modes_in_intm)
left_shape = T_intm.shape[:len(left_modes)-1]
right_shape = T_intm.shape[len(left_modes)-1:]
m = np.prod(left_shape, dtype=np.int64)
n = np.prod(right_shape, dtype=np.int64)
T_intm = tensor_reshape_fortran_order(T_intm, (m, n))
if method.lower() == 'qr':
if kwargs:
raise ValueError("QR does not support any options")
if return_info:
raise ValueError("No info for tensor QR")
out_left, out_right = matrix_qr(T_intm)
elif method.lower() == 'svd':
out_left, s, out_right, info = matrix_svd(T_intm, return_info=True, **kwargs)
else:
raise NotImplementedError(f"{method} not supported")
T_intm = tensor_reshape_fortran_order(T_intm, (m, n))
out_left = tensor_reshape_fortran_order(out_left, tuple(left_shape)+(-1,))
out_right = tensor_reshape_fortran_order(out_right, (-1, ) + tuple(right_shape))
out_left = tensor_permute(out_left, left_modes_intm, left_modes)
out_right = tensor_permute(out_right, right_modes_intm, right_modes)
if method == "qr":
return out_left, out_right
else:
if return_info:
return out_left, s, out_right, info
else:
return out_left, s, out_right
def gate_decompose(
split_expression,
array_a,
array_b,
array_g,
gate_algo="direct",
return_info=False,
**kwargs
):
modes_in, left_modes_out, right_modes_out, shared_mode_out = parse_split_expression(split_expression)
backend = infer_backend(array_a)
einsum_kwargs = get_einsum_kwargs(backend)
left_modes_in, right_modes_in, modes_g = modes_in.split(",")
if gate_algo == "direct":
modes_intm = infer_contracted_output_modes(modes_in)
T = backend.einsum(f"{modes_in}->{modes_intm}", array_a, array_b, array_g, **einsum_kwargs)
svd_expression = f"{modes_intm}->{left_modes_out},{right_modes_out}"
return tensor_decompose(svd_expression, T, method='svd', return_info=return_info, **kwargs)
elif gate_algo == "reduced":
new_modes = get_new_modes(split_expression, 2)
size_dict = dict(zip(left_modes_in, array_a.shape))
size_dict.update(dict(zip(right_modes_in, array_b.shape)))
shared_modes_in_ab = set(left_modes_in) & set(right_modes_in)
modes_qa, modes_ra = prepare_reduced_qr_modes(left_modes_in, left_modes_out, new_modes[0], shared_modes_in_ab)
modes_qb, modes_rb = prepare_reduced_qr_modes(right_modes_in, right_modes_out, new_modes[1], shared_modes_in_ab)
skip_qr_a = compute_size(size_dict, modes_qa) <= compute_size(size_dict, modes_ra)
skip_qr_b = compute_size(size_dict, modes_qb) <= compute_size(size_dict, modes_rb)
if not skip_qr_a:
qa, ra = tensor_decompose(f"{left_modes_in}->{modes_qa},{modes_ra}", array_a, method="qr")
if not skip_qr_b:
qb, rb = tensor_decompose(f"{right_modes_in}->{modes_qb},{modes_rb}", array_b, method="qr")
intm_modes_in = f"{left_modes_in if skip_qr_a else modes_ra},{right_modes_in if skip_qr_b else modes_rb},{modes_g}"
modes_rg = infer_contracted_output_modes(intm_modes_in)
einsum_string = intm_modes_in + f"->{modes_rg}"
T = backend.einsum(einsum_string, array_a if skip_qr_a else ra, array_b if skip_qr_b else rb, array_g, **einsum_kwargs)
modes_rgu = ""
modes_rgv = shared_mode_out
for mode in modes_rg:
if mode in left_modes_out or mode == new_modes[0]:
modes_rgu += mode
else:
modes_rgv += mode
modes_rgu += shared_mode_out
svd_expression = f"{modes_rg}->{left_modes_out if skip_qr_a else modes_rgu},{right_modes_out if skip_qr_b else modes_rgv}"
svd_outputs = tensor_decompose(svd_expression, T, method="svd", return_info=return_info, **kwargs)
if skip_qr_a:
u = svd_outputs[0]
else:
u = backend.einsum(f"{modes_qa},{modes_rgu}->{left_modes_out}", qa, svd_outputs[0])
s = svd_outputs[1]
if skip_qr_b:
v = svd_outputs[2]
else:
v = backend.einsum(f"{modes_qb},{modes_rgv}->{right_modes_out}", qb, svd_outputs[2])
if return_info:
return u, s, v, svd_outputs[3]
else:
return u, s, v
else:
raise ValueError
####################################
########### Verification ###########
####################################
QR_TOLERANCE = {"float32": 1e-5,
"float64": 1e-13,
"complex64": 1e-5,
"complex128": 1e-13}
SVD_TOLERANCE = {"float32": 7e-3,
"float64": 1e-13,
"complex64": 7e-3,
"complex128": 1e-13}
def get_tolerance(task, dtype):
if hasattr(dtype, "name"):
dtype = dtype.name
else:
dtype = str(dtype).split('.')[-1]
if task == "qr":
return QR_TOLERANCE[dtype]
elif task in ["svd", "gate"]:
return SVD_TOLERANCE[dtype]
else:
raise ValueError
def verify_close(
array_a,
array_b,
rtol,
scale_by_norm=False,
scale_factor=1,
error_message=None
):
backend = infer_backend(array_a)
diff = backend.linalg.norm(array_a - array_b).item()
if scale_by_norm:
diff /= scale_factor * backend.linalg.norm(array_b).item()
else:
diff /= scale_factor
is_close = diff < rtol
if not is_close:
array_diff = backend.abs(array_a - array_b).ravel()
idx = backend.argmax(array_diff)
if error_message:
logging.error(error_message)
else:
logging.error("Large difference found in input tensors")
logging.error(f"For a target rtol of {rtol}, diff max: {array_diff.max()} found at idx: {idx} (a[idx]: {array_a.ravel()[idx]}, b[idx]: {array_b.ravel()[idx]})")
return is_close
def verify_unitary(
T,
modes,
shared_mode,
rtol,
tensor_name="Tensor"
):
backend = infer_backend(T)
axes = get_tensordot_axes(modes, shared_mode)
out = backend.tensordot(T, T.conj(), axes)
if backend not in (cp, np): # torch
identity = backend.eye(out.shape[0], device=T.device)
else:
identity = backend.eye(out.shape[0])
error_message = f"{tensor_name} is not unitary"
return verify_close(out, identity, rtol, False, out.shape[0], error_message)
def verify_upper_triangular(
T,
modes,
shared_mode,
rtol,
tensor_name="Tensor"
):
backend = infer_backend(T)
shared_idx = modes.index(shared_mode)
mid_extent = T.shape[shared_idx]
axes = [shared_idx] + [idx for idx in range(len(modes)) if idx != shared_idx]
T_intm = tensor_permute(T, modes, shared_mode+modes.replace(shared_mode, ''))
T_intm = tensor_reshape_fortran_order(T_intm, (mid_extent, -1))
error_message = f"{tensor_name} is not upper triangular"
return verify_close(T_intm, backend.triu(T_intm), rtol, False, mid_extent, error_message)
def verify_split_QR(
split_expression,
T,
array_q,
array_r,
array_q_ref,
array_r_ref
):
modes_in, left_modes, right_modes, shared_mode = parse_split_expression(split_expression)
shared_mode_idx = left_modes.index(shared_mode)
shared_extent = array_q.shape[shared_mode_idx]
if T is not None:
reference = T
else:
reference = reverse_einsum(split_expression, array_q_ref, None, array_r_ref)
out = reverse_einsum(split_expression, array_q, None, array_r)
rtol = get_tolerance("qr", out.dtype)
is_equal = verify_close(reference, out, rtol, True, scale_factor=shared_extent, error_message="Contracted output is not close to the expected outcome")
is_unitary = verify_unitary(array_q, left_modes, shared_mode, rtol, tensor_name="Output tensor Q")
is_upper_triangular = verify_upper_triangular(array_r, right_modes, shared_mode, rtol, tensor_name="Output tensor R")
return is_equal and is_unitary and is_upper_triangular
def verify_split_SVD(
split_expression,
T,
array_u,
array_s,
array_v,
array_u_ref,
array_s_ref,
array_v_ref,
info=None,
info_ref=None,
**split_options
):
# Note: this function works for both SVD and Gate (by specifying T to be None)
modes_in, left_modes, right_modes, shared_mode = parse_split_expression(split_expression)
shared_mode_idx = left_modes.index(shared_mode)
shared_extent = array_u.shape[shared_mode_idx]
try:
max_mid_extent = min(array_u.size, array_v.size) // shared_extent
except:
# for torch
max_mid_extent = min(array_u.numel(), array_v.numel()) // shared_extent
max_extent = split_options.pop('max_extent', max_mid_extent)
if is_exact_split(**split_options) and max_extent == max_mid_extent and T is not None:
reference = T
else:
reference = reverse_einsum(split_expression, array_u_ref, array_s_ref, array_v_ref)
out = reverse_einsum(split_expression, array_u, array_s, array_v)
if hasattr(out.dtype, "name"):
dtype_name = out.dtype.name
else:
dtype_name = str(out.dtype).split('.')[-1]
backend = infer_backend(out)
rtol = get_tolerance("svd", out.dtype) # Note: tolerance for gate and svd is equal
if info is not None:
algorithm = info['algorithm']
else:
algorithm = 'gesvd'
if algorithm == 'gesvdj':
if dtype_name in ['float64', 'complex128']:
rtol = 1e-8
if 'gesvdj_residual' not in info:
logging.warning("gesvdj_residual not recorded in info; verification may fail due to unknown runtime status")
else:
rtol = max(rtol, info['gesvdj_residual'])
elif algorithm == 'gesvdp':
if dtype_name in ['float64', 'complex128']:
rtol = 1e-8
if 'gesvdp_err_sigma' not in info:
logging.warning("gesvdp_err_sigma not recorded in info; verification may fail due to unknown runtime status")
elif info['gesvdp_err_sigma'] > 1e-4:
logging.warning(f"Large err sigma found for gesvdp: {info['gesvdp_err_sigma']}, skipping verification")
return True
elif algorithm == 'gesvdr':
if dtype_name in ['float64', 'complex128']:
rtol = 1e-4
is_equal = verify_close(reference, out, rtol, True, scale_factor=shared_extent, error_message="Contracted output is not close to the expected outcome")
partition = split_options.get("partition", None)
if partition not in ["U", "V", "UV", None]:
raise ValueError
normalization = split_options.get("normalization", None)
if normalization not in ["L1", "L2", "LInf", None]:
raise ValueError
is_s_equal = True
left_tensordot_axes = get_tensordot_axes(left_modes, shared_mode)
right_tensordot_axes = get_tensordot_axes(right_modes, shared_mode)
if partition == "U":
array_s = backend.sqrt(backend.tensordot(array_u, array_u.conj(), left_tensordot_axes,).diagonal().real)
array_s_ref = backend.sqrt(backend.tensordot(array_u_ref, array_u_ref.conj(), left_tensordot_axes,).diagonal().real)
array_u = backend.einsum(f"{left_modes},{shared_mode}->{left_modes}", array_u, 1.0/array_s)
elif partition == "V":
array_s = backend.sqrt(backend.tensordot(array_v, array_v.conj(), right_tensordot_axes,).diagonal().real)
array_s_ref = backend.sqrt(backend.tensordot(array_v_ref, array_v_ref.conj(), right_tensordot_axes).diagonal().real)
array_v = backend.einsum(f"{right_modes},{shared_mode}->{right_modes}", array_v, 1.0/array_s)
elif partition == "UV":
array_s = backend.tensordot(array_u, array_u.conj(), left_tensordot_axes).diagonal().real
array_s1 = backend.tensordot(array_v, array_v.conj(), right_tensordot_axes,).diagonal().real
array_s_ref = backend.tensordot(array_u_ref, array_u_ref.conj(), left_tensordot_axes).diagonal().real
is_s_equal = verify_close(array_s, array_s1, rtol, scale_by_norm=True, scale_factor=shared_extent, error_message="Singular values from u and v are not equal")
array_u = backend.einsum(f"{left_modes},{shared_mode}->{left_modes}", array_u, 1.0/backend.sqrt(array_s))
array_v = backend.einsum(f"{right_modes},{shared_mode}->{right_modes}", array_v, 1.0/backend.sqrt(array_s))
is_u_unitary = verify_unitary(array_u, left_modes, shared_mode, rtol, tensor_name="Output tensor U")
is_v_unitary = verify_unitary(array_v, right_modes, shared_mode, rtol, tensor_name="Output tensor V")
is_s_equal = is_s_equal and verify_close(array_s, array_s_ref, rtol, scale_by_norm=True, scale_factor=shared_extent, error_message="Output singular values not matching reference")
info_equal = True
if info is not None and info_ref is not None:
for attr in ["full_extent", "reduced_extent"]:
info_equal = info_equal and info[attr] == info_ref[attr]
        # For gesvdr, the discarded weight is only computed when fixed-extent truncation is not enabled
if info['algorithm'] != 'gesvdr' or max_extent == max_mid_extent:
info_equal = info_equal and (abs(info["discarded_weight"]-info_ref["discarded_weight"]) < rtol)
if not info_equal:
info_details = "".join([f"{key}:({info.get(key)}, {info_ref.get(key)}); " for key in info.keys()])
logging.error(f"SVD Info not matching the reference: {info_details}")
return is_equal and is_u_unitary and is_v_unitary and is_s_equal and info_equal
def verify_split(
split_expression,
method,
T,
array_left,
array_mid,
array_right,
array_left_ref,
array_mid_ref,
array_right_ref,
info=None,
info_ref=None,
**split_options
):
if method == "qr":
return verify_split_QR(split_expression, T, array_left, array_right, array_left_ref, array_right_ref)
elif method in ["gate", "svd"]:
return verify_split_SVD(split_expression, T,
array_left, array_mid, array_right,
array_left_ref, array_mid_ref, array_right_ref,
info=info, info_ref=info_ref, **split_options)
else:
raise NotImplementedError
if __name__ == "__main__":
np.random.seed(3)
T = np.random.random([2,2,2,2])
split_expression = "abcd->axc,xdb"
method = 'qr'
q, r = tensor_decompose(split_expression, T, method=method)
assert verify_split(split_expression, method, T, q, None, r, q, None, r)
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/approxTN_utils.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
from collections import abc
import functools
import os
import cupy as cp
from cupy import testing
import numpy as np
try:
import mpi4py
from mpi4py import MPI # init!
except ImportError:
mpi4py = MPI = None
import pytest
try:
import torch
# unlike in other test modules, we don't check torch.cuda.is_available()
# here because we allow verifying against PyTorch CPU tensors
except:
torch = None
import cuquantum
from cuquantum import ComputeType, cudaDataType
from cuquantum import cutensornet as cutn
from cuquantum import tensor
from cuquantum.cutensornet._internal.decomposition_utils import get_svd_info_dict, parse_svd_config
from cuquantum.cutensornet._internal.utils import check_or_create_options
from . import approxTN_utils
from .data import gate_decomp_expressions, tensor_decomp_expressions
from .test_utils import atol_mapper, get_stream_for_backend, rtol_mapper
from .. import (_can_use_cffi, dtype_to_compute_type, dtype_to_data_type,
MemHandlerTestBase, MemoryResourceFactory, LoggerTestBase)
###################################################################
#
# As of beta 2, the test suite for Python bindings is kept minimal.
# The sole goal is to ensure the Python arguments are properly
# passed to the C level. We do not ensure coverage nor correctness.
# This decision will be revisited in the future.
#
###################################################################
def manage_resource(name):
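    # A test-method decorator: it creates the named cuTensorNet resource (handle, descriptor,
    # workspace, ...) right before the wrapped test runs, stores it as an attribute of the test
    # instance, and destroys it in the finally block afterwards, so each test sees a fresh,
    # properly cleaned-up resource.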
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kwargs):
# "self" refers to the test case
try:
if name == 'handle':
h = cutn.create()
elif name == 'dscr':
tn, dtype, input_form = self.tn, self.dtype, self.input_form
einsum, shapes = tn # unpack
tn = TensorNetworkFactory(einsum, shapes, dtype, order=self.order)
i_n_inputs, i_n_modes, i_extents, i_strides, i_modes = \
tn.get_input_metadata(**input_form)
o_n_modes, o_extents, o_strides, o_modes = \
tn.get_output_metadata(**input_form)
i_qualifiers = np.zeros(i_n_inputs, dtype=cutn.tensor_qualifiers_dtype)
if self.qual is not None:
i_qualifiers['requires_gradient'][:] = True
h = cutn.create_network_descriptor(
self.handle,
i_n_inputs, i_n_modes, i_extents, i_strides, i_modes, i_qualifiers,
o_n_modes, o_extents, o_strides, o_modes,
dtype_to_data_type[dtype], dtype_to_compute_type[dtype])
# we also need to keep the tn data alive
self.tn = tn
elif name == 'tensor_decom':
tn, dtype, tensor_form = self.tn, self.dtype, self.tensor_form
options = getattr(self, "options", {})
max_extent = options.get("max_extent", None)
subscript, shapes = tn # unpack
tn = TensorDecompositionFactory(subscript, shapes, dtype, max_extent=max_extent)
h = []
for t in tn.tensor_names:
t = cutn.create_tensor_descriptor(
self.handle,
*tn.get_tensor_metadata(t, **tensor_form),
dtype_to_data_type[dtype])
h.append(t)
# we also need to keep the tn data alive
self.tn = tn
elif name == 'config':
h = cutn.create_contraction_optimizer_config(self.handle)
elif name == 'info':
h = cutn.create_contraction_optimizer_info(
self.handle, self.dscr)
elif name == 'svd_config':
h = cutn.create_tensor_svd_config(self.handle)
elif name == 'svd_info':
h = cutn.create_tensor_svd_info(self.handle)
elif name == 'autotune':
h = cutn.create_contraction_autotune_preference(self.handle)
elif name == 'workspace':
h = cutn.create_workspace_descriptor(self.handle)
elif name == 'slice_group':
# we use this version to avoid creating a sequence; another API
# is tested elsewhere
h = cutn.create_slice_group_from_id_range(self.handle, 0, 1, 1)
elif name == 'state':
dtype = dtype_to_data_type[getattr(np, self.dtype)]
h = cutn.create_state(self.handle, self.state_purity, self.n_qubits, (2,)*self.n_qubits, dtype)
else:
assert False, f'name "{name}" not recognized'
setattr(self, name, h)
impl(self, *args, **kwargs)
except:
print(f'managing resource {name} failed')
raise
finally:
if name == 'handle' and hasattr(self, name):
cutn.destroy(self.handle)
del self.handle
elif name == 'dscr' and hasattr(self, name):
cutn.destroy_network_descriptor(self.dscr)
del self.dscr
elif name == 'tensor_decom' and hasattr(self, name):
for t in self.tensor_decom:
cutn.destroy_tensor_descriptor(t)
del self.tensor_decom
elif name == 'config' and hasattr(self, name):
cutn.destroy_contraction_optimizer_config(self.config)
del self.config
elif name == 'info' and hasattr(self, name):
cutn.destroy_contraction_optimizer_info(self.info)
del self.info
elif name == 'svd_config' and hasattr(self, name):
cutn.destroy_tensor_svd_config(self.svd_config)
del self.svd_config
elif name == 'svd_info' and hasattr(self, name):
cutn.destroy_tensor_svd_info(self.svd_info)
del self.svd_info
elif name == 'autotune' and hasattr(self, name):
cutn.destroy_contraction_autotune_preference(self.autotune)
del self.autotune
elif name == 'workspace' and hasattr(self, name):
h = cutn.destroy_workspace_descriptor(self.workspace)
del self.workspace
elif name == 'slice_group':
h = cutn.destroy_slice_group(self.slice_group)
del self.slice_group
elif name == 'state':
h = cutn.destroy_state(self.state)
return test_func
return decorator
class TestLibHelper:
def test_get_version(self):
ver = cutn.get_version()
assert ver == (cutn.MAJOR_VER * 10000
+ cutn.MINOR_VER * 100
+ cutn.PATCH_VER)
assert ver == cutn.VERSION
def test_get_cudart_version(self):
# CUDA runtime is statically linked, so we can't compare
# with the "runtime" version
ver = cutn.get_cudart_version()
assert isinstance(ver, int)
class TestHandle:
@manage_resource('handle')
def test_handle_create_destroy(self):
        # simple round-trip test
pass
class TensorNetworkFactory:
# TODO(leofang): replace the utilities here by high-level private APIs
# This factory CANNOT be reused; once a TN descriptor uses it, it must
# be discarded.
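    # The factory builds random CuPy input tensors (plus an empty output tensor) for the given
    # einsum expression and exposes their metadata (modes, extents, strides, data pointers) in
    # the various forms ('int', 'seq', 'nested_seq') accepted by the low-level cutensornet API.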
def __init__(self, einsum, shapes, dtype, *, order='C'):
self.einsum = einsum
inputs, output = einsum.split('->') if "->" in einsum else (einsum, None)
i_shapes, o_shape = shapes[:-1], shapes[-1]
inputs = tuple(tuple(_input) for _input in inputs.split(","))
assert all([len(i) == len(s) for i, s in zip(inputs, i_shapes)])
assert len(output) == len(o_shape)
# xp strides in bytes, cutn strides in counts
itemsize = cp.dtype(dtype).itemsize
self.input_tensors = [
testing.shaped_random(s, cp, dtype, seed=i, order=order)
for i, s in enumerate(i_shapes)]
self.input_n_modes = [len(i) for i in inputs]
self.input_extents = i_shapes
self.input_strides = [[stride // itemsize for stride in arr.strides]
for arr in self.input_tensors]
self.input_modes = [tuple([ord(m) for m in i]) for i in inputs]
self.output_tensor = cp.empty(o_shape, dtype=dtype, order=order)
self.output_n_modes = len(o_shape)
self.output_extent = o_shape
self.output_stride = [stride // itemsize for stride in self.output_tensor.strides]
self.output_mode = tuple([ord(m) for m in output])
self.gradients = None
def _get_data_type(self, category):
if 'n_modes' in category:
return np.int32
elif 'extent' in category:
return np.int64
elif 'stride' in category:
return np.int64
elif 'mode' in category:
return np.int32
elif 'tensor' in category:
return None # unused
else:
assert False
def _return_data(self, category, return_value):
data = getattr(self, category)
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0
elif category in ('input_tensors', 'gradients'):
# special case for device arrays, return int as void**
data = np.asarray([d.data.ptr for d in data],
dtype=np.intp)
setattr(self, f'{category}_ptrs', data) # keep data alive
# some data are not nested in nature, so we peek at the first
# element to determine
elif isinstance(data[0], abc.Sequence):
# return int as void**
data = [np.asarray(d, dtype=self._get_data_type(category))
for d in data]
setattr(self, category, data) # keep data alive
data = np.asarray([d.ctypes.data for d in data],
dtype=np.intp)
setattr(self, f'{category}_ptrs', data) # keep data alive
else:
# return int as void*
data = np.asarray(data, dtype=self._get_data_type(category))
setattr(self, category, data) # keep data alive
return data.ctypes.data
elif return_value == 'seq':
if len(data) == 0:
# empty, leave it as is
pass
elif category in ('input_tensors', 'gradients'):
# special case for device arrays
data = [d.data.ptr for d in data]
setattr(self, f'{category}_ptrs', data) # keep data alive
# some data are not nested in nature, so we peek at the first
# element to determine
elif isinstance(data[0], abc.Sequence):
data = [np.asarray(d, dtype=self._get_data_type(category))
for d in data]
setattr(self, category, data) # keep data alive
else:
# data itself is already a flat sequence
pass
return data
elif return_value == 'nested_seq':
return data
else:
assert False
def get_input_metadata(self, **kwargs):
n_inputs = len(self.input_tensors)
n_modes = self._return_data('input_n_modes', kwargs.pop('n_modes'))
extents = self._return_data('input_extents', kwargs.pop('extent'))
strides = self._return_data('input_strides', kwargs.pop('stride'))
modes = self._return_data('input_modes', kwargs.pop('mode'))
return n_inputs, n_modes, extents, strides, modes
def get_output_metadata(self, **kwargs):
n_modes = self.output_n_modes
extent = self._return_data('output_extent', kwargs.pop('extent'))
stride = self._return_data('output_stride', kwargs.pop('stride'))
mode = self._return_data('output_mode', kwargs.pop('mode'))
return n_modes, extent, stride, mode
def get_input_tensors(self, **kwargs):
data = self._return_data('input_tensors', kwargs['data'])
return data
def get_output_tensor(self):
return self.output_tensor.data.ptr
def get_gradient_tensors(self, **kwargs):
if self.gradients is None:
# as of 23.06, the gradient tensors' strides follow those of the
# input tensors
self.gradients = [cp.empty_like(arr) for arr in self.input_tensors]
data = self._return_data('gradients', kwargs['data'])
return data
@testing.parameterize(*testing.product({
'tn': (
('ab,bc->ac', [(2, 3), (3, 2), (2, 2)]),
('ab,ba->', [(2, 3), (3, 2), ()]),
('abc,bca->', [(2, 3, 4), (3, 4, 2), ()]),
('ab,bc,cd->ad', [(2, 3), (3, 1), (1, 5), (2, 5)]),
),
'dtype': (
np.float32, np.float64, np.complex64, np.complex128
),
# use the same format for both input/output tensors
'input_form': (
{'n_modes': 'int', 'extent': 'int', 'stride': 'int',
'mode': 'int', 'data': 'int'},
{'n_modes': 'int', 'extent': 'seq', 'stride': 'seq',
'mode': 'seq', 'data': 'seq'},
{'n_modes': 'seq', 'extent': 'nested_seq', 'stride': 'nested_seq',
'mode': 'seq', 'data': 'seq'},
),
'order': ('C', 'F'),
# mainly for gradient tests
'qual': (None, True),
}))
class TestTensorNetworkBase:
# Use this class as the base to share all common test parametrizations
pass
class TestTensorNetworkDescriptor(TestTensorNetworkBase):
@pytest.mark.parametrize(
'API', ('old', 'new')
)
@manage_resource('handle')
@manage_resource('dscr')
def test_descriptor_create_destroy(self, API):
# we could just do a simple round-trip test, but let's also get
# this helper API tested
handle, dscr = self.handle, self.dscr
if API == 'old':
# TODO: remove this branch
num_modes, modes, extents, strides = cutn.get_output_tensor_details(
handle, dscr)
else:
tensor_dscr = cutn.get_output_tensor_descriptor(handle, dscr)
num_modes, modes, extents, strides = cutn.get_tensor_details(
handle, tensor_dscr)
assert num_modes == self.tn.output_n_modes
assert (modes == np.asarray(self.tn.output_mode, dtype=np.int32)).all()
assert (extents == np.asarray(self.tn.output_extent, dtype=np.int64)).all()
assert (strides == np.asarray(self.tn.output_stride, dtype=np.int64)).all()
if API == 'new':
cutn.destroy_tensor_descriptor(tensor_dscr)
class TestOptimizerInfo(TestTensorNetworkBase):
def _get_path(self, handle, info):
raise NotImplementedError
def _set_path(self, handle, info, path):
attr = cutn.ContractionOptimizerInfoAttribute.PATH
dtype = cutn.contraction_optimizer_info_get_attribute_dtype(attr)
if not isinstance(path, np.ndarray):
path = np.ascontiguousarray(path, dtype=np.int32)
path_obj = np.asarray((path.shape[0], path.ctypes.data), dtype=dtype)
self._set_scalar_attr(handle, info, attr, path_obj)
def _get_scalar_attr(self, handle, info, attr):
dtype = cutn.contraction_optimizer_info_get_attribute_dtype(attr)
data = np.empty((1,), dtype=dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, info, attr,
data.ctypes.data, data.dtype.itemsize)
return data
def _set_scalar_attr(self, handle, info, attr, data):
dtype = cutn.contraction_optimizer_info_get_attribute_dtype(attr)
if not isinstance(data, np.ndarray):
data = np.ascontiguousarray(data, dtype=dtype)
cutn.contraction_optimizer_info_set_attribute(
handle, info, attr,
data.ctypes.data, data.dtype.itemsize)
@manage_resource('handle')
@manage_resource('dscr')
@manage_resource('info')
def test_optimizer_info_create_destroy(self):
# simple round-trip test
pass
@pytest.mark.parametrize(
'attr', [val for val in cutn.ContractionOptimizerInfoAttribute]
)
@manage_resource('handle')
@manage_resource('dscr')
@manage_resource('info')
def test_optimizer_info_get_set_attribute(self, attr):
if attr in (
cutn.ContractionOptimizerInfoAttribute.NUM_SLICES,
cutn.ContractionOptimizerInfoAttribute.NUM_SLICED_MODES,
cutn.ContractionOptimizerInfoAttribute.PHASE1_FLOP_COUNT,
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
cutn.ContractionOptimizerInfoAttribute.LARGEST_TENSOR,
cutn.ContractionOptimizerInfoAttribute.SLICING_OVERHEAD,
cutn.ContractionOptimizerInfoAttribute.EFFECTIVE_FLOPS_EST,
cutn.ContractionOptimizerInfoAttribute.RUNTIME_EST,
):
pytest.skip("setter not supported")
elif attr in (
cutn.ContractionOptimizerInfoAttribute.PATH,
cutn.ContractionOptimizerInfoAttribute.SLICED_MODE,
cutn.ContractionOptimizerInfoAttribute.SLICED_EXTENT,
cutn.ContractionOptimizerInfoAttribute.SLICING_CONFIG,
cutn.ContractionOptimizerInfoAttribute.INTERMEDIATE_MODES,
cutn.ContractionOptimizerInfoAttribute.NUM_INTERMEDIATE_MODES,
):
pytest.skip("TODO")
handle, info = self.handle, self.info
# Hack: assume this is a valid value for all attrs
factor = 30
self._set_scalar_attr(handle, info, attr, factor)
# do a round-trip test as a sanity check
factor2 = self._get_scalar_attr(handle, info, attr)
assert factor == factor2
@pytest.mark.parametrize(
"buffer_form", ("int", "buf")
)
@manage_resource('handle')
@manage_resource('dscr')
@manage_resource('info')
def test_optimizer_info_packing_unpacking(self, buffer_form):
tn, handle, dscr, info = self.tn, self.handle, self.dscr, self.info
attr = cutn.ContractionOptimizerInfoAttribute.PATH
dtype = cutn.contraction_optimizer_info_get_attribute_dtype(attr)
# compute a valid path for the problem
path, _ = np.einsum_path(
tn.einsum,
*[arr for arr in map(lambda a: np.broadcast_to(0, a.shape),
tn.input_tensors)])
# set the path in info (a few other attributes would be computed too)
# and then serialize it
self._set_path(handle, info, path[1:])
buf_size = cutn.contraction_optimizer_info_get_packed_size(
handle, info)
buf_data = np.empty((buf_size,), dtype=np.int8)
if buffer_form == "int":
buf = buf_data.ctypes.data
else: # buffer_form == "buf"
buf = buf_data
cutn.contraction_optimizer_info_pack_data(
handle, info, buf, buf_size)
# sanity check: all info must give the same attribute
attr = cutn.ContractionOptimizerInfoAttribute.LARGEST_TENSOR
largest = self._get_scalar_attr(handle, info, attr)
info2 = cutn.create_contraction_optimizer_info_from_packed_data(
handle, dscr, buf, buf_size)
largest2 = self._get_scalar_attr(handle, info2, attr)
info3 = cutn.create_contraction_optimizer_info(handle, dscr)
cutn.update_contraction_optimizer_info_from_packed_data(
handle, buf, buf_size, info3)
largest3 = self._get_scalar_attr(handle, info3, attr)
try:
assert largest == largest2
assert largest == largest3
finally:
cutn.destroy_contraction_optimizer_info(info2)
cutn.destroy_contraction_optimizer_info(info3)
class TestOptimizerConfig:
@manage_resource('handle')
@manage_resource('config')
def test_optimizer_config_create_destroy(self):
# simple round-trip test
pass
@pytest.mark.parametrize(
'attr', [val for val in cutn.ContractionOptimizerConfigAttribute]
)
@manage_resource('handle')
@manage_resource('config')
def test_optimizer_config_get_set_attribute(self, attr):
handle, config = self.handle, self.config
dtype = cutn.contraction_optimizer_config_get_attribute_dtype(attr)
# Hack: assume this is a valid value for all attrs
if attr in (cutn.ContractionOptimizerConfigAttribute.GRAPH_ALGORITHM,
cutn.ContractionOptimizerConfigAttribute.SLICER_MEMORY_MODEL,
cutn.ContractionOptimizerConfigAttribute.SLICER_DISABLE_SLICING,
cutn.ContractionOptimizerConfigAttribute.SIMPLIFICATION_DISABLE_DR,
cutn.ContractionOptimizerConfigAttribute.COST_FUNCTION_OBJECTIVE,
cutn.ContractionOptimizerConfigAttribute.SMART_OPTION):
factor = np.asarray([1], dtype=dtype)
else:
factor = np.asarray([30], dtype=dtype)
cutn.contraction_optimizer_config_set_attribute(
handle, config, attr,
factor.ctypes.data, factor.dtype.itemsize)
# do a round-trip test as a sanity check
factor2 = np.zeros_like(factor)
cutn.contraction_optimizer_config_get_attribute(
handle, config, attr,
factor2.ctypes.data, factor2.dtype.itemsize)
assert factor == factor2
class TestAutotunePreference:
@manage_resource('handle')
@manage_resource('autotune')
def test_autotune_preference_create_destroy(self):
# simple round-trip test
pass
@pytest.mark.parametrize(
'attr', [val for val in cutn.ContractionAutotunePreferenceAttribute]
)
@manage_resource('handle')
@manage_resource('autotune')
def test_autotune_preference_get_set_attribute(self, attr):
handle, pref = self.handle, self.autotune
dtype = cutn.contraction_autotune_preference_get_attribute_dtype(attr)
# Hack: assume this is a valid value for all attrs
factor = np.asarray([2], dtype=dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref, attr,
factor.ctypes.data, factor.dtype.itemsize)
# do a round-trip test as a sanity check
factor2 = np.zeros_like(factor)
cutn.contraction_autotune_preference_get_attribute(
handle, pref, attr,
factor2.ctypes.data, factor2.dtype.itemsize)
assert factor == factor2
@pytest.mark.parametrize(
'mempool', (None, 'py-callable', 'cffi', 'cffi_struct')
)
@pytest.mark.parametrize(
'workspace_pref', ("min", "recommended", "max")
)
@pytest.mark.parametrize(
'autotune', (True, False)
)
@pytest.mark.parametrize(
'contract', ("legacy", "slice_group", "gradient")
)
@pytest.mark.parametrize(
'stream', (cp.cuda.Stream.null, get_stream_for_backend(cp))
)
class TestContraction(TestTensorNetworkBase):
# There is no easy way for us to test each API independently, so we instead
# parametrize the steps and test the whole workflow
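    # The workflow exercised below: optimize the contraction path, optionally flag input tensors
    # as requiring gradients, query and attach workspace memory (or rely on a mempool), create a
    # contraction plan, optionally autotune, contract (legacy or slice-group API), optionally run
    # the backward pass, and finally verify gradients against PyTorch when it is available.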
@manage_resource('handle')
@manage_resource('dscr')
@manage_resource('info')
@manage_resource('config')
@manage_resource('autotune')
@manage_resource('workspace')
@manage_resource('slice_group')
def test_contraction_gradient_workflow(
self, mempool, workspace_pref, autotune, contract, stream):
if (isinstance(mempool, str) and mempool.startswith('cffi')
and not _can_use_cffi()):
pytest.skip("cannot run cffi tests")
# unpack
handle, dscr, info, config, pref = self.handle, self.dscr, self.info, self.config, self.autotune
workspace = self.workspace
tn, input_form = self.tn, self.input_form
# make sure inputs are ready
# TODO: use stream_wait_event to establish stream order is better
cp.cuda.Device().synchronize()
if mempool:
mr = MemoryResourceFactory(mempool)
handler = mr.get_dev_mem_handler()
cutn.set_device_mem_handler(handle, handler)
workspace_hint = 32*1024**2 # large enough for our test cases
# we have to run this API in any case in order to create a path
cutn.contraction_optimize(
handle, dscr, config, workspace_hint, info)
# for simplicity, compute grads for all tensors
if contract == "gradient":
if self.qual is None:
# set up the grad flag via TN attributes instead of input qualifiers
tensor_id_range = np.arange(len(tn.input_tensors), dtype=np.int32)
net_attr_dtype = cutn.network_get_attribute_dtype(
cutn.NetworkAttribute.INPUT_TENSORS_REQUIRE_GRAD)
tensor_ids = np.zeros(1, dtype=net_attr_dtype)
tensor_ids['num_tensors'] = tensor_id_range.size
tensor_ids['data'] = tensor_id_range.ctypes.data
cutn.network_set_attribute(
handle, dscr, cutn.NetworkAttribute.INPUT_TENSORS_REQUIRE_GRAD,
tensor_ids.ctypes.data, tensor_ids.dtype.itemsize)
# round-trip
tensor_id_range_back = np.zeros_like(tensor_id_range)
tensor_ids['num_tensors'] = tensor_id_range_back.size
tensor_ids['data'] = tensor_id_range_back.ctypes.data
cutn.network_get_attribute(
handle, dscr, cutn.NetworkAttribute.INPUT_TENSORS_REQUIRE_GRAD,
tensor_ids.ctypes.data, tensor_ids.dtype.itemsize)
assert (tensor_id_range_back == tensor_id_range).all()
output_grads = cp.ones_like(tn.output_tensor)
# manage workspace
placeholder = []
if mempool is None:
cutn.workspace_compute_contraction_sizes(handle, dscr, info, workspace)
for kind in cutn.WorkspaceKind: # for both scratch & cache
required_size = cutn.workspace_get_memory_size(
handle, workspace,
getattr(cutn.WorksizePref, f"{workspace_pref.upper()}"),
cutn.Memspace.DEVICE, # TODO: parametrize memspace?
kind)
if contract != "gradient":
cutn.workspace_compute_sizes(handle, dscr, info, workspace)
required_size_deprecated = cutn.workspace_get_memory_size(
handle, workspace,
getattr(cutn.WorksizePref, f"{workspace_pref.upper()}"),
cutn.Memspace.DEVICE, # TODO: parametrize memspace?
kind)
assert required_size == required_size_deprecated
if workspace_hint < required_size:
assert False, \
f"wrong assumption on the workspace size " \
f"(given: {workspace_hint}, needed: {required_size})"
if required_size > 0:
workspace_ptr = cp.cuda.alloc(required_size)
cutn.workspace_set_memory(
handle, workspace,
cutn.Memspace.DEVICE,
kind,
workspace_ptr.ptr, required_size)
placeholder.append(workspace_ptr) # keep it alive
# round-trip check
assert ((workspace_ptr.ptr, required_size) ==
cutn.workspace_get_memory(handle, workspace,
cutn.Memspace.DEVICE, kind))
else:
for kind in cutn.WorkspaceKind:
cutn.workspace_set_memory(
handle, workspace,
cutn.Memspace.DEVICE,
kind,
0, -1) # TODO: check custom workspace size?
plan = None
try:
plan = cutn.create_contraction_plan(
handle, dscr, info, workspace)
if autotune:
cutn.contraction_autotune(
handle, plan,
tn.get_input_tensors(**input_form),
tn.get_output_tensor(),
workspace, pref, stream.ptr)
# we don't care about correctness here, so just contract 1 slice
# TODO(leofang): check correctness?
if contract == "legacy":
cutn.contraction(
handle, plan,
tn.get_input_tensors(**input_form),
tn.get_output_tensor(),
workspace, 0, stream.ptr)
elif contract in ("slice_group", "gradient"):
accumulate = 0
cutn.contract_slices(
handle, plan,
tn.get_input_tensors(**input_form),
tn.get_output_tensor(),
accumulate,
workspace, self.slice_group, stream.ptr)
if contract == "gradient":
cutn.compute_gradients_backward(
handle, plan,
tn.get_input_tensors(**input_form),
output_grads.data.ptr,
tn.get_gradient_tensors(**input_form),
accumulate, workspace, stream.ptr)
stream.synchronize()
finally:
if plan is not None:
cutn.destroy_contraction_plan(plan)
if contract == "gradient" and torch:
if not torch.cuda.is_available():
# copy data back to CPU
dev = "cpu"
func = cp.asnumpy
else:
# zero-copy from CuPy to PyTorch!
dev = "cuda"
func = (lambda x: x) # no op
inputs = [torch.as_tensor(func(t), device=dev)
for t in tn.input_tensors]
output_grads = torch.as_tensor(func(output_grads), device=dev)
for t in inputs:
t.requires_grad_(True)
assert t.grad is None
# repeat the same calculation with PyTorch so that it fills up the
# gradients for us to do verification
out = torch.einsum(tn.einsum, *inputs)
out.backward(output_grads)
# compare gradients
for grad_cutn, in_torch in zip(tn.gradients, inputs):
grad_torch = in_torch.grad
if torch.is_complex(grad_torch):
grad_torch = grad_torch.conj().resolve_conj()
# zero-copy if on GPU
assert cp.allclose(grad_cutn, cp.asarray(grad_torch))
@pytest.mark.parametrize(
'source', ('int', 'seq', 'range')
)
class TestSliceGroup:
@manage_resource('handle')
def test_slice_group(self, source):
# we don't do a simple round-trip test here because there are two
# flavors of constructors
if source == "int":
ids = np.arange(10, dtype=np.int64)
slice_group = cutn.create_slice_group_from_ids(
self.handle, ids.ctypes.data, ids.size)
elif source == "seq":
ids = np.arange(10, dtype=np.int64)
slice_group = cutn.create_slice_group_from_ids(
self.handle, ids, ids.size)
elif source == "range":
slice_group = cutn.create_slice_group_from_id_range(
self.handle, 0, 10, 1)
cutn.destroy_slice_group(slice_group)
# TODO: add more different memory sources
@pytest.mark.parametrize(
'source', (None, "py-callable", 'cffi', 'cffi_struct')
)
class TestMemHandler(MemHandlerTestBase):
mod = cutn
prefix = "cutensornet"
error = cutn.cuTensorNetError
@manage_resource('handle')
def test_set_get_device_mem_handler(self, source):
self._test_set_get_device_mem_handler(source, self.handle)
class TensorDecompositionFactory:
# QR/SVD Example: "ab->ax,xb"
    # Gate Example: "ijk,klm,jlpq->ipk,kqm"
# This factory CANNOT be reused; once a tensor descriptor uses it, it must
# be discarded.
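    # The factory creates random CuPy input tensor(s) for the split expression and pre-allocates
    # the "left"/"right" output tensors, with the shared-mode extent capped by max_extent when a
    # fixed-extent truncation is requested.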
def __init__(self, subscript, shapes, dtype, max_extent=None):
self.subscript = subscript
if len(shapes) not in [1, 3]:
raise NotImplementedError
modes_in, left_modes, right_modes, shared_mode = approxTN_utils.parse_split_expression(subscript)
modes_in = modes_in.split(",")
size_dict = dict()
for modes, shape in zip(modes_in, shapes):
for mode, extent in zip(modes, shape):
if mode in size_dict:
assert size_dict[mode] == extent
else:
size_dict[mode] = extent
_, left_modes_out, right_modes_out, shared_mode_out, _, mid_extent = approxTN_utils.parse_modes_extents(size_dict, subscript)
# Note: we need to parse options as this is where max_extent is specified
self.shared_mode_idx_left = left_modes_out.find(shared_mode_out)
self.shared_mode_idx_right = right_modes_out.find(shared_mode_out)
if max_extent is None:
# no truncation on extent
self.mid_extent = mid_extent
else:
assert max_extent > 0
self.mid_extent = min(mid_extent, max_extent)
self.tensor_names = [f"input_{i}" for i in range(len(shapes))] + ["left", "right"] # note s needs to be explictly managed in the tester function
# xp strides in bytes, cutn strides in counts
dtype = cp.dtype(dtype)
real_dtype = dtype.char.lower()
is_complex = dtype.char != real_dtype
itemsize = dtype.itemsize
def _get_tensor(name, modes):
if name.startswith('input'):
shape = [size_dict[mode] for mode in modes]
if is_complex: # complex
arr = (cp.random.random(shape, dtype=real_dtype)
+ 1j*cp.random.random(shape, dtype=real_dtype)).astype(dtype)
else:
arr = cp.random.random(shape, dtype=dtype)
else:
shape = [self.mid_extent if mode == shared_mode_out else size_dict[mode] for mode in modes]
arr = cp.empty(shape, dtype=dtype, order='F')
return arr
for name, modes in zip(self.tensor_names, modes_in + [left_modes_out, right_modes_out]):
arr = _get_tensor(name, modes)
setattr(self, f'{name}_tensor', arr)
setattr(self, f'{name}_n_modes', len(arr.shape))
setattr(self, f'{name}_extent', arr.shape)
setattr(self, f'{name}_stride', [stride // itemsize for stride in arr.strides])
setattr(self, f'{name}_mode', tuple([ord(m) for m in modes]))
def _get_data_type(self, category):
if 'n_modes' in category:
return np.int32
elif 'extent' in category:
return np.int64
elif 'stride' in category:
return np.int64
elif 'mode' in category:
return np.int32
elif 'tensor' in category:
return None # unused
else:
assert False
def _return_data(self, category, return_value):
data = getattr(self, category)
if return_value == 'int':
if len(data) == 0:
# empty, give it a NULL
return 0
else:
# return int as void*
data = np.asarray(data, dtype=self._get_data_type(category))
setattr(self, category, data) # keep data alive
return data.ctypes.data
elif return_value == 'seq':
return data
else:
assert False
def get_tensor_metadata(self, name, **kwargs):
assert name in self.tensor_names
n_modes = getattr(self, f'{name}_n_modes')
extent = self._return_data(f'{name}_extent', kwargs.pop('extent'))
stride = self._return_data(f'{name}_stride', kwargs.pop('stride'))
mode = self._return_data(f'{name}_mode', kwargs.pop('mode'))
return n_modes, extent, stride, mode
def get_tensor_ptr(self, name):
return getattr(self, f'{name}_tensor').data.ptr
def get_operands(self, include_inputs=True, include_outputs=True):
operands = []
for name in self.tensor_names:
if include_inputs and name.startswith('input'):
operands.append(getattr(self, f'{name}_tensor'))
elif include_outputs and not name.startswith('input'):
operands.append(getattr(self, f'{name}_tensor'))
return operands
@testing.parameterize(*testing.product({
'tn': tensor_decomp_expressions,
'dtype': (
np.float32, np.float64, np.complex64, np.complex128
),
'tensor_form': (
{'extent': 'int', 'stride': 'int', 'mode': 'int'},
{'extent': 'seq', 'stride': 'seq', 'mode': 'seq'},
),
}))
class TestTensorQR:
# There is no easy way for us to test each API independently, so we instead
# parametrize the steps and test the whole workflow
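    # Steps below: query the scratch workspace size, attach device memory to the workspace
    # descriptor, run cutn.tensor_qr, and verify the result by re-contracting Q and R against
    # the input tensor (plus unitarity / upper-triangularity checks in the helper).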
@manage_resource('handle')
@manage_resource('tensor_decom')
@manage_resource('workspace')
def test_tensor_qr(self):
# unpack
handle, tn, workspace = self.handle, self.tn, self.workspace
tensor_in, tensor_q, tensor_r = self.tensor_decom
dtype = cp.dtype(self.dtype)
# prepare workspace
cutn.workspace_compute_qr_sizes(
handle, tensor_in, tensor_q, tensor_r, workspace)
# for now host workspace is always 0, so just query device one
# also, it doesn't matter which one (min/recommended/max) is queried
required_size = cutn.workspace_get_memory_size(
handle, workspace, cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE, # TODO: parametrize memspace?
cutn.WorkspaceKind.SCRATCH)
if required_size > 0:
workspace_ptr = cp.cuda.alloc(required_size)
cutn.workspace_set_memory(
handle, workspace, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH,
workspace_ptr.ptr, required_size)
# round-trip check
assert (workspace_ptr.ptr, required_size) == cutn.workspace_get_memory(
handle, workspace, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
# perform QR
stream = cp.cuda.get_current_stream().ptr # TODO
cutn.tensor_qr(
handle, tensor_in, tn.get_tensor_ptr('input_0'),
tensor_q, tn.get_tensor_ptr('left'),
tensor_r, tn.get_tensor_ptr('right'),
workspace, stream)
# for QR, no need to compute the reference for correctness check
operands = tn.get_operands(include_inputs=True, include_outputs=True) # input, q, r
assert approxTN_utils.verify_split_QR(tn.subscript, *operands, None, None)
@testing.parameterize(*testing.product({
'tn': tensor_decomp_expressions,
'dtype': (
np.float32, np.float64, np.complex64, np.complex128
),
'tensor_form': (
{'extent': 'int', 'stride': 'int', 'mode': 'int'},
{'extent': 'seq', 'stride': 'seq', 'mode': 'seq'},
),
'options': (
{}, # standard exact svd
{'max_extent': 4, 'normalization':'L1', 'partition':'U', 'algorithm': 'gesvdr', 'gesvdr_niters': 40}, # fix extent truncation
{'abs_cutoff': 0.1, 'rel_cutoff': 0.1, 'algorithm': 'gesvdj', 'gesvdj_tol':1e-14, 'gesvdj_max_sweeps': 80}, # value based truncation
{'abs_cutoff': 0.1, 'normalization':'L2', 'partition':'V', 'algorithm': 'gesvdj'}, # absolute value based truncation
{'rel_cutoff': 0.1, 'normalization':'LInf', 'partition':'UV', 'algorithm': 'gesvdp'}, # relative value based truncation
{'max_extent': 4, 'abs_cutoff': 0.1, 'rel_cutoff': 0.1, 'normalization':'L1', 'partition':'UV', 'algorithm': 'gesvdp'}, # compound truncation
),
}))
class TestTensorSVD:
# There is no easy way for us to test each API independently, so we instead
# parametrize the steps and test the whole workflow
@manage_resource('handle')
@manage_resource('tensor_decom')
@manage_resource('svd_config')
@manage_resource('svd_info')
@manage_resource('workspace')
def test_tensor_svd(self):
# unpack
handle, tn, workspace = self.handle, self.tn, self.workspace
tensor_in, tensor_u, tensor_v = self.tensor_decom
svd_config, svd_info = self.svd_config, self.svd_info
dtype = cp.dtype(self.dtype)
# parse svdConfig
svd_method = check_or_create_options(tensor.SVDMethod, self.options, "SVDMethod")
parse_svd_config(handle, svd_config, svd_method, logger=None)
# prepare workspace
cutn.workspace_compute_svd_sizes(
handle, tensor_in, tensor_u, tensor_v, svd_config, workspace)
# for now host workspace is always 0, so just query device one
# also, it doesn't matter which one (min/recommended/max) is queried
workspaces = {}
allocators = {cutn.Memspace.DEVICE: cp.cuda.alloc,
cutn.Memspace.HOST: lambda nbytes: np.empty(nbytes, dtype=np.int8)}
for mem_space, allocator in allocators.items():
required_size = cutn.workspace_get_memory_size(
handle, workspace, cutn.WorksizePref.MIN,
mem_space,
cutn.WorkspaceKind.SCRATCH)
if required_size > 0:
workspaces[mem_space] = workspace_ptr = allocator(required_size) # keep alive
if mem_space == cutn.Memspace.DEVICE:
workspace_ptr_address = workspace_ptr.ptr
else:
workspace_ptr_address = workspace_ptr.ctypes.data
cutn.workspace_set_memory(
handle, workspace, mem_space, cutn.WorkspaceKind.SCRATCH,
workspace_ptr_address, required_size)
# round-trip check
assert (workspace_ptr_address, required_size) == cutn.workspace_get_memory(
handle, workspace, mem_space, cutn.WorkspaceKind.SCRATCH)
partition = self.options.get("partition", None)
if partition is None:
s = cp.empty(tn.mid_extent, dtype=dtype.char.lower())
s_ptr = s.data.ptr
else:
s = None
s_ptr = 0
# perform SVD
stream = cp.cuda.get_current_stream().ptr # TODO
cutn.tensor_svd(
handle, tensor_in, tn.get_tensor_ptr('input_0'),
tensor_u, tn.get_tensor_ptr('left'),
s_ptr,
tensor_v, tn.get_tensor_ptr('right'),
svd_config, svd_info, workspace, stream)
# get runtime truncation details
info = get_svd_info_dict(handle, svd_info)
T, u, v = tn.get_operands(include_inputs=True, include_outputs=True)
        # update the container if the reduced extent is found to differ from the specified mid extent
extent_U_out, strides_U_out = cutn.get_tensor_details(handle, tensor_u)[2:]
extent_V_out, strides_V_out = cutn.get_tensor_details(handle, tensor_v)[2:]
reduced_extent = info['reduced_extent']
assert extent_U_out[tn.shared_mode_idx_left] == reduced_extent
assert extent_V_out[tn.shared_mode_idx_right] == reduced_extent
if tuple(extent_U_out) != u.shape:
strides_U_out = [i * u.itemsize for i in strides_U_out]
strides_V_out = [i * v.itemsize for i in strides_V_out]
tn.left_tensor = u = cp.ndarray(extent_U_out, dtype=u.dtype, memptr=u.data, strides=strides_U_out)
if s is not None:
s = cp.ndarray(reduced_extent, dtype=s.dtype, memptr=s.data, order='F')
tn.right_tensor = v = cp.ndarray(extent_V_out, dtype=v.dtype, memptr=v.data, strides=strides_V_out)
u_ref, s_ref, v_ref, info_ref = approxTN_utils.tensor_decompose(
tn.subscript, T,
method='svd', return_info=True,
**self.options)
assert approxTN_utils.verify_split_SVD(
tn.subscript, T,
tn.left_tensor, s, tn.right_tensor,
u_ref, s_ref, v_ref,
info=info, info_ref=info_ref,
**self.options)
@testing.parameterize(*testing.product({
'tn': gate_decomp_expressions,
'dtype': (
np.float32, np.float64, np.complex64, np.complex128
),
'tensor_form': (
{'extent': 'int', 'stride': 'int', 'mode': 'int'},
{'extent': 'seq', 'stride': 'seq', 'mode': 'seq'},
),
'algo': (
"direct", "reduced"
),
'options': (
{}, # standard exact svd
{'max_extent': 4, 'normalization':'L1', 'partition':'U', 'algorithm': 'gesvdr', 'gesvdr_niters': 40}, # fix extent truncation
{'abs_cutoff': 0.1, 'rel_cutoff': 0.1, 'algorithm': 'gesvdj', 'gesvdj_tol':1e-14, 'gesvdj_max_sweeps': 80}, # value based truncation
{'abs_cutoff': 0.1, 'normalization':'L2', 'partition':'V', 'algorithm': 'gesvdj'}, # absolute value based truncation
{'rel_cutoff': 0.1, 'normalization':'LInf', 'partition':'UV', 'algorithm': 'gesvdp'}, # relative value based truncation
{'max_extent': 4, 'abs_cutoff': 0.1, 'rel_cutoff': 0.1, 'normalization':'L1', 'partition':'UV', 'algorithm': 'gesvdp'}, # compound truncation
),
}))
class TestTensorGate:
GATE_ALGO_MAP = {"direct": cutn.GateSplitAlgo.DIRECT,
"reduced": cutn.GateSplitAlgo.REDUCED}
# There is no easy way for us to test each API independently, so we instead
# parametrize the steps and test the whole workflow
@manage_resource('handle')
@manage_resource('tensor_decom')
@manage_resource('svd_config')
@manage_resource('svd_info')
@manage_resource('workspace')
def test_gate_split(self):
# unpack
handle, tn, workspace = self.handle, self.tn, self.workspace
tensor_in_a, tensor_in_b, tensor_in_g, tensor_u, tensor_v = self.tensor_decom
algo = self.algo
gate_algorithm = self.GATE_ALGO_MAP[algo]
svd_config, svd_info = self.svd_config, self.svd_info
# parse svdConfig
svd_method = check_or_create_options(tensor.SVDMethod, self.options, "SVDMethod")
parse_svd_config(handle, svd_config, svd_method, logger=None)
dtype = cp.dtype(self.dtype)
compute_type = dtype_to_compute_type[self.dtype]
# prepare workspace
cutn.workspace_compute_gate_split_sizes(handle,
tensor_in_a, tensor_in_b, tensor_in_g, tensor_u, tensor_v,
gate_algorithm, svd_config, compute_type, workspace)
workspaces = {}
allocators = {cutn.Memspace.DEVICE: cp.cuda.alloc,
cutn.Memspace.HOST: lambda nbytes: np.empty(nbytes, dtype=np.int8)}
for mem_space, allocator in allocators.items():
required_size = cutn.workspace_get_memory_size(
handle, workspace, cutn.WorksizePref.MIN,
mem_space,
cutn.WorkspaceKind.SCRATCH)
if required_size > 0:
workspaces[mem_space] = workspace_ptr = allocator(required_size) # keep alive
if mem_space == cutn.Memspace.DEVICE:
workspace_ptr_address = workspace_ptr.ptr
else:
workspace_ptr_address = workspace_ptr.ctypes.data
cutn.workspace_set_memory(
handle, workspace, mem_space, cutn.WorkspaceKind.SCRATCH,
workspace_ptr_address, required_size)
# round-trip check
assert (workspace_ptr_address, required_size) == cutn.workspace_get_memory(
handle, workspace, mem_space, cutn.WorkspaceKind.SCRATCH)
partition = self.options.get("partition", None)
if partition is None:
s = cp.empty(tn.mid_extent, dtype=dtype.char.lower())
s_ptr = s.data.ptr
else:
s = None
s_ptr = 0
# perform gate split
stream = cp.cuda.get_current_stream().ptr # TODO
cutn.gate_split(handle, tensor_in_a, tn.get_tensor_ptr('input_0'),
tensor_in_b, tn.get_tensor_ptr('input_1'),
tensor_in_g, tn.get_tensor_ptr('input_2'),
tensor_u, tn.get_tensor_ptr('left'), s_ptr,
tensor_v, tn.get_tensor_ptr('right'),
gate_algorithm, svd_config, compute_type,
svd_info, workspace, stream)
# get runtime truncation information
info = get_svd_info_dict(handle, svd_info)
arr_a, arr_b, arr_gate, u, v = tn.get_operands(include_inputs=True, include_outputs=True)
        # update the container if the reduced extent is found to differ from the specified mid extent
extent_U_out, strides_U_out = cutn.get_tensor_details(handle, tensor_u)[2:]
extent_V_out, strides_V_out = cutn.get_tensor_details(handle, tensor_v)[2:]
reduced_extent = info['reduced_extent']
assert extent_U_out[tn.shared_mode_idx_left] == reduced_extent
assert extent_V_out[tn.shared_mode_idx_right] == reduced_extent
if tuple(extent_U_out) != u.shape:
strides_U_out = [i * u.itemsize for i in strides_U_out]
strides_V_out = [i * v.itemsize for i in strides_V_out]
tn.left_tensor = u = cp.ndarray(extent_U_out, dtype=u.dtype, memptr=u.data, strides=strides_U_out)
if s is not None:
s = cp.ndarray(reduced_extent, dtype=s.dtype, memptr=s.data, order='F')
tn.right_tensor = v = cp.ndarray(extent_V_out, dtype=v.dtype, memptr=v.data, strides=strides_V_out)
u_ref, s_ref, v_ref, info_ref = approxTN_utils.gate_decompose(
tn.subscript,
arr_a,
arr_b,
arr_gate,
gate_algo=algo,
return_info=True,
**self.options)
assert approxTN_utils.verify_split_SVD(
tn.subscript, None,
u, s, v,
u_ref, s_ref, v_ref,
info=info, info_ref=info_ref,
**self.options)
class TestTensorSVDConfig:
@manage_resource('handle')
@manage_resource('svd_config')
def test_tensor_svd_config_create_destroy(self):
# simple round-trip test
pass
@pytest.mark.parametrize(
'attr', [val for val in cutn.TensorSVDConfigAttribute if val != cutn.TensorSVDConfigAttribute.ALGO_PARAMS]
)
@manage_resource('handle')
@manage_resource('svd_config')
def test_tensor_svd_config_get_set_attribute(self, attr):
handle, svd_config = self.handle, self.svd_config
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
# Hack: assume this is a valid value for all attrs
factor = np.asarray([0.8], dtype=dtype)
cutn.tensor_svd_config_set_attribute(
handle, svd_config, attr,
factor.ctypes.data, factor.dtype.itemsize)
# do a round-trip test as a sanity check
factor2 = np.zeros_like(factor)
cutn.tensor_svd_config_get_attribute(
handle, svd_config, attr,
factor2.ctypes.data, factor2.dtype.itemsize)
assert factor == factor2
@pytest.mark.parametrize(
'svd_algorithm', (cutn.TensorSVDAlgo.GESVDJ, cutn.TensorSVDAlgo.GESVDR)
)
@manage_resource('handle')
@manage_resource('svd_config')
def test_tensor_svd_config_get_set_params_attribute(self, svd_algorithm):
handle, svd_config = self.handle, self.svd_config
# set ALGO first
algo_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.ALGO)
algo = np.asarray(svd_algorithm, dtype=algo_dtype)
cutn.tensor_svd_config_set_attribute(
handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO,
algo.ctypes.data, algo.dtype.itemsize)
algo_params_dtype = cutn.tensor_svd_algo_params_get_dtype(svd_algorithm)
# Hack: assume this is a valid value for all SVD parameters
factor = np.asarray([1.8], dtype=algo_params_dtype) # 0 may trigger default behavior, eg, gesvdr_niters set to 0 means default (10)
cutn.tensor_svd_config_set_attribute(
handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS,
factor.ctypes.data, factor.dtype.itemsize)
# do a round-trip test as a sanity check
factor2 = np.zeros_like(factor)
cutn.tensor_svd_config_get_attribute(
handle, svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS,
factor2.ctypes.data, factor2.dtype.itemsize)
assert factor == factor2
@pytest.mark.skipif(mpi4py is None, reason="need mpi4py")
@pytest.mark.skipif(os.environ.get("CUTENSORNET_COMM_LIB") is None,
reason="wrapper lib not set")
class TestDistributed:
def _get_comm(self, comm):
if comm == 'world':
return MPI.COMM_WORLD.Dup()
elif comm == 'self':
return MPI.COMM_SELF.Dup()
else:
assert False
@pytest.mark.parametrize(
'comm', ('world', 'self'),
)
@manage_resource('handle')
def test_distributed(self, comm):
handle = self.handle
comm = self._get_comm(comm)
cutn.distributed_reset_configuration(
handle, *cutn.get_mpi_comm_pointer(comm))
assert comm.Get_size() == cutn.distributed_get_num_ranks(handle)
assert comm.Get_rank() == cutn.distributed_get_proc_rank(handle)
cutn.distributed_synchronize(handle)
cutn.distributed_reset_configuration(handle, 0, 0) # reset
# no need to free the comm, for world/self mpi4py does it for us...
class TestLogger(LoggerTestBase):
mod = cutn
prefix = "cutensornet"
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_cutensornet.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cuquantum
# We include torch tests here unconditionally, and use pytest deselect to
# exclude them if torch is not present.
backend_names = (
"numpy",
"cupy",
"torch-cpu",
"torch-gpu",
)
dtype_names = (
"float16",
"float32",
"float64",
"complex64",
"complex128",
)
# the expressions here should be
# - a plain einsum expression (subscript, or interleaved as a tuple)
# - for interleaved, the output modes can be explicitly given or left as None
# - a list [einsum_expr, network_options, optimizer_options, overwrite_dtype]
# the second variant is suitable for testing exotic TNs that require further customization
# TODO: expand the tests
einsum_expressions = (
"ij,jb,ah",
"ea,fb,abcd,gc,hd->efgh",
"ea,fb,abcd,gc,hd",
"ij,jk,kl->il",
"ij,jk,kl",
"ij,jk,ki",
"abc,bcd->",
"ab,bc,ca->",
"abc,ace,abd->de",
"abc,ace,abd->ade",
["...ik,...k,...kj->...ij", {}, {}, "complex128"], # SVD reconstruction
((2, 3, 4), (3, 4, 5), (2, 1), (1, 5), None),
(('a', 'b'), ('b', 'c', 'd'), ('a',)), # opt_einsum and cutensornet support this, but not NumPy et al
[((5, 4, 3), (3, 4, 6), (6, 5), None), {}, {}, "float32"],
["abc,bcd,ade", {}, {"slicing": {"min_slices": 4}}, "float64"],
# TODO: need >8 operand tests (from L0)
["ao,bp,cq,dr,es,ft,gu,hv,iw,jx,ky,lz,mA,nB,oP,pO,qN,rM,sL,tK,uJ,vI,wH,xG,yF,zE,AD,BC,DC,FC,PC,CQ,FD,ID,DR,JE,KE,PE,ES,GF,FT,LG,NG,GU,IH,JH,MH,HV,KI,IW,KJ,JX,KY,NL,OL,LZ,NM,OM,MÀ,NÁ,PO,OÂ,PÃ,RQ,TQ,ÃQ,QÄ,TR,WR,RÅ,XS,YS,ÃS,SÆ,UT,TÇ,ZU,ÁU,UÈ,WV,XV,ÀV,VÉ,YW,WÊ,YX,XË,YÌ,ÁZ,ÂZ,ZÍ,ÁÀ,ÂÀ,ÀÎ,ÁÏ,ÃÂ,ÂÐ,ÃÑ,Äß,ÅÞ,ÆÝ,ÇÜ,ÈÛ,ÉÚ,ÊÙ,ËØ,Ì×,ÍÖ,ÎÕ,ÏÔ,ÐÓ,ÑÒ->", {}, {}, "float64"], # QAOA MaxCut
["ab,bc->ac", {'compute_type': cuquantum.ComputeType.COMPUTE_64F}, {}, "complex128"],
# CuPy large TN tests
["a,b,c->abc", {}, {}, "float64"],
["acdf,jbje,gihb,hfac", {}, {}, "float64"],
["acdf,jbje,gihb,hfac,gfac,gifabc,hfac", {}, {}, "float64"],
["chd,bde,agbc,hiad,bdi,cgh,agdb", {"blocking": "auto"}, {}, "float64"],
["eb,cb,fb->cef", {}, {}, "float64"],
["dd,fb,be,cdb->cef", {}, {}, "float64"],
["bca,cdb,dbf,afc->", {}, {}, "float64"],
["dcc,fce,ea,dbf->ab", {}, {}, "float64"],
["a,ac,ab,ad,cd,bd,bc->", {}, {}, "float64"],
)
# the expression here should be
# - a sequence of [decomposition_expression, input_tensor_shapes as a list of tuple]
tensor_decomp_expressions = (
('ab->ax,xb', [(8, 8)]),
('ab->ax,bx', [(8, 8)]),
('ab->xa,xb', [(8, 8)]),
('ab->xa,bx', [(8, 8)]),
('ab->ax,xb', [(6, 8)]),
('ab->ax,bx', [(6, 8)]),
('ab->xa,xb', [(6, 8)]),
('ab->xa,bx', [(6, 8)]),
('ab->ax,xb', [(8, 6)]),
('ab->ax,bx', [(8, 6)]),
('ab->xa,xb', [(8, 6)]),
('ab->xa,bx', [(8, 6)]),
('abcd->cxa,bdx', [(2, 3, 4, 5)]),
('abcd->cax,bdx', [(2, 3, 4, 5)]),
('mnijk->jny,kmyi', [(2, 9, 3, 3, 4)]),
)
# the expression here should be
# - a sequence of [gate_decomposition_expression, input_tensor_shapes as a list of tuple]
gate_decomp_expressions = (
('ijk,klm,jlpq->ipk,kqm', [(2, 2, 2), (2, 2, 2), (2, 2, 2, 2)]),
('ijk,klm,jlpq->kpi,qmk', [(2, 2, 2), (2, 2, 2), (2, 2, 2, 2)]),
('ijk,klm,jlpq->pki,mkq', [(2, 2, 2), (2, 2, 2), (2, 2, 2, 2)]),
('sOD,DdNr,ROrsq->KR,qKdN', [(2, 4, 2), (2, 3, 4, 2), (5, 4, 2, 2, 2)]),
('beQ,cey,cbJj->Je,jQey', [(3, 5, 4), (2, 5, 7), (2, 3, 4, 4)])
)
# the expression here can be
# - a string as a standard contract and decompose expression
# - a list of [contract decompose expression, network options, optimize options, kwargs]
contract_decompose_expr = (
'ea,fb,abcd,gc,hd->exf,gxh',
'ij,jk,kl->ix,lx',
'ijk,klm,jlpq->ipk,kqm',
'abcd,cdef->axb,fex',
'abcd,cdef->axf,bex',
['sOD,DdNr,ROrsq->KR,qKdN', {'blocking': 'auto'}, {}, {}],
'beQ,cey,cbJj->Je,jQey',
'ijlm,jqr,lqsn->imx,xrsn',
['ijk,klm,jlpq->ipk,kqm', {}, {}, {'return_info': False}],
['sOD,DdNr,ROrsq->KR,qKdN', {'device_id':0}, {'slicing': {'min_slices': 4}}, {'return_info': False}],
['ea,fb,abcd,gc,hd->exf,gxh', {'device_id':0}, {'path': [(2,4), (0,3), (0,2), (0,1)]}, {}],
)
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import sys
import dataclasses
import cupy
import numpy
import opt_einsum as oe
import pytest
from cuquantum import tensor, OptimizerInfo
from cuquantum.cutensornet.experimental import contract_decompose, ContractDecomposeAlgorithm, ContractDecomposeInfo
from cuquantum.cutensornet.experimental._internal.utils import is_gate_split
from cuquantum.cutensornet._internal.decomposition_utils import DECOMPOSITION_DTYPE_NAMES, parse_decomposition
from cuquantum.cutensornet._internal.utils import infer_object_package
from .approxTN_utils import split_contract_decompose, tensor_decompose, verify_split_QR, verify_split_SVD
from .data import backend_names, contract_decompose_expr
from .test_options import _OptionsBase
from .test_utils import DecomposeFactory, deselect_contract_decompose_algorithm_tests, deselect_decompose_tests, gen_rand_svd_method
from .test_utils import get_stream_for_backend
@pytest.mark.uncollect_if(func=deselect_decompose_tests)
@pytest.mark.parametrize(
"stream", (None, True)
)
@pytest.mark.parametrize(
"order", ("C", "F")
)
@pytest.mark.parametrize(
"dtype", DECOMPOSITION_DTYPE_NAMES
)
@pytest.mark.parametrize(
"xp", backend_names
)
@pytest.mark.parametrize(
"decompose_expr", contract_decompose_expr
)
class TestContractDecompose:
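    # The three tests below run contract_decompose with QR-only, SVD-only, and QR-assisted SVD
    # algorithms, and verify the outputs against a reference built from an opt_einsum contraction
    # followed by tensor_decompose from approxTN_utils.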
def _run_contract_decompose(self, decompose_expr, xp, dtype, order, stream, algorithm):
if isinstance(decompose_expr, list):
decompose_expr, options, optimize, kwargs = decompose_expr
else:
options, optimize, kwargs = {}, {}, {}
return_info = kwargs.get('return_info', True)
kwargs['return_info'] = return_info
factory = DecomposeFactory(decompose_expr)
operands = factory.generate_operands(factory.input_shapes, xp, dtype, order)
backend = sys.modules[infer_object_package(operands[0])]
contract_expr, decomp_expr = split_contract_decompose(decompose_expr)
        _, input_modes, output_modes, _, _, _, max_mid_extent = parse_decomposition(decompose_expr, *operands)
if not is_gate_split(input_modes, output_modes, algorithm):
if algorithm.qr_method is not False and algorithm.svd_method is not False: # QR assisted contract SVD decomposition
pytest.skip("QR assisted SVD decomposition not support for more than three operands")
shared_mode_out = (set(output_modes[0]) & set(output_modes[1])).pop()
shared_mode_idx_left = output_modes[0].index(shared_mode_out)
shared_mode_idx_right = output_modes[1].index(shared_mode_out)
if stream:
stream = get_stream_for_backend(backend)
outputs = contract_decompose(decompose_expr, *operands,
algorithm=algorithm, stream=stream, options=options, optimize=optimize, **kwargs)
if stream:
stream.synchronize()
#NOTE: The reference here is based on splitting the contract_decompose problem into two sub-problems
# - 1. contraction. The reference is based on opt_einsum contract
# - 2. decomposition. The reference is based on tensor_decompose in approxTN_utils
        # note that a naive reference implementation here may not find the optimal reduced extent, for example:
# A[x,y] B[y,z] with input extent x=4, y=2, z=4 -> contract QR decompose -> A[x,k]B[k,z] .
# When naively applying the direct algorithm above, the mid extent k in the output will be 2.
        # This case is already considered in contract_decompose. Here we make the following modifications for correctness testing:
# For contract and QR decompose, we check the output extent is correct
# For contract and SVD decompose, we inject this mid_extent in the args to the reference implementation when needed.
intm = oe.contract(contract_expr, *operands)
if algorithm.svd_method is False:
if return_info:
q, r, info = outputs
assert isinstance(info, ContractDecomposeInfo)
else:
q, r = outputs
assert type(q) is type(r)
assert type(q) is type(operands[0])
assert q.shape[shared_mode_idx_left] == max_mid_extent
assert r.shape[shared_mode_idx_right] == max_mid_extent
assert verify_split_QR(decomp_expr, intm, q, r, None, None)
else:
svd_kwargs = dataclasses.asdict(algorithm.svd_method)
max_extent = svd_kwargs.get('max_extent')
if max_extent in [0, None] or max_extent > max_mid_extent:
svd_kwargs['max_extent'] = max_mid_extent
outputs_ref = tensor_decompose(decomp_expr, intm, method="svd", return_info=return_info, **svd_kwargs)
if return_info:
u, s, v, info = outputs
assert isinstance(info, ContractDecomposeInfo)
u_ref, s_ref, v_ref, info_ref = outputs_ref
info = info.svd_info
assert isinstance(info, tensor.SVDInfo)
info = dataclasses.asdict(info)
else:
u, s, v = outputs
u_ref, s_ref, v_ref = outputs_ref
info = info_ref = None
assert type(u) is type(v)
assert type(u) is type(operands[0])
if algorithm.svd_method.partition is None:
assert type(u) is type(s)
else:
assert s is None
assert verify_split_SVD(decomp_expr,
intm,
u, s, v,
u_ref, s_ref, v_ref,
info=info,
info_ref=info_ref,
**svd_kwargs)
def test_contract_qr_decompose(self, decompose_expr, xp, dtype, order, stream):
algorithm = ContractDecomposeAlgorithm(qr_method={}, svd_method=False)
self._run_contract_decompose(decompose_expr, xp, dtype, order, stream, algorithm)
def test_contract_svd_decompose(self, decompose_expr, xp, dtype, order, stream):
rng = numpy.random.default_rng(2021)
methods = [tensor.SVDMethod()] + [gen_rand_svd_method(rng) for _ in range(10)]
for svd_method in methods:
algorithm = ContractDecomposeAlgorithm(qr_method=False, svd_method=svd_method)
self._run_contract_decompose(decompose_expr, xp, dtype, order, stream, algorithm)
def test_contract_qr_assisted_svd_decompose(self, decompose_expr, xp, dtype, order, stream):
rng = numpy.random.default_rng(2021)
methods = [tensor.SVDMethod()] + [gen_rand_svd_method(rng) for _ in range(10)]
for svd_method in methods:
algorithm = ContractDecomposeAlgorithm(qr_method={}, svd_method=svd_method)
self._run_contract_decompose(decompose_expr, xp, dtype, order, stream, algorithm)
class TestContractDecomposeAlgorithm(_OptionsBase):
options_type = ContractDecomposeAlgorithm
@pytest.mark.uncollect_if(func=deselect_contract_decompose_algorithm_tests)
@pytest.mark.parametrize(
'svd_method', [False, {}, tensor.SVDMethod()]
)
@pytest.mark.parametrize(
'qr_method', [False, {}]
)
def test_contract_decompose_algorithm(self, qr_method, svd_method):
self.create_options({'qr_method': qr_method, 'svd_method': svd_method})
class TestContractDecomposeInfo(_OptionsBase):
options_type = ContractDecomposeInfo
# Not all fields are optional so we test them all at once
@pytest.mark.uncollect_if(func=deselect_contract_decompose_algorithm_tests)
@pytest.mark.parametrize(
'optimizer_info', [None, OptimizerInfo(largest_intermediate=100.0,
opt_cost=100.0,
path=[(0, 1), (0, 1)],
slices=[("a", 4), ("b", 3)],
num_slices=10,
intermediate_modes=[(1, 3), (2, 4)])]
)
@pytest.mark.parametrize(
'svd_info', [None, tensor.SVDInfo(reduced_extent=2, full_extent=4, discarded_weight=0.01, algorithm='gesvdj')]
)
@pytest.mark.parametrize(
'svd_method', [False, {}, tensor.SVDMethod()]
)
@pytest.mark.parametrize(
'qr_method', [False, {}]
)
def test_contract_decompose_info(self, qr_method, svd_method, svd_info, optimizer_info):
self.create_options({
"qr_method": qr_method,
"svd_method": svd_method,
"svd_info": svd_info,
"optimizer_info": optimizer_info,
})
| cuQuantum-main | python/tests/cuquantum_tests/cutensornet_tests/test_experimental.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from mpi4py import MPI
import cuquantum
from cuquantum import cutensornet as cutn
root = 0
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
if rank == root:
print("*** Printing is done only from the root process to prevent jumbled messages ***")
print(f"The number of processes is {size}")
num_devices = cp.cuda.runtime.getDeviceCount()
device_id = rank % num_devices
dev = cp.cuda.Device(device_id)
dev.use()
props = cp.cuda.runtime.getDeviceProperties(dev.id)
if rank == root:
print("cuTensorNet-vers:", cutn.get_version())
print("===== root process device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
######################################################################################
# Computing: R_{k,l} = A_{a,b,c,d,e,f} B_{b,g,h,e,i,j} C_{m,a,g,f,i,k} D_{l,c,h,d,j,m}
######################################################################################
if rank == root:
print("Include headers and define data types.")
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
num_inputs = 4
# Create an array of modes
modes_A = [ord(c) for c in ('a','b','c','d','e','f')]
modes_B = [ord(c) for c in ('b','g','h','e','i','j')]
modes_C = [ord(c) for c in ('m','a','g','f','i','k')]
modes_D = [ord(c) for c in ('l','c','h','d','j','m')]
modes_R = [ord(c) for c in ('k','l')]
# Create an array of extents (shapes) for each tensor
dim = 8
extent_A = (dim,) * 6
extent_B = (dim,) * 6
extent_C = (dim,) * 6
extent_D = (dim,) * 6
extent_R = (dim,) * 2
if rank == root:
print("Define network, modes, and extents.")
#################
# Initialize data
#################
if rank == root:
A = np.random.random(np.prod(extent_A)).astype(np.float32)
B = np.random.random(np.prod(extent_B)).astype(np.float32)
C = np.random.random(np.prod(extent_C)).astype(np.float32)
D = np.random.random(np.prod(extent_D)).astype(np.float32)
else:
A = np.empty(np.prod(extent_A), dtype=np.float32)
B = np.empty(np.prod(extent_B), dtype=np.float32)
C = np.empty(np.prod(extent_C), dtype=np.float32)
D = np.empty(np.prod(extent_D), dtype=np.float32)
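# Broadcast the operand data from the root process so that every rank holds identical copies of A, B, C, and D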
comm.Bcast(A, root)
comm.Bcast(B, root)
comm.Bcast(C, root)
comm.Bcast(D, root)
A_d = cp.asarray(A)
B_d = cp.asarray(B)
C_d = cp.asarray(C)
D_d = cp.asarray(D)
R_d = cp.empty(np.prod(extent_R), dtype=np.float32)
raw_data_in_d = (A_d.data.ptr, B_d.data.ptr, C_d.data.ptr, D_d.data.ptr)
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A = len(modes_A)
nmode_B = len(modes_B)
nmode_C = len(modes_C)
nmode_D = len(modes_D)
nmode_R = len(modes_R)
###############################
# Create Contraction Descriptor
###############################
modes_in = (modes_A, modes_B, modes_C, modes_D)
extents_in = (extent_A, extent_B, extent_C, extent_D)
num_modes_in = (nmode_A, nmode_B, nmode_C, nmode_D)
# Strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides_in = (0, 0, 0, 0)
# Set up the tensor qualifiers for all input tensors
qualifiers_in = np.zeros(num_inputs, dtype=cutn.tensor_qualifiers_dtype)
# Set up tensor network
desc_net = cutn.create_network_descriptor(handle,
num_inputs, num_modes_in, extents_in, strides_in, modes_in, qualifiers_in, # inputs
nmode_R, extent_R, 0, modes_R, # output
data_type, compute_type)
if rank == root:
print("Initialize the cuTensorNet library and create a network descriptor.")
#####################################################
# Choose workspace limit based on available resources
#####################################################
free_mem, total_mem = dev.mem_info
free_mem = comm.allreduce(free_mem, MPI.MIN)
workspace_limit = int(free_mem * 0.9)
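# Duplicate the MPI communicator and register it with the library handle; with this automatic
# distributed interface, cuTensorNet parallelizes the contraction across the participating processes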
cutn_comm = comm.Dup()
cutn.distributed_reset_configuration(handle, MPI._addressof(cutn_comm), MPI._sizeof(cutn_comm))
if rank == root:
print("Reset distributed MPI configuration")
##############################################
# Find "optimal" contraction order and slicing
##############################################
optimizer_config = cutn.create_contraction_optimizer_config(handle)
optimizer_info = cutn.create_contraction_optimizer_info(handle, desc_net)
# Force slicing
min_slices_dtype = cutn.contraction_optimizer_config_get_attribute_dtype(
cutn.ContractionOptimizerConfigAttribute.SLICER_MIN_SLICES)
min_slices_factor = np.asarray((size,), dtype=min_slices_dtype)
cutn.contraction_optimizer_config_set_attribute(
handle, optimizer_config, cutn.ContractionOptimizerConfigAttribute.SLICER_MIN_SLICES,
min_slices_factor.ctypes.data, min_slices_factor.dtype.itemsize)
cutn.contraction_optimize(
handle, desc_net, optimizer_config, workspace_limit, optimizer_info)
num_slices_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.NUM_SLICES)
num_slices = np.zeros((1,), dtype=num_slices_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.NUM_SLICES,
num_slices.ctypes.data, num_slices.dtype.itemsize)
num_slices = int(num_slices)
assert num_slices > 0
if rank == root:
print("Find an optimized contraction path with cuTensorNet optimizer.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_contraction_sizes(handle, desc_net, optimizer_info, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH)
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
if rank == root:
print("Allocate workspace.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
plan = cutn.create_contraction_plan(handle, desc_net, optimizer_info, work_desc)
###################################################################################
# Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
###################################################################################
pref = cutn.create_contraction_autotune_preference(handle)
num_autotuning_iterations = 5 # may be 0
n_iter_dtype = cutn.contraction_autotune_preference_get_attribute_dtype(
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS)
num_autotuning_iterations = np.asarray([num_autotuning_iterations], dtype=n_iter_dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref,
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS,
num_autotuning_iterations.ctypes.data, num_autotuning_iterations.dtype.itemsize)
# modify the plan again to find the best pair-wise contractions
cutn.contraction_autotune(
handle, plan, raw_data_in_d, R_d.data.ptr,
work_desc, pref, stream.ptr)
cutn.destroy_contraction_autotune_preference(pref)
if rank == root:
print("Create a contraction plan for cuTENSOR and optionally auto-tune it.")
###########
# Execution
###########
minTimeCUTENSOR = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
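# With the distributed configuration set, each process calls contract_slices over the full slice
# range and the library takes care of distributing the work and reducing the result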
slice_group = cutn.create_slice_group_from_id_range(handle, 0, num_slices, 1)
for i in range(num_runs):
# Contract over all slices.
# A user may choose to parallelize over the slices across multiple devices.
e1.record()
cutn.contract_slices(
handle, plan, raw_data_in_d, R_d.data.ptr, False,
work_desc, slice_group, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) / 1000 # ms -> s
minTimeCUTENSOR = minTimeCUTENSOR if minTimeCUTENSOR < time else time
if rank == root:
print("Contract the network, each slice uses the same contraction plan.")
# free up the workspace
del work
# Compute the reference result.
if rank == root:
# recall that we set strides to null (0), so the data are in F-contiguous layout
A_d = A_d.reshape(extent_A, order='F')
B_d = B_d.reshape(extent_B, order='F')
C_d = C_d.reshape(extent_C, order='F')
D_d = D_d.reshape(extent_D, order='F')
R_d = R_d.reshape(extent_R, order='F')
path, _ = cuquantum.einsum_path("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d)
out = cp.einsum("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d, optimize=path)
if not cp.allclose(out, R_d):
raise RuntimeError("result is incorrect")
print("Check cuTensorNet result against that of cupy.einsum().")
#######################################################
flops_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT)
flops = np.zeros((1,), dtype=flops_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
flops.ctypes.data, flops.dtype.itemsize)
flops = float(flops)
if rank == root:
print(f"num_slices: {num_slices}")
print(f"{minTimeCUTENSOR * 1000 / num_slices} ms / slice")
print(f"{flops / 1e9 / minTimeCUTENSOR} GFLOPS/s")
cutn.destroy_slice_group(slice_group)
cutn.destroy_contraction_plan(plan)
cutn.destroy_contraction_optimizer_info(optimizer_info)
cutn.destroy_contraction_optimizer_config(optimizer_config)
cutn.destroy_network_descriptor(desc_net)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
if rank == root:
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/tensornet_example_mpi_auto.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
############################################################################################
# Computing: O_{a,m} = A_{a,b,c,d} B_{b,c,d,e} C_{e,f,g,h} D_{g,h,i,j} E_{i,j,k,l} F_{k,l,m}
############################################################################################
print("Include headers and define data types.")
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
num_inputs = 6
# Create an array of modes
modes_A = [ord(c) for c in ('a','b','c','d')]
modes_B = [ord(c) for c in ('b','c','d','e')]
modes_C = [ord(c) for c in ('e','f','g','h')]
modes_D = [ord(c) for c in ('g','h','i','j')]
modes_E = [ord(c) for c in ('i','j','k','l')]
modes_F = [ord(c) for c in ('k','l','m')]
modes_O = [ord(c) for c in ('a','m')]
# Create an array of extents (shapes) for each tensor
dim = 8
extent_A = (dim,) * 4
extent_B = (dim,) * 4
extent_C = (dim,) * 4
extent_D = (dim,) * 4
extent_E = (dim,) * 4
extent_F = (dim,) * 3
extent_O = (dim,) * 2
print("Define network, modes, and extents.")
#################
# Initialize data
#################
A_d = cp.random.random((np.prod(extent_A),), dtype=np.float32)
B_d = cp.random.random((np.prod(extent_B),), dtype=np.float32)
C_d = cp.random.random((np.prod(extent_C),), dtype=np.float32)
D_d = cp.random.random((np.prod(extent_D),), dtype=np.float32)
E_d = cp.random.random((np.prod(extent_E),), dtype=np.float32)
F_d = cp.random.random((np.prod(extent_F),), dtype=np.float32)
O_d = cp.zeros((np.prod(extent_O),), dtype=np.float32)
raw_data_in_d = (A_d.data.ptr, B_d.data.ptr, C_d.data.ptr, D_d.data.ptr, E_d.data.ptr, F_d.data.ptr)
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A = len(modes_A)
nmode_B = len(modes_B)
nmode_C = len(modes_C)
nmode_D = len(modes_D)
nmode_E = len(modes_E)
nmode_F = len(modes_F)
nmode_O = len(modes_O)
###############################
# Create Contraction Descriptor
###############################
modes_in = (modes_A, modes_B, modes_C, modes_D, modes_E, modes_F)
extents_in = (extent_A, extent_B, extent_C, extent_D, extent_E, extent_F)
num_modes_in = (nmode_A, nmode_B, nmode_C, nmode_D, nmode_E, nmode_F)
# Strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides_in = (0, 0, 0, 0, 0, 0)
# Set up the tensor qualifiers for all input tensors
qualifiers_in = np.zeros(num_inputs, dtype=cutn.tensor_qualifiers_dtype)
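# Mark the first five input tensors as constant so that intermediate tensors depending only on them
# can be cached and reused across repeated contractions of the same network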
for i in range(5):
qualifiers_in[i]['is_constant'] = True
# Set up tensor network
desc_net = cutn.create_network_descriptor(handle,
num_inputs, num_modes_in, extents_in, strides_in, modes_in, qualifiers_in, # inputs
nmode_O, extent_O, 0, modes_O, # output
data_type, compute_type)
print("Initialize the cuTensorNet library and create a network descriptor.")
#####################################################
# Choose workspace limit based on available resources
#####################################################
free_mem, total_mem = dev.mem_info
workspace_limit = int(free_mem * 0.9)
##############################################
# Set contraction order and slicing
##############################################
optimizer_info = cutn.create_contraction_optimizer_info(handle, desc_net)
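# Provide the contraction path manually as a sequence of pairwise contractions, each given by the
# indices of the two operands to contract, instead of letting the optimizer search for one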
path_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(cutn.ContractionOptimizerInfoAttribute.PATH)
path = np.asarray([(0, 1), (0, 4), (0, 3), (0, 2), (0, 1)], dtype=np.int32)
path_obj = np.zeros((1,), dtype=path_dtype)
path_obj["num_contractions"] = num_inputs - 1
path_obj["data"] = path.ctypes.data
cutn.contraction_optimizer_info_set_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.PATH,
path_obj.ctypes.data, path_obj.dtype.itemsize)
num_slices_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.NUM_SLICES)
num_slices = np.zeros((1,), dtype=num_slices_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.NUM_SLICES,
num_slices.ctypes.data, num_slices.dtype.itemsize)
num_slices = int(num_slices)
assert num_slices > 0
print("Set contraction path into cuTensorNet optimizer.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_contraction_sizes(handle, desc_net, optimizer_info, work_desc)
required_scratch_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH)
work_scratch = cp.cuda.alloc(required_scratch_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work_scratch.ptr, required_scratch_workspace_size)
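# In addition to the SCRATCH workspace, set up a CACHE workspace that persists the cacheable
# intermediate tensors (those depending only on constant inputs) across contract_slices calls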
required_cache_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.CACHE)
work_cache = cp.cuda.alloc(required_cache_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.CACHE,
work_cache.ptr, required_cache_workspace_size)
plan = cutn.create_contraction_plan(handle, desc_net, optimizer_info, work_desc)
###################################################################################
# Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
###################################################################################
pref = cutn.create_contraction_autotune_preference(handle)
num_autotuning_iterations = 5 # may be 0
n_iter_dtype = cutn.contraction_autotune_preference_get_attribute_dtype(
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS)
num_autotuning_iterations = np.asarray([num_autotuning_iterations], dtype=n_iter_dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref,
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS,
num_autotuning_iterations.ctypes.data, num_autotuning_iterations.dtype.itemsize)
# Modify the plan again to find the best pair-wise contractions
cutn.contraction_autotune(
handle, plan, raw_data_in_d, O_d.data.ptr,
work_desc, pref, stream.ptr)
cutn.destroy_contraction_autotune_preference(pref)
print("Create a contraction plan for cuTENSOR and optionally auto-tune it.")
###########
# Execution
###########
minTimeCUTENSORNET = 1e100
firstTimeCUTENSORNET = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
slice_group = cutn.create_slice_group_from_id_range(handle, 0, num_slices, 1)
for i in range(num_runs):
# Contract over all slices.
# A user may choose to parallelize over the slices across multiple devices.
e1.record()
cutn.contract_slices(
handle, plan, raw_data_in_d, O_d.data.ptr, False,
work_desc, slice_group, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) / 1000 # ms -> s
minTimeCUTENSORNET = minTimeCUTENSORNET if minTimeCUTENSORNET < time else time
firstTimeCUTENSORNET = firstTimeCUTENSORNET if i > 0 else time
print("Contract the network, each slice uses the same contraction plan.")
# free up the workspace
del work_scratch
del work_cache
# Recall that we set strides to null (0), so the data are in F-contiguous layout
A_d = A_d.reshape(extent_A, order='F')
B_d = B_d.reshape(extent_B, order='F')
C_d = C_d.reshape(extent_C, order='F')
D_d = D_d.reshape(extent_D, order='F')
E_d = E_d.reshape(extent_E, order='F')
F_d = F_d.reshape(extent_F, order='F')
O_d = O_d.reshape(extent_O, order='F')
# Compute the reference using cupy.einsum with the same path
path = ['einsum_path'] + path.tolist()
out = cp.einsum("abcd,bcde,efgh,ghij,ijkl,klm->am", A_d, B_d, C_d, D_d, E_d, F_d, optimize=path)
if not cp.allclose(out, O_d):
raise RuntimeError("result is incorrect")
print("Check cuTensorNet result against that of cupy.einsum().")
#######################################################
flops_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT)
flops = np.zeros((1,), dtype=flops_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
flops.ctypes.data, flops.dtype.itemsize)
flops = float(flops)
print(f"num_slices: {num_slices}")
print(f"First run (intermediate tensors get cached): {firstTimeCUTENSORNET * 1000 / num_slices} ms / slice")
print(f"Subsequent run (cache reused): {minTimeCUTENSORNET * 1000 / num_slices} ms / slice")
print(f"{flops / 1e9 / minTimeCUTENSORNET} GFLOPS/s")
cutn.destroy_slice_group(slice_group)
cutn.destroy_contraction_plan(plan)
cutn.destroy_contraction_optimizer_info(optimizer_info)
cutn.destroy_network_descriptor(desc_net)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/tensornet_example_reuse.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
######################################################################################
# Computing: R_{k,l} = A_{a,b,c,d,e,f} B_{b,g,h,e,i,j} C_{m,a,g,f,i,k} D_{l,c,h,d,j,m}
######################################################################################
print("Include headers and define data types.")
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
num_inputs = 4
# Create an array of modes
modes_A = [ord(c) for c in ('a','b','c','d','e','f')]
modes_B = [ord(c) for c in ('b','g','h','e','i','j')]
modes_C = [ord(c) for c in ('m','a','g','f','i','k')]
modes_D = [ord(c) for c in ('l','c','h','d','j','m')]
modes_R = [ord(c) for c in ('k','l')]
# Create an array of extents (shapes) for each tensor
dim = 8
extent_A = (dim,) * 6
extent_B = (dim,) * 6
extent_C = (dim,) * 6
extent_D = (dim,) * 6
extent_R = (dim,) * 2
print("Define network, modes, and extents.")
#################
# Initialize data
#################
A_d = cp.random.random((np.prod(extent_A),), dtype=np.float32)
B_d = cp.random.random((np.prod(extent_B),), dtype=np.float32)
C_d = cp.random.random((np.prod(extent_C),), dtype=np.float32)
D_d = cp.random.random((np.prod(extent_D),), dtype=np.float32)
R_d = cp.zeros((np.prod(extent_R),), dtype=np.float32)
raw_data_in_d = (A_d.data.ptr, B_d.data.ptr, C_d.data.ptr, D_d.data.ptr)
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A = len(modes_A)
nmode_B = len(modes_B)
nmode_C = len(modes_C)
nmode_D = len(modes_D)
nmode_R = len(modes_R)
###############################
# Create Contraction Descriptor
###############################
modes_in = (modes_A, modes_B, modes_C, modes_D)
extents_in = (extent_A, extent_B, extent_C, extent_D)
num_modes_in = (nmode_A, nmode_B, nmode_C, nmode_D)
# Strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides_in = (0, 0, 0, 0)
# Set up the tensor qualifiers for all input tensors
qualifiers_in = np.zeros(num_inputs, dtype=cutn.tensor_qualifiers_dtype)
# Set up tensor network
desc_net = cutn.create_network_descriptor(handle,
num_inputs, num_modes_in, extents_in, strides_in, modes_in, qualifiers_in, # inputs
nmode_R, extent_R, 0, modes_R, # output
data_type, compute_type)
print("Initialize the cuTensorNet library and create a network descriptor.")
#####################################################
# Choose workspace limit based on available resources
#####################################################
free_mem, total_mem = dev.mem_info
workspace_limit = int(free_mem * 0.9)
##############################################
# Find "optimal" contraction order and slicing
##############################################
optimizer_config = cutn.create_contraction_optimizer_config(handle)
# Set the value of the partitioner imbalance factor to 30 (if desired)
imbalance_dtype = cutn.contraction_optimizer_config_get_attribute_dtype(
cutn.ContractionOptimizerConfigAttribute.GRAPH_IMBALANCE_FACTOR)
imbalance_factor = np.asarray((30,), dtype=imbalance_dtype)
cutn.contraction_optimizer_config_set_attribute(
handle, optimizer_config, cutn.ContractionOptimizerConfigAttribute.GRAPH_IMBALANCE_FACTOR,
imbalance_factor.ctypes.data, imbalance_factor.dtype.itemsize)
optimizer_info = cutn.create_contraction_optimizer_info(handle, desc_net)
cutn.contraction_optimize(handle, desc_net, optimizer_config, workspace_limit, optimizer_info)
num_slices_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.NUM_SLICES)
num_slices = np.zeros((1,), dtype=num_slices_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.NUM_SLICES,
num_slices.ctypes.data, num_slices.dtype.itemsize)
num_slices = int(num_slices)
assert num_slices > 0
print("Find an optimized contraction path with cuTensorNet optimizer.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_contraction_sizes(handle, desc_net, optimizer_info, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH)
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
plan = cutn.create_contraction_plan(handle, desc_net, optimizer_info, work_desc)
###################################################################################
# Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
###################################################################################
pref = cutn.create_contraction_autotune_preference(handle)
num_autotuning_iterations = 5 # may be 0
n_iter_dtype = cutn.contraction_autotune_preference_get_attribute_dtype(
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS)
num_autotuning_iterations = np.asarray([num_autotuning_iterations], dtype=n_iter_dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref,
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS,
num_autotuning_iterations.ctypes.data, num_autotuning_iterations.dtype.itemsize)
# Modify the plan again to find the best pair-wise contractions
cutn.contraction_autotune(
handle, plan, raw_data_in_d, R_d.data.ptr,
work_desc, pref, stream.ptr)
cutn.destroy_contraction_autotune_preference(pref)
print("Create a contraction plan for cuTENSOR and optionally auto-tune it.")
###########
# Execution
###########
minTimeCUTENSOR = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
slice_group = cutn.create_slice_group_from_id_range(handle, 0, num_slices, 1)
for i in range(num_runs):
# Contract over all slices.
# A user may choose to parallelize over the slices across multiple devices.
e1.record()
cutn.contract_slices(
handle, plan, raw_data_in_d, R_d.data.ptr, False,
work_desc, slice_group, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) / 1000 # ms -> s
minTimeCUTENSOR = minTimeCUTENSOR if minTimeCUTENSOR < time else time
print("Contract the network, each slice uses the same contraction plan.")
# free up the workspace
del work
# Recall that we set strides to null (0), so the data are in F-contiguous layout
A_d = A_d.reshape(extent_A, order='F')
B_d = B_d.reshape(extent_B, order='F')
C_d = C_d.reshape(extent_C, order='F')
D_d = D_d.reshape(extent_D, order='F')
R_d = R_d.reshape(extent_R, order='F')
path, _ = cuquantum.einsum_path("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d)
out = cp.einsum("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d, optimize=path)
if not cp.allclose(out, R_d):
raise RuntimeError("result is incorrect")
print("Check cuTensorNet result against that of cupy.einsum().")
#######################################################
flops_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT)
flops = np.zeros((1,), dtype=flops_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
flops.ctypes.data, flops.dtype.itemsize)
flops = float(flops)
print(f"num_slices: {num_slices}")
print(f"{minTimeCUTENSOR * 1000 / num_slices} ms / slice")
print(f"{flops / 1e9 / minTimeCUTENSOR} GFLOPS/s")
cutn.destroy_slice_group(slice_group)
cutn.destroy_contraction_plan(plan)
cutn.destroy_contraction_optimizer_info(optimizer_info)
cutn.destroy_contraction_optimizer_config(optimizer_config)
cutn.destroy_network_descriptor(desc_net)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/tensornet_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
try:
import torch
except ImportError:
torch = None
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
##########################################################################################
# Computing: O_{a,m} = A_{a,b,c,d} B_{b,c,d,e} C_{e,g,h} D_{g,h,i,j} E_{i,j,k,l} F_{k,l,m}
# We will execute the contraction and compute the gradients of input tensors A, B, C
##########################################################################################
print("Include headers and define data types.")
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
num_inputs = 6
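# Gradients will be requested with respect to the first three input tensors (IDs 0, 1, 2)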
grad_input_ids = np.asarray((0, 1, 2), dtype=np.int32)
# Create an array of modes
modes_A = [ord(c) for c in ('a','b','c','d')]
modes_B = [ord(c) for c in ('b','c','d','e')]
modes_C = [ord(c) for c in ('e','g','h')]
modes_D = [ord(c) for c in ('g','h','i','j')]
modes_E = [ord(c) for c in ('i','j','k','l')]
modes_F = [ord(c) for c in ('k','l','m')]
modes_O = [ord(c) for c in ('a','m')]
# Create an array of extents (shapes) for each tensor
dim = 36
extent_A = (dim,) * len(modes_A)
extent_B = (dim,) * len(modes_B)
extent_C = (dim,) * len(modes_C)
extent_D = (dim,) * len(modes_D)
extent_E = (dim,) * len(modes_E)
extent_F = (dim,) * len(modes_F)
extent_O = (dim,) * len(modes_O)
print("Define network, modes, and extents.")
#################
# Initialize data
#################
A_d = cp.random.random((np.prod(extent_A),), dtype=np.float32)
B_d = cp.random.random((np.prod(extent_B),), dtype=np.float32)
C_d = cp.random.random((np.prod(extent_C),), dtype=np.float32)
D_d = cp.random.random((np.prod(extent_D),), dtype=np.float32)
E_d = cp.random.random((np.prod(extent_E),), dtype=np.float32)
F_d = cp.random.random((np.prod(extent_F),), dtype=np.float32)
O_d = cp.zeros((np.prod(extent_O),), dtype=np.float32)
raw_data_in_d = (A_d.data.ptr, B_d.data.ptr, C_d.data.ptr, D_d.data.ptr, E_d.data.ptr, F_d.data.ptr)
# allocate buffers for holding the gradients w.r.t. the first 3 input tensors
grads_d = [cp.empty_like(A_d),
cp.empty_like(B_d),
cp.empty_like(C_d),
None,
None,
None]
grads_d_ptr = [grad.data.ptr if grad is not None else 0 for grad in grads_d]
# output gradients (w.r.t itself, so it's all one)
output_grads_d = cp.ones(extent_O, dtype=np.float32, order='F')
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A = len(modes_A)
nmode_B = len(modes_B)
nmode_C = len(modes_C)
nmode_D = len(modes_D)
nmode_E = len(modes_E)
nmode_F = len(modes_F)
nmode_O = len(modes_O)
###############################
# Create Contraction Descriptor
###############################
modes_in = (modes_A, modes_B, modes_C, modes_D, modes_E, modes_F)
extents_in = (extent_A, extent_B, extent_C, extent_D, extent_E, extent_F)
num_modes_in = (nmode_A, nmode_B, nmode_C, nmode_D, nmode_E, nmode_F)
# Strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides_in = (0, 0, 0, 0, 0, 0)
# Set up tensor network
desc_net = cutn.create_network_descriptor(handle,
num_inputs, num_modes_in, extents_in, strides_in, modes_in, 0, # inputs
nmode_O, extent_O, 0, modes_O, # output
data_type, compute_type)
# In this sample we use the new network attributes interface to mark certain
# input tensors as constant, but we can also use the tensor qualifiers as shown
# in other samples (ex: tensornet_example_reuse.py)
net_attr_dtype = cutn.network_get_attribute_dtype(cutn.NetworkAttribute.INPUT_TENSORS_REQUIRE_GRAD)
tensor_ids = np.zeros(1, dtype=net_attr_dtype)
tensor_ids['num_tensors'] = grad_input_ids.size
tensor_ids['data'] = grad_input_ids.ctypes.data
cutn.network_set_attribute(
handle, desc_net, cutn.NetworkAttribute.INPUT_TENSORS_REQUIRE_GRAD,
tensor_ids.ctypes.data, tensor_ids.dtype.itemsize)
print("Initialize the cuTensorNet library and create a network descriptor.")
#####################################################
# Choose workspace limit based on available resources
#####################################################
free_mem, total_mem = dev.mem_info
workspace_limit = int(free_mem * 0.9)
#######################
# Set contraction order
#######################
# create contraction optimizer info
optimizer_info = cutn.create_contraction_optimizer_info(handle, desc_net)
# set a predetermined contraction path
path_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(cutn.ContractionOptimizerInfoAttribute.PATH)
path = np.asarray([(0, 1), (0, 4), (0, 3), (0, 2), (0, 1)], dtype=np.int32)
path_obj = np.zeros((1,), dtype=path_dtype)
path_obj["num_contractions"] = num_inputs - 1
path_obj["data"] = path.ctypes.data
# provide user-specified contract path
cutn.contraction_optimizer_info_set_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.PATH,
path_obj.ctypes.data, path_obj.dtype.itemsize)
num_slices = 1
print("Set predetermined contraction path into cuTensorNet optimizer.")
#############################################################
# Create workspace descriptor, allocate workspace, and set it
#############################################################
work_desc = cutn.create_workspace_descriptor(handle)
# set SCRATCH workspace, which will be used during each network contraction operation, not needed afterwards
cutn.workspace_compute_contraction_sizes(handle, desc_net, optimizer_info, work_desc)
required_scratch_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH)
work_scratch = cp.cuda.alloc(required_scratch_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work_scratch.ptr, required_scratch_workspace_size)
# set CACHE workspace, which will be used across network contraction operations
required_cache_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.CACHE)
work_cache = cp.cuda.alloc(required_cache_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.CACHE,
work_cache.ptr, required_cache_workspace_size)
print("Allocated and set up the GPU workspace")
###########################################################
# Initialize the pair-wise contraction plans (for cuTENSOR)
###########################################################
plan = cutn.create_contraction_plan(handle, desc_net, optimizer_info, work_desc)
###################################################################################
# Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
###################################################################################
pref = cutn.create_contraction_autotune_preference(handle)
num_autotuning_iterations = 5 # may be 0
n_iter_dtype = cutn.contraction_autotune_preference_get_attribute_dtype(
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS)
num_autotuning_iterations = np.asarray([num_autotuning_iterations], dtype=n_iter_dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref,
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS,
num_autotuning_iterations.ctypes.data, num_autotuning_iterations.dtype.itemsize)
# Modify the plan again to find the best pair-wise contractions
cutn.contraction_autotune(
handle, plan, raw_data_in_d, O_d.data.ptr,
work_desc, pref, stream.ptr)
cutn.destroy_contraction_autotune_preference(pref)
print("Create a contraction plan for cuTENSOR and optionally auto-tune it.")
###########
# Execution
###########
# create a cutensornetSliceGroup_t object from a range of slice IDs
slice_group = cutn.create_slice_group_from_id_range(handle, 0, num_slices, 1)
min_time_cutn = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
for i in range(num_runs):
# Contract over all slices.
e1.record(stream)
cutn.contract_slices(
handle, plan, raw_data_in_d,
O_d.data.ptr,
False, work_desc, slice_group, stream.ptr)
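    # Back-propagate the seed output gradient through the network to fill the gradient buffers
    # of the input tensors marked above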
cutn.compute_gradients_backward(
handle, plan, raw_data_in_d,
output_grads_d.data.ptr,
grads_d_ptr,
False, work_desc, stream.ptr)
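    # Purge the cached intermediate tensors so every timed iteration repeats the full computation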
cutn.workspace_purge_cache(handle, work_desc, cutn.Memspace.DEVICE)
e2.record(stream)
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) / 1000 # ms -> s
min_time_cutn = min_time_cutn if min_time_cutn < time else time
print("Contract the network and compute gradients.")
# free up the workspace
del work_scratch
del work_cache
# Recall that we set strides to null (0), so the data are in F-contiguous layout,
# including the gradients (which follow the layout of the input tensors)
A_d = A_d.reshape(extent_A, order='F')
B_d = B_d.reshape(extent_B, order='F')
C_d = C_d.reshape(extent_C, order='F')
D_d = D_d.reshape(extent_D, order='F')
E_d = E_d.reshape(extent_E, order='F')
F_d = F_d.reshape(extent_F, order='F')
O_d = O_d.reshape(extent_O, order='F')
grads_d[0] = grads_d[0].reshape(extent_A, order='F')
grads_d[1] = grads_d[1].reshape(extent_B, order='F')
grads_d[2] = grads_d[2].reshape(extent_C, order='F')
# Compute the contraction reference using cupy.einsum with the same path
path = ['einsum_path'] + path.tolist()
out = cp.einsum("abcd,bcde,egh,ghij,ijkl,klm->am", A_d, B_d, C_d, D_d, E_d, F_d, optimize=path)
if not cp.allclose(out, O_d):
raise RuntimeError("result is incorrect")
print("Check cuTensorNet contraction result against that of cupy.einsum().")
# Compute the gradient reference using PyTorch
if torch:
if not torch.cuda.is_available():
# copy data back to CPU
dev = "cpu"
func = cp.asnumpy
torch_cuda = False
else:
# zero-copy from CuPy to PyTorch!
dev = "cuda"
func = (lambda x: x) # no op
torch_cuda = True
A = torch.as_tensor(func(A_d), device=dev)
B = torch.as_tensor(func(B_d), device=dev)
C = torch.as_tensor(func(C_d), device=dev)
D = torch.as_tensor(func(D_d), device=dev)
E = torch.as_tensor(func(E_d), device=dev)
F = torch.as_tensor(func(F_d), device=dev)
output_grads = torch.as_tensor(func(output_grads_d), device=dev)
# do not need gradient for the last 3 tensors
A.requires_grad_(True)
B.requires_grad_(True)
C.requires_grad_(True)
D.requires_grad_(False)
E.requires_grad_(False)
F.requires_grad_(False)
# We can use either torch.einsum or opt_einsum.contract to establish the
# computational graph of an einsum op over the PyTorch tensors. Note that
# torch.einsum does not support passing custom contraction paths.
out = torch.einsum("abcd,bcde,egh,ghij,ijkl,klm->am", A, B, C, D, E, F)
out.backward(output_grads) # backprop to populate the inputs' .grad attributes
if not cp.allclose(cp.asarray(out.detach()), O_d):
raise RuntimeError("result is incorrect")
# If using PyTorch CPU tensors, these move data back to GPU for comparison;
# otherwise, PyTorch GPU tensors are zero-copied as CuPy arrays.
assert cp.allclose(cp.asarray(A.grad), grads_d[0])
assert cp.allclose(cp.asarray(B.grad), grads_d[1])
assert cp.allclose(cp.asarray(C.grad), grads_d[2])
# Note: D.grad, E.grad, and F.grad do not exist
print("Check cuTensorNet gradient results against those from "
f"PyTorch ({'GPU' if torch_cuda else 'GPU'}).")
#######################################################
print(f"Tensor network contraction and back-propagation time (ms): = {min_time_cutn * 1000}")
cutn.destroy_slice_group(slice_group)
cutn.destroy_contraction_plan(plan)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_contraction_optimizer_info(optimizer_info)
cutn.destroy_network_descriptor(desc_net)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/tensornet_example_gradients.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
from mpi4py import MPI
import cuquantum
from cuquantum import cutensornet as cutn
root = 0
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
if rank == root:
print("*** Printing is done only from the root process to prevent jumbled messages ***")
print(f"The number of processes is {size}")
num_devices = cp.cuda.runtime.getDeviceCount()
device_id = rank % num_devices
dev = cp.cuda.Device(device_id)
dev.use()
props = cp.cuda.runtime.getDeviceProperties(dev.id)
if rank == root:
print("cuTensorNet-vers:", cutn.get_version())
print("===== root process device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
######################################################################################
# Computing: R_{k,l} = A_{a,b,c,d,e,f} B_{b,g,h,e,i,j} C_{m,a,g,f,i,k} D_{l,c,h,d,j,m}
######################################################################################
if rank == root:
print("Include headers and define data types.")
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
num_inputs = 4
# Create an array of modes
modes_A = [ord(c) for c in ('a','b','c','d','e','f')]
modes_B = [ord(c) for c in ('b','g','h','e','i','j')]
modes_C = [ord(c) for c in ('m','a','g','f','i','k')]
modes_D = [ord(c) for c in ('l','c','h','d','j','m')]
modes_R = [ord(c) for c in ('k','l')]
# Create an array of extents (shapes) for each tensor
dim = 8
extent_A = (dim,) * 6
extent_B = (dim,) * 6
extent_C = (dim,) * 6
extent_D = (dim,) * 6
extent_R = (dim,) * 2
if rank == root:
print("Define network, modes, and extents.")
#################
# Initialize data
#################
if rank == root:
A = np.random.random(np.prod(extent_A)).astype(np.float32)
B = np.random.random(np.prod(extent_B)).astype(np.float32)
C = np.random.random(np.prod(extent_C)).astype(np.float32)
D = np.random.random(np.prod(extent_D)).astype(np.float32)
else:
A = np.empty(np.prod(extent_A), dtype=np.float32)
B = np.empty(np.prod(extent_B), dtype=np.float32)
C = np.empty(np.prod(extent_C), dtype=np.float32)
D = np.empty(np.prod(extent_D), dtype=np.float32)
R = np.empty(extent_R)
comm.Bcast(A, root)
comm.Bcast(B, root)
comm.Bcast(C, root)
comm.Bcast(D, root)
A_d = cp.asarray(A)
B_d = cp.asarray(B)
C_d = cp.asarray(C)
D_d = cp.asarray(D)
R_d = cp.empty(np.prod(extent_R), dtype=np.float32)
raw_data_in_d = (A_d.data.ptr, B_d.data.ptr, C_d.data.ptr, D_d.data.ptr)
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A = len(modes_A)
nmode_B = len(modes_B)
nmode_C = len(modes_C)
nmode_D = len(modes_D)
nmode_R = len(modes_R)
###############################
# Create Contraction Descriptor
###############################
modes_in = (modes_A, modes_B, modes_C, modes_D)
extents_in = (extent_A, extent_B, extent_C, extent_D)
num_modes_in = (nmode_A, nmode_B, nmode_C, nmode_D)
# Strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides_in = (0, 0, 0, 0)
# Set up the tensor qualifiers for all input tensors
qualifiers_in = np.zeros(num_inputs, dtype=cutn.tensor_qualifiers_dtype)
# Set up tensor network
desc_net = cutn.create_network_descriptor(handle,
num_inputs, num_modes_in, extents_in, strides_in, modes_in, qualifiers_in, # inputs
nmode_R, extent_R, 0, modes_R, # output
data_type, compute_type)
if rank == root:
print("Initialize the cuTensorNet library and create a network descriptor.")
#####################################################
# Choose workspace limit based on available resources
#####################################################
free_mem, total_mem = dev.mem_info
free_mem = comm.allreduce(free_mem, MPI.MIN)
workspace_limit = int(free_mem * 0.9)
##############################################
# Find "optimal" contraction order and slicing
##############################################
optimizer_config = cutn.create_contraction_optimizer_config(handle)
optimizer_info = cutn.create_contraction_optimizer_info(handle, desc_net)
# Force slicing
min_slices_dtype = cutn.contraction_optimizer_config_get_attribute_dtype(
cutn.ContractionOptimizerConfigAttribute.SLICER_MIN_SLICES)
min_slices_factor = np.asarray((size,), dtype=min_slices_dtype)
cutn.contraction_optimizer_config_set_attribute(
handle, optimizer_config, cutn.ContractionOptimizerConfigAttribute.SLICER_MIN_SLICES,
min_slices_factor.ctypes.data, min_slices_factor.dtype.itemsize)
cutn.contraction_optimize(
handle, desc_net, optimizer_config, workspace_limit, optimizer_info)
flops_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT)
flops = np.zeros((1,), dtype=flops_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
flops.ctypes.data, flops.dtype.itemsize)
flops = float(flops)
# Choose the path with the lowest cost.
flops, sender = comm.allreduce(sendobj=(flops, rank), op=MPI.MINLOC)
if rank == root:
print(f"Process {sender} has the path with the lowest FLOP count {flops}.")
# Get buffer size for optimizer_info and broadcast it.
if rank == sender:
bufSize = cutn.contraction_optimizer_info_get_packed_size(handle, optimizer_info)
else:
bufSize = 0 # placeholder
bufSize = comm.bcast(bufSize, sender)
# Allocate buffer.
buf = np.empty((bufSize,), dtype=np.int8)
# Pack optimizer_info on sender and broadcast it.
if rank == sender:
cutn.contraction_optimizer_info_pack_data(handle, optimizer_info, buf, bufSize)
comm.Bcast(buf, sender)
# Unpack optimizer_info from buffer.
if rank != sender:
cutn.update_contraction_optimizer_info_from_packed_data(
handle, buf, bufSize, optimizer_info)
num_slices_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.NUM_SLICES)
num_slices = np.zeros((1,), dtype=num_slices_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.NUM_SLICES,
num_slices.ctypes.data, num_slices.dtype.itemsize)
num_slices = int(num_slices)
assert num_slices > 0
# Calculate each process's share of the slices.
proc_chunk = num_slices // size
extra = num_slices % size
proc_slice_begin = rank * proc_chunk + min(rank, extra)
proc_slice_end = num_slices if rank == size - 1 else (rank + 1) * proc_chunk + min(rank + 1, extra)
if rank == root:
print("Find an optimized contraction path with cuTensorNet optimizer.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_contraction_sizes(handle, desc_net, optimizer_info, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(
handle, work_desc,
cutn.WorksizePref.MIN,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH)
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
if rank == root:
print("Allocate workspace.")
###########################################################
# Initialize all pair-wise contraction plans (for cuTENSOR)
###########################################################
plan = cutn.create_contraction_plan(handle, desc_net, optimizer_info, work_desc)
###################################################################################
# Optional: Auto-tune cuTENSOR's cutensorContractionPlan to pick the fastest kernel
###################################################################################
pref = cutn.create_contraction_autotune_preference(handle)
num_autotuning_iterations = 5 # may be 0
n_iter_dtype = cutn.contraction_autotune_preference_get_attribute_dtype(
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS)
num_autotuning_iterations = np.asarray([num_autotuning_iterations], dtype=n_iter_dtype)
cutn.contraction_autotune_preference_set_attribute(
handle, pref,
cutn.ContractionAutotunePreferenceAttribute.MAX_ITERATIONS,
num_autotuning_iterations.ctypes.data, num_autotuning_iterations.dtype.itemsize)
# modify the plan again to find the best pair-wise contractions
cutn.contraction_autotune(
handle, plan, raw_data_in_d, R_d.data.ptr,
work_desc, pref, stream.ptr)
cutn.destroy_contraction_autotune_preference(pref)
if rank == root:
print("Create a contraction plan for cuTENSOR and optionally auto-tune it.")
###########
# Execution
###########
minTimeCUTENSOR = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
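# Each process contracts only its own contiguous range of slice IDs; the partial results are
# reduced onto the root process afterwards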
slice_group = cutn.create_slice_group_from_id_range(handle, proc_slice_begin, proc_slice_end, 1)
for i in range(num_runs):
# Contract over all slices.
# A user may choose to parallelize over the slices across multiple devices.
e1.record()
cutn.contract_slices(
handle, plan, raw_data_in_d, R_d.data.ptr, False,
work_desc, slice_group, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) / 1000 # ms -> s
minTimeCUTENSOR = minTimeCUTENSOR if minTimeCUTENSOR < time else time
if rank == root:
print("Contract the network, each slice uses the same contraction plan.")
# free up the workspace
del work
R[...] = cp.asnumpy(R_d).reshape(extent_R, order='F')
# Reduce on root process.
if rank == root:
comm.Reduce(MPI.IN_PLACE, R, root=root)
else:
comm.Reduce(R, R, root=root)
# Compute the reference result.
if rank == root:
# Recall that we set strides to null (0), so the data are in F-contiguous layout
A_d = A_d.reshape(extent_A, order='F')
B_d = B_d.reshape(extent_B, order='F')
C_d = C_d.reshape(extent_C, order='F')
D_d = D_d.reshape(extent_D, order='F')
path, _ = cuquantum.einsum_path("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d)
out = cp.einsum("abcdef,bgheij,magfik,lchdjm->kl", A_d, B_d, C_d, D_d, optimize=path)
if not cp.allclose(out, R):
raise RuntimeError("result is incorrect")
print("Check cuTensorNet result against that of cupy.einsum().")
#######################################################
flops_dtype = cutn.contraction_optimizer_info_get_attribute_dtype(
cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT)
flops = np.zeros((1,), dtype=flops_dtype)
cutn.contraction_optimizer_info_get_attribute(
handle, optimizer_info, cutn.ContractionOptimizerInfoAttribute.FLOP_COUNT,
flops.ctypes.data, flops.dtype.itemsize)
flops = float(flops)
if rank == root:
print(f"num_slices: {num_slices}")
print(f"{minTimeCUTENSOR * 1000 / num_slices} ms / slice")
print(f"{flops / 1e9 / minTimeCUTENSOR} GFLOPS/s")
cutn.destroy_slice_group(slice_group)
cutn.destroy_contraction_plan(plan)
cutn.destroy_contraction_optimizer_info(optimizer_info)
cutn.destroy_contraction_optimizer_config(optimizer_config)
cutn.destroy_network_descriptor(desc_net)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
if rank == root:
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/tensornet_example_mpi.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of Ellipses notation with contract_decompose
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract
from cuquantum.cutensornet.experimental import contract_decompose
a = np.ones((2,2,2,2))
b = np.ones((2,2,2,2))
# here we use contract and SVD decomposition to show usage of ellipsis
abs_cutoff = 1e-12
algorithm = {
'qr_method' : False,
    'svd_method': {'abs_cutoff':abs_cutoff, 'partition': None} # singular values are returned as a separate output (not partitioned onto the outputs)
}
################################################
### Case I. Ellipses in one input and one output
################################################
ellipsis_subscripts = 'abcd,cd...->abx,...x'
equivalent_subscripts = 'abcd,cdef->abx,efx'
a0, s0, b0 = contract_decompose(ellipsis_subscripts, a, b, algorithm=algorithm)
a1, s1, b1 = contract_decompose(equivalent_subscripts, a, b, algorithm=algorithm)
equal = np.allclose(s0, s1)
print(f"For the given operands, ``{ellipsis_subscripts}`` equal to ``{equivalent_subscripts}`` ? : {equal}")
assert equal
##################################
### Case II. Ellipses in one input
##################################
ellipsis_subscripts = 'abcd,d...->abx,cx'
equivalent_subscripts = 'abcd,defg->abx,cx'
a0, s0, b0 = contract_decompose(ellipsis_subscripts, a, b, algorithm=algorithm)
a1, s1, b1 = contract_decompose(equivalent_subscripts, a, b, algorithm=algorithm)
equal = np.allclose(s0, s1)
print(f"For the given operands, ``{ellipsis_subscripts}`` equal to ``{equivalent_subscripts}`` ? : {equal}")
assert equal
#############################################
### Case III. Ellipses in more than one input
#############################################
ellipsis_subscripts = 'ab...,bc...->ax,cx'
equivalent_subscripts = 'abef,bcef->ax,cx'
a0, s0, b0 = contract_decompose(ellipsis_subscripts, a, b, algorithm=algorithm)
a1, s1, b1 = contract_decompose(equivalent_subscripts, a, b, algorithm=algorithm)
equal = np.allclose(s0, s1)
print(f"For the given operands, ``{ellipsis_subscripts}`` equal to ``{equivalent_subscripts}`` ? : {equal}")
assert equal
###########################################################
### Case IV. Ellipses in more than one input and one output
##########################################################
ellipsis_subscripts = 'ab...,bc...->ax...,cx'
equivalent_subscripts = 'abef,bcef->axef,cx'
a0, s0, b0 = contract_decompose(ellipsis_subscripts, a, b, algorithm=algorithm)
a1, s1, b1 = contract_decompose(equivalent_subscripts, a, b, algorithm=algorithm)
equal = np.allclose(s0, s1)
print(f"For the given operands, ``{ellipsis_subscripts}`` equal to ``{equivalent_subscripts}`` ? : {equal}")
assert equal
| cuQuantum-main | python/samples/cutensornet/experimental/example06-ellipses_notation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of contract and SVD decompose an arbitrary network
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract, tensor
from cuquantum.cutensornet.experimental import contract_decompose
inputs = ('ab', 'bcd', 'cde', 'exg', 'ayg')
outputs = ('xz', 'zy')
# creating random input tensors
np.random.seed(0)
size_dict = {}
operands = []
for modes in inputs:
shape = []
for m in modes:
if m not in size_dict:
size_dict[m] = np.random.randint(2,6)
shape.append(size_dict[m])
operands.append(np.random.random(shape))
subscripts = ",".join(inputs) + "->" + ",".join(outputs)
# contraction followed by SVD decomposition
algorithm = {'qr_method':False, 'svd_method':{'partition':'UV'}} # S is equally partitioned onto u and v
u, _, v = contract_decompose(subscripts, *operands, algorithm=algorithm)
# compute the full network contraction using the original input operands
result = contract(",".join(inputs), *operands)
# compute the full network contraction using the decomposed outputs
result_reference = contract(",".join(outputs), u, v)
diff = abs(result - result_reference).max()
print(f"After contract and SVD decomposition")
print(f"Max diff={diff}")
| cuQuantum-main | python/samples/cutensornet/experimental/example05-contract_SVD_decompose.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of pairwise tensor compression with contract_decompose
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract
from cuquantum.cutensornet.experimental import contract_decompose
a = np.ones((2,2,2))
b = np.ones((2,2,2))
# use SVD to compress two tensors:
# i k m i k m
# =====A=====B===== ===> =====A-----B=====
# |j l| |j l|
abs_cutoff = 1e-12
compress_algorithm = {
'qr_method' : False,
'svd_method': {'abs_cutoff':abs_cutoff, 'partition': 'UV'} # singular values are partitioned onto A/B equally
}
# compare the difference after compression
a_svd, _, b_svd = contract_decompose('ijk,klm->ijk,klm', a, b, algorithm=compress_algorithm)
diff = contract('ijk,klm', a, b) - contract('ijk,klm', a_svd, b_svd)
print(f"After compression with cutoff {abs_cutoff}")
print(f" Shape of A, B: {a_svd.shape} {b_svd.shape}")
print(f" Maxdiff error: {abs(diff).max()}") | cuQuantum-main | python/samples/cutensornet/experimental/example02-pairwise_compression.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of contract and QR decompose an arbitrary network
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract, tensor
from cuquantum.cutensornet.experimental import contract_decompose
inputs = ('ab', 'bcd', 'cde', 'exg', 'ayg')
outputs = ('xz', 'zy')
# creating random input tensors
np.random.seed(0)
size_dict = {}
operands = []
for modes in inputs:
shape = []
for m in modes:
if m not in size_dict:
size_dict[m] = np.random.randint(2,6)
shape.append(size_dict[m])
operands.append(np.random.random(shape))
subscripts = ",".join(inputs) + "->" + ",".join(outputs)
# contraction followed by QR decomposition
algorithm = {'qr_method':{}, 'svd_method':False}
q, r = contract_decompose(subscripts, *operands, algorithm=algorithm)
# compute the full network contraction using the original input operands
result = contract(",".join(inputs), *operands)
# compute the full network contraction using the decomposed outputs
result_reference = contract(",".join(outputs), q, r)
diff = abs(result - result_reference).max()
print(f"After contract and QR decomposition")
print(f"max diff={diff}")
| cuQuantum-main | python/samples/cutensornet/experimental/example04-contract_QR_decompose.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of applying gate operand to two connecting tensors with contract_decompose
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract
from cuquantum.cutensornet.experimental import contract_decompose
a = np.ones((2,2,2))
b = np.ones((2,2,2))
gate = np.ones((2,2,2,2))
# absorb the gate tensor onto two connecting tensors
# i | k | m
# =====A=====B===== i k m
# |j l| ===> =====A-----B=====
# GGGGGGG p| q|
# |p q|
abs_cutoff = 1e-12
# use QR to assist in contraction decomposition
# note this is currently only supported for fully connected network with three tensors
gate_algorithm = {
'qr_method' : {},
'svd_method': {'abs_cutoff':abs_cutoff, 'partition': 'UV'} # singular values are partitioned onto A/B equally
}
# compare the difference after compression
a_svd, _, b_svd = contract_decompose('ijk,klm,jlpq->ipk,kqm', a, b, gate, algorithm=gate_algorithm)
diff = contract('ijk,klm,jlpq', a, b, gate) - contract('ipk,kqm', a_svd, b_svd)
print(f"After compression with cutoff {abs_cutoff}")
print(f" Shape of A, B: {a_svd.shape} {b_svd.shape}")
print(f" Maxdiff error: {abs(diff).max()}") | cuQuantum-main | python/samples/cutensornet/experimental/example03-apply_gate.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example of pairwise tensor canonicalization with contract_decompose
NumPy ndarrays are used as inputs.
"""
import numpy as np
from cuquantum import contract
from cuquantum.cutensornet.experimental import contract_decompose
a = np.ones((2,2,2))
b = np.ones((2,2,2))
# use QR to canonicalize two tensors:
# i k m i k m
# =====A=====B===== ===> =====A---->B=====
# |j l| |j l|
canonicalize_algorithm = {
'qr_method': {},
'svd_method': False
}
a_qr, b_qr = contract_decompose('ijk,klm->ijk,klm', a, b, algorithm=canonicalize_algorithm)
# compare the difference after canonicalization
diff = contract('ijk,klm', a, b) - contract('ijk,klm', a_qr, b_qr)
print("After canonicalization")
print(f" Shape of A, B: {a_qr.shape} {b_qr.shape}")
print(f" Maxdiff error: {abs(diff).max()}") | cuQuantum-main | python/samples/cutensornet/experimental/example01-pairwise_canonicalization.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
###################################################################################
# Gate Split: A_{i,j,k,l} B_{k,o,p,q} G_{m,n,l,o}-> A'_{i,j,x,m} S_{x} B'_{x,n,p,q}
###################################################################################
data_type = cuquantum.cudaDataType.CUDA_R_32F
compute_type = cuquantum.ComputeType.COMPUTE_32F
# Create an array of modes
modes_A_in = [ord(c) for c in ('i','j','k','l')] # input
modes_B_in = [ord(c) for c in ('k','o','p','q')]
modes_G_in = [ord(c) for c in ('m','n','l','o')]
modes_A_out = [ord(c) for c in ('i','j','x','m')] # output
modes_B_out = [ord(c) for c in ('x','n','p','q')]
# Create an array of extent (shapes) for each tensor
extent_A_in = (16, 16, 16, 2)
extent_B_in = (16, 2, 16, 16)
extent_G_in = (2, 2, 2, 2)
shared_extent_out = 16 # truncate shared extent to 16
extent_A_out = (16, 16, shared_extent_out, 2)
extent_B_out = (shared_extent_out, 2, 16, 16)
############################
# Allocate & initialize data
############################
cp.random.seed(1)
A_in_d = cp.random.random(extent_A_in, dtype=np.float32).astype(np.float32, order='F') # we use fortran layout throughout this example
B_in_d = cp.random.random(extent_B_in, dtype=np.float32).astype(np.float32, order='F')
G_in_d = cp.random.random(extent_G_in, dtype=np.float32).astype(np.float32, order='F')
A_out_d = cp.empty(extent_A_out, dtype=np.float32, order='F')
S_out_d = cp.empty(shared_extent_out, dtype=np.float32)
B_out_d = cp.empty(extent_B_out, dtype=np.float32, order='F')
print("Allocate memory for data and initialize data.")
free_mem, total_mem = dev.mem_info
worksize = free_mem *.7
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_A_in = len(modes_A_in)
nmode_B_in = len(modes_B_in)
nmode_G_in = len(modes_G_in)
nmode_A_out = len(modes_A_out)
nmode_B_out = len(modes_B_out)
###############################
# Create tensor descriptors
###############################
# strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides = 0
desc_tensor_A_in = cutn.create_tensor_descriptor(handle, nmode_A_in, extent_A_in, strides, modes_A_in, data_type)
desc_tensor_B_in = cutn.create_tensor_descriptor(handle, nmode_B_in, extent_B_in, strides, modes_B_in, data_type)
desc_tensor_G_in = cutn.create_tensor_descriptor(handle, nmode_G_in, extent_G_in, strides, modes_G_in, data_type)
desc_tensor_A_out = cutn.create_tensor_descriptor(handle, nmode_A_out, extent_A_out, strides, modes_A_out, data_type)
desc_tensor_B_out = cutn.create_tensor_descriptor(handle, nmode_B_out, extent_B_out, strides, modes_B_out, data_type)
########################################
# Setup gate split truncation parameters
########################################
svd_config = cutn.create_tensor_svd_config(handle)
absCutoff_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.ABS_CUTOFF)
absCutoff = np.array(1e-2, dtype=absCutoff_dtype)
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.ABS_CUTOFF, absCutoff.ctypes.data, absCutoff.dtype.itemsize)
relCutoff_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.REL_CUTOFF)
relCutoff = np.array(1e-2, dtype=relCutoff_dtype)
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.REL_CUTOFF, relCutoff.ctypes.data, relCutoff.dtype.itemsize)
# create SVDInfo to record truncation information
svd_info = cutn.create_tensor_svd_info(handle)
gate_algo = cutn.GateSplitAlgo.REDUCED
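# Note (added for clarity): GateSplitAlgo.DIRECT contracts A, B and G into a single
# tensor and applies SVD to it, whereas GateSplitAlgo.REDUCED first applies QR to A
# and B so that the SVD operates on a smaller, "reduced" problem; REDUCED is typically
# preferable when the virtual extents are large.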
print("Setup gate split truncation options.")
###############################
# Query Workspace Size
###############################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_gate_split_sizes(handle,
desc_tensor_A_in, desc_tensor_B_in, desc_tensor_G_in,
desc_tensor_A_out, desc_tensor_B_out,
gate_algo, svd_config, compute_type, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(handle,
work_desc, cutn.WorksizePref.MIN, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if worksize < required_workspace_size:
raise MemoryError("Not enough workspace memory is available.")
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
print("Query and allocate required workspace.")
###########
# Execution
###########
min_time_cutensornet = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
for i in range(num_runs):
# restore output
A_out_d[:] = 0
S_out_d[:] = 0
B_out_d[:] = 0
dev.synchronize()
    # restore output tensor descriptors as `cutensornet.gate_split` can potentially update the shared extent in desc_tensor_A_out/B_out.
    # therefore we restore desc_tensor_A_out/B_out to the original problem here
cutn.destroy_tensor_descriptor(desc_tensor_A_out)
cutn.destroy_tensor_descriptor(desc_tensor_B_out)
desc_tensor_A_out = cutn.create_tensor_descriptor(handle, nmode_A_out, extent_A_out, strides, modes_A_out, data_type)
desc_tensor_B_out = cutn.create_tensor_descriptor(handle, nmode_B_out, extent_B_out, strides, modes_B_out, data_type)
e1.record()
# execution
cutn.gate_split(handle,
desc_tensor_A_in, A_in_d.data.ptr,
desc_tensor_B_in, B_in_d.data.ptr,
desc_tensor_G_in, G_in_d.data.ptr,
desc_tensor_A_out, A_out_d.data.ptr,
S_out_d.data.ptr,
desc_tensor_B_out, B_out_d.data.ptr,
gate_algo, svd_config, compute_type, svd_info, work_desc, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) # ms
min_time_cutensornet = min_time_cutensornet if min_time_cutensornet < time else time
full_extent_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.FULL_EXTENT)
full_extent = np.empty(1, dtype=full_extent_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.FULL_EXTENT, full_extent.ctypes.data, full_extent.itemsize)
full_extent = int(full_extent)
reduced_extent_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.REDUCED_EXTENT)
reduced_extent = np.empty(1, dtype=reduced_extent_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.REDUCED_EXTENT, reduced_extent.ctypes.data, reduced_extent.itemsize)
reduced_extent = int(reduced_extent)
discarded_weight_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT)
discarded_weight = np.empty(1, dtype=discarded_weight_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT, discarded_weight.ctypes.data, discarded_weight.itemsize)
discarded_weight = float(discarded_weight)
print(f"Execution time: {min_time_cutensornet} ms")
print("SVD truncation info:")
print(f"For fixed extent truncation of {shared_extent_out}, an absolute cutoff value of {float(absCutoff)}, and a relative cutoff value of {float(relCutoff)}, full extent {full_extent} is reduced to {reduced_extent}")
print(f"Discarded weight: {discarded_weight}")
# Recall that when we perform value-based truncation through an absolute or relative cutoff,
# the extent found at runtime may be lower than the one specified in the output tensor descriptors.
# Therefore we may need to create new containers for the output data, with a Fortran layout corresponding to the reduced extent.
if reduced_extent != shared_extent_out:
extent_A_out_reduced, strides_A_out = cutn.get_tensor_details(handle, desc_tensor_A_out)[2:]
extent_B_out_reduced, strides_B_out = cutn.get_tensor_details(handle, desc_tensor_B_out)[2:]
# note strides in cutensornet are in the unit of count and strides in cupy/numpy are in the unit of nbytes
strides_A_out = [i * A_out_d.itemsize for i in strides_A_out]
strides_B_out = [i * B_out_d.itemsize for i in strides_B_out]
A_out_d = cp.ndarray(extent_A_out_reduced, dtype=np.float32, memptr=A_out_d.data, strides=strides_A_out)
S_out_d = cp.ndarray(reduced_extent, dtype=np.float32, memptr=S_out_d.data, order='F')
B_out_d = cp.ndarray(extent_B_out_reduced, dtype=np.float32, memptr=B_out_d.data, strides=strides_B_out)
T_d = cp.einsum("ijkl,kopq,mnlo->ijmnpq", A_in_d, B_in_d, G_in_d)
out = cp.einsum("ijxm,x,xnpq->ijmnpq", A_out_d, S_out_d, B_out_d)
print(f"max diff after truncation {abs(out-T_d).max()}")
print("Check cuTensorNet result.")
#######################################################
cutn.destroy_tensor_descriptor(desc_tensor_A_in)
cutn.destroy_tensor_descriptor(desc_tensor_B_in)
cutn.destroy_tensor_descriptor(desc_tensor_G_in)
cutn.destroy_tensor_descriptor(desc_tensor_A_out)
cutn.destroy_tensor_descriptor(desc_tensor_B_out)
cutn.destroy_tensor_svd_config(svd_config)
cutn.destroy_tensor_svd_info(svd_info)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/approxTN/gate_split_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
######################################################
# Tensor SVD: T_{i,j,m,n} -> U_{i,x,m} S_{x} V_{n,x,j}
######################################################
data_type = cuquantum.cudaDataType.CUDA_R_32F
# Create an array of modes
modes_T = [ord(c) for c in ('i','j','m','n')] # input
modes_U = [ord(c) for c in ('i','x','m')] # SVD output
modes_V = [ord(c) for c in ('n','x','j')]
# Create an array of extent (shapes) for each tensor
extent_T = (16, 16, 16, 16)
shared_extent = 256 // 2 # truncate shared extent from 256 to 128
extent_U = (16, shared_extent, 16)
extent_V = (16, shared_extent, 16)
############################
# Allocate & initialize data
############################
cp.random.seed(1)
T_d = cp.random.random(extent_T, dtype=np.float32).astype(np.float32, order='F') # we use fortran layout throughout this example
U_d = cp.empty(extent_U, dtype=np.float32, order='F')
S_d = cp.empty(shared_extent, dtype=np.float32)
V_d = cp.empty(extent_V, dtype=np.float32, order='F')
print("Allocate memory for data and initialize data.")
free_mem, total_mem = dev.mem_info
worksize = free_mem *.7
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_T = len(modes_T)
nmode_U = len(modes_U)
nmode_V = len(modes_V)
###############################
# Create tensor descriptor
###############################
# strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides = 0
desc_tensor_T = cutn.create_tensor_descriptor(handle, nmode_T, extent_T, strides, modes_T, data_type)
desc_tensor_U = cutn.create_tensor_descriptor(handle, nmode_U, extent_U, strides, modes_U, data_type)
desc_tensor_V = cutn.create_tensor_descriptor(handle, nmode_V, extent_V, strides, modes_V, data_type)
##################################
# Setup SVD truncation parameters
##################################
svd_config = cutn.create_tensor_svd_config(handle)
abs_cutoff_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.ABS_CUTOFF)
abs_cutoff = np.array(1e-2, dtype=abs_cutoff_dtype)
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.ABS_CUTOFF, abs_cutoff.ctypes.data, abs_cutoff.dtype.itemsize)
rel_cutoff_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.REL_CUTOFF)
rel_cutoff = np.array(4e-2, dtype=rel_cutoff_dtype)
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.REL_CUTOFF, rel_cutoff.ctypes.data, rel_cutoff.dtype.itemsize)
# optional: choose gesvdj algorithm with customized parameters. Default is gesvd.
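# (Added note) Besides the default GESVD, cuTensorNet also provides GESVDJ, GESVDR and
# GESVDP via TensorSVDAlgo; GESVDJ takes the algorithm-specific parameters
# ('tol' and 'max_sweeps') that are set below through ALGO_PARAMS.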
algorithm_dtype = cutn.tensor_svd_config_get_attribute_dtype(cutn.TensorSVDConfigAttribute.ALGO)
algorithm = np.array(cutn.TensorSVDAlgo.GESVDJ, dtype=algorithm_dtype)
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.ALGO, algorithm.ctypes.data, algorithm.dtype.itemsize)
algo_params_dtype = cutn.tensor_svd_algo_params_get_dtype(cutn.TensorSVDAlgo.GESVDJ)
algo_params = np.zeros(1, dtype=algo_params_dtype)
algo_params['tol'] = 1e-12
algo_params['max_sweeps'] = 80
cutn.tensor_svd_config_set_attribute(handle,
svd_config, cutn.TensorSVDConfigAttribute.ALGO_PARAMS, algo_params.ctypes.data, algo_params.dtype.itemsize)
print("Set up SVDConfig to use gesvdj algorithm with truncation")
# create SVDInfo to record truncation information
svd_info = cutn.create_tensor_svd_info(handle)
###############################
# Query Workspace Size
###############################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_svd_sizes(handle, desc_tensor_T, desc_tensor_U, desc_tensor_V, svd_config, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(handle,
work_desc, cutn.WorksizePref.MIN, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if worksize < required_workspace_size:
raise MemoryError("Not enough workspace memory is available.")
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
print("Query and allocate required workspace.")
#####
# Run
#####
min_time_cutensornet = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
for i in range(num_runs):
# restore output
U_d[:] = 0
S_d[:] = 0
V_d[:] = 0
dev.synchronize()
# restore output tensor descriptors as `cutensornet.tensor_svd` can potentially update the shared extent in desc_tensor_U/V.
    # therefore we restore desc_tensor_U/V to the original problem here
cutn.destroy_tensor_descriptor(desc_tensor_U)
cutn.destroy_tensor_descriptor(desc_tensor_V)
desc_tensor_U = cutn.create_tensor_descriptor(handle, nmode_U, extent_U, strides, modes_U, data_type)
desc_tensor_V = cutn.create_tensor_descriptor(handle, nmode_V, extent_V, strides, modes_V, data_type)
e1.record()
# execution
cutn.tensor_svd(handle, desc_tensor_T, T_d.data.ptr,
desc_tensor_U, U_d.data.ptr,
S_d.data.ptr,
desc_tensor_V, V_d.data.ptr,
svd_config, svd_info,
work_desc, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) # ms
min_time_cutensornet = min_time_cutensornet if min_time_cutensornet < time else time
full_extent_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.FULL_EXTENT)
full_extent = np.empty(1, dtype=full_extent_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.FULL_EXTENT, full_extent.ctypes.data, full_extent.itemsize)
full_extent = int(full_extent)
reduced_extent_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.REDUCED_EXTENT)
reduced_extent = np.empty(1, dtype=reduced_extent_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.REDUCED_EXTENT, reduced_extent.ctypes.data, reduced_extent.itemsize)
reduced_extent = int(reduced_extent)
discarded_weight_dtype = cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT)
discarded_weight = np.empty(1, dtype=discarded_weight_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT, discarded_weight.ctypes.data, discarded_weight.itemsize)
discarded_weight = float(discarded_weight)
algo_status_dtype = cutn.tensor_svd_algo_status_get_dtype(cutn.TensorSVDAlgo.GESVDJ)
algo_status = np.empty(1, dtype=algo_status_dtype)
cutn.tensor_svd_info_get_attribute(handle,
svd_info, cutn.TensorSVDInfoAttribute.ALGO_STATUS, algo_status.ctypes.data, algo_status.itemsize)
print(f"Execution time: {min_time_cutensornet} ms")
print("SVD truncation info:")
print(f"GESVDJ residual: {algo_status['residual'].item()}, runtime sweeps = {algo_status['sweeps'].item()}")
print(f"For fixed extent truncation of {shared_extent}, an absolute cutoff value of {float(abs_cutoff)}, and a relative cutoff value of {float(rel_cutoff)}, full extent {full_extent} is reduced to {reduced_extent}")
print(f"Discarded weight: {discarded_weight}")
# Recall that when we perform value-based truncation through an absolute or relative cutoff,
# the extent found at runtime may be lower than the one specified in the output tensor descriptors.
# Therefore we may need to create new containers for the output data, with a Fortran layout corresponding to the reduced extent.
extent_U_out, strides_U_out = cutn.get_tensor_details(handle, desc_tensor_U)[2:]
extent_V_out, strides_V_out = cutn.get_tensor_details(handle, desc_tensor_V)[2:]
if extent_U_out[1] != shared_extent:
# note strides in cutensornet are in the unit of count and strides in cupy/numpy are in the unit of nbytes
strides_U_out = [i * U_d.itemsize for i in strides_U_out]
strides_V_out = [i * V_d.itemsize for i in strides_V_out]
U_d = cp.ndarray(extent_U_out, dtype=np.float32, memptr=U_d.data, strides=strides_U_out)
S_d = cp.ndarray(extent_U_out[1], dtype=np.float32, memptr=S_d.data, order='F')
V_d = cp.ndarray(extent_V_out, dtype=np.float32, memptr=V_d.data, strides=strides_V_out)
out = cp.einsum("ixm,x,nxj->ijmn", U_d, S_d, V_d)
print(f"max diff after truncation {abs(out-T_d).max()}")
print("Check cuTensorNet result.")
#######################################################
cutn.destroy_tensor_descriptor(desc_tensor_T)
cutn.destroy_tensor_descriptor(desc_tensor_U)
cutn.destroy_tensor_descriptor(desc_tensor_V)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_tensor_svd_config(svd_config)
cutn.destroy_tensor_svd_info(svd_info)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/approxTN/tensor_svd_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import itertools
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
class MPSHelper:
"""
MPSHelper(num_sites, phys_extent, max_virtual_extent, initial_state, data_type, compute_type)
Create an MPSHelper object for gate splitting algorithm.
       i       j       k                      i       j       k
    -----A-------B-----         ------->   -----A`------B`-----
        p|      q|                             r|      s|
        GGGGGGGGG
        r|      s|
Args:
num_sites: The number of sites in the MPS.
        phys_extent: The extent of the physical mode on which the gate tensors act.
max_virtual_extent: The maximal extent allowed for the virtual mode shared between adjacent MPS tensors.
initial_state: A sequence of :class:`cupy.ndarray` representing the initial state of the MPS.
data_type (cuquantum.cudaDataType): The data type for all tensors and gates.
compute_type (cuquantum.ComputeType): The compute type for all gate splitting.
"""
def __init__(self, num_sites, phys_extent, max_virtual_extent, initial_state, data_type, compute_type):
self.num_sites = num_sites
self.phys_extent = phys_extent
self.data_type = data_type
self.compute_type = compute_type
self.phys_modes = []
self.virtual_modes = []
self.new_mode = itertools.count(start=0, step=1)
for i in range(num_sites+1):
self.virtual_modes.append(next(self.new_mode))
if i != num_sites:
self.phys_modes.append(next(self.new_mode))
untruncated_max_extent = phys_extent ** (num_sites // 2)
if max_virtual_extent == 0:
self.max_virtual_extent = untruncated_max_extent
else:
self.max_virtual_extent = min(max_virtual_extent, untruncated_max_extent)
self.handle = cutn.create()
self.work_desc = cutn.create_workspace_descriptor(self.handle)
self.svd_config = cutn.create_tensor_svd_config(self.handle)
self.svd_info = cutn.create_tensor_svd_info(self.handle)
self.gate_algo = cutn.GateSplitAlgo.DIRECT
self.desc_tensors = []
self.state_tensors = []
# create tensor descriptors
for i in range(self.num_sites):
            self.state_tensors.append(initial_state[i].astype(initial_state[i].dtype, order="F"))
extent = self.get_tensor_extent(i)
modes = self.get_tensor_modes(i)
desc_tensor = cutn.create_tensor_descriptor(self.handle, 3, extent, 0, modes, self.data_type)
self.desc_tensors.append(desc_tensor)
def get_tensor(self, site):
"""Get the tensor operands for a specific site."""
return self.state_tensors[site]
def get_tensor_extent(self, site):
"""Get the extent of the MPS tensor at a specific site."""
return self.state_tensors[site].shape
def get_tensor_modes(self, site):
"""Get the current modes of the MPS tensor at a specific site."""
return (self.virtual_modes[site], self.phys_modes[site], self.virtual_modes[site+1])
def set_svd_config(self, abs_cutoff, rel_cutoff, renorm, partition):
"""Update the SVD truncation setting.
Args:
abs_cutoff: The cutoff value for absolute singular value truncation.
rel_cutoff: The cutoff value for relative singular value truncation.
renorm (cuquantum.cutensornet.TensorSVDNormalization): The option for renormalization of the truncated singular values.
partition (cuquantum.cutensornet.TensorSVDPartition): The option for partitioning of the singular values.
"""
if partition != cutn.TensorSVDPartition.UV_EQUAL:
raise NotImplementedError("this basic example expects partition to be cutensornet.TensorSVDPartition.UV_EQUAL")
svd_config_attributes = [cutn.TensorSVDConfigAttribute.ABS_CUTOFF,
cutn.TensorSVDConfigAttribute.REL_CUTOFF,
cutn.TensorSVDConfigAttribute.S_NORMALIZATION,
cutn.TensorSVDConfigAttribute.S_PARTITION]
for (attr, value) in zip(svd_config_attributes, [abs_cutoff, rel_cutoff, renorm, partition]):
dtype = cutn.tensor_svd_config_get_attribute_dtype(attr)
value = np.array([value], dtype=dtype)
cutn.tensor_svd_config_set_attribute(self.handle,
self.svd_config, attr, value.ctypes.data, value.dtype.itemsize)
def set_gate_algorithm(self, gate_algo):
"""Set the algorithm to use for all gate split operations.
Args:
gate_algo (cuquantum.cutensornet.GateSplitAlgo): The gate splitting algorithm to use.
"""
self.gate_algo = gate_algo
def compute_max_workspace_sizes(self):
"""Compute the maximal workspace needed for MPS gating algorithm."""
modes_in_A = [ord(c) for c in ('i', 'p', 'j')]
modes_in_B = [ord(c) for c in ('j', 'q', 'k')]
modes_in_G = [ord(c) for c in ('p', 'q', 'r', 's')]
modes_out_A = [ord(c) for c in ('i', 'r', 'j')]
modes_out_B = [ord(c) for c in ('j', 's', 'k')]
max_extents_AB = (self.max_virtual_extent, self.phys_extent, self.max_virtual_extent)
extents_in_G = (self.phys_extent, self.phys_extent, self.phys_extent, self.phys_extent)
desc_tensor_in_A = cutn.create_tensor_descriptor(self.handle, 3, max_extents_AB, 0, modes_in_A, self.data_type)
desc_tensor_in_B = cutn.create_tensor_descriptor(self.handle, 3, max_extents_AB, 0, modes_in_B, self.data_type)
desc_tensor_in_G = cutn.create_tensor_descriptor(self.handle, 4, extents_in_G, 0, modes_in_G, self.data_type)
desc_tensor_out_A = cutn.create_tensor_descriptor(self.handle, 3, max_extents_AB, 0, modes_out_A, self.data_type)
desc_tensor_out_B = cutn.create_tensor_descriptor(self.handle, 3, max_extents_AB, 0, modes_out_B, self.data_type)
cutn.workspace_compute_gate_split_sizes(self.handle,
desc_tensor_in_A, desc_tensor_in_B, desc_tensor_in_G,
desc_tensor_out_A, desc_tensor_out_B,
self.gate_algo, self.svd_config, self.compute_type, self.work_desc)
workspace_size = cutn.workspace_get_memory_size(self.handle, self.work_desc, cutn.WorksizePref.MIN, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
# free resources
cutn.destroy_tensor_descriptor(desc_tensor_in_A)
cutn.destroy_tensor_descriptor(desc_tensor_in_B)
cutn.destroy_tensor_descriptor(desc_tensor_in_G)
cutn.destroy_tensor_descriptor(desc_tensor_out_A)
cutn.destroy_tensor_descriptor(desc_tensor_out_B)
return workspace_size
def set_workspace(self, work, workspace_size):
"""Compute the maximal workspace needed for MPS gating algorithm.
Args:
work: Pointer to the allocated workspace.
workspace_size: The required workspace size on the device.
"""
cutn.workspace_set_memory(self.handle, self.work_desc, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH, work.ptr, workspace_size)
def apply_gate(self, site_A, site_B, gate, verbose, stream):
"""Inplace execution of the apply gate algoritm on site A and site B.
Args:
site_A: The first site on which the gate is applied to.
site_B: The second site on which the gate is applied to.
gate (cupy.ndarray): The input data for the gate tensor.
verbose: Whether to print out the runtime information during truncation.
stream (cupy.cuda.Stream): The CUDA stream on which the computation is performed.
"""
if site_B - site_A != 1:
raise ValueError("Site B must be the right site of site A")
if site_B >= self.num_sites:
raise ValueError("Site index cannot exceed maximum number of sites")
desc_tensor_in_A = self.desc_tensors[site_A]
desc_tensor_in_B = self.desc_tensors[site_B]
phys_mode_in_A = self.phys_modes[site_A]
phys_mode_in_B = self.phys_modes[site_B]
phys_mode_out_A = next(self.new_mode)
phys_mode_out_B = next(self.new_mode)
modes_G = (phys_mode_in_A, phys_mode_in_B, phys_mode_out_A, phys_mode_out_B)
extent_G = (self.phys_extent, self.phys_extent, self.phys_extent, self.phys_extent)
desc_tensor_in_G = cutn.create_tensor_descriptor(self.handle, 4, extent_G, 0, modes_G, self.data_type)
# construct and initialize the expected output A and B
tensor_in_A = self.state_tensors[site_A]
tensor_in_B = self.state_tensors[site_B]
left_extent_A = tensor_in_A.shape[0]
extent_AB_in = tensor_in_A.shape[2]
right_extent_B = tensor_in_B.shape[2]
combined_extent_left = min(left_extent_A, extent_AB_in * self.phys_extent) * self.phys_extent
combined_extent_right = min(right_extent_B, extent_AB_in * self.phys_extent) * self.phys_extent
extent_Aout_B = min(combined_extent_left, combined_extent_right, self.max_virtual_extent)
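        # (Added note) The new shared extent is bounded by the dimensions of the two
        # "halves" of the combined tensor and by the user-imposed cap max_virtual_extent;
        # value-based truncation inside gate_split may shrink it further at runtime.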
extent_out_A = (left_extent_A, self.phys_extent, extent_Aout_B)
extent_out_B = (extent_Aout_B, self.phys_extent, right_extent_B)
tensor_out_A = cp.zeros(extent_out_A, dtype=tensor_in_A.dtype, order="F")
tensor_out_B = cp.zeros(extent_out_B, dtype=tensor_in_B.dtype, order="F")
# create tensor descriptors for output A and B
modes_out_A = (self.virtual_modes[site_A], phys_mode_out_A, self.virtual_modes[site_A+1])
modes_out_B = (self.virtual_modes[site_B], phys_mode_out_B, self.virtual_modes[site_B+1])
desc_tensor_out_A = cutn.create_tensor_descriptor(self.handle, 3, extent_out_A, 0, modes_out_A, self.data_type)
desc_tensor_out_B = cutn.create_tensor_descriptor(self.handle, 3, extent_out_B, 0, modes_out_B, self.data_type)
cutn.gate_split(self.handle,
desc_tensor_in_A, tensor_in_A.data.ptr,
desc_tensor_in_B, tensor_in_B.data.ptr,
desc_tensor_in_G, gate.data.ptr,
desc_tensor_out_A, tensor_out_A.data.ptr,
0, # we factorize singular values equally onto output A and B.
desc_tensor_out_B, tensor_out_B.data.ptr,
self.gate_algo, self.svd_config, self.compute_type,
self.svd_info, self.work_desc, stream.ptr)
if verbose:
full_extent = np.array([0], dtype=cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.FULL_EXTENT))
reduced_extent = np.array([0], dtype=cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.REDUCED_EXTENT))
discarded_weight = np.array([0], dtype=cutn.tensor_svd_info_get_attribute_dtype(cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT))
cutn.tensor_svd_info_get_attribute(
self.handle, self.svd_info, cutn.TensorSVDInfoAttribute.FULL_EXTENT,
full_extent.ctypes.data, full_extent.dtype.itemsize)
cutn.tensor_svd_info_get_attribute(
self.handle, self.svd_info, cutn.TensorSVDInfoAttribute.REDUCED_EXTENT,
reduced_extent.ctypes.data, reduced_extent.dtype.itemsize)
cutn.tensor_svd_info_get_attribute(
self.handle, self.svd_info, cutn.TensorSVDInfoAttribute.DISCARDED_WEIGHT,
discarded_weight.ctypes.data, discarded_weight.dtype.itemsize)
print("Virtual bond truncated from {0} to {1} with a discarded weight of {2:.6f}".format(full_extent[0], reduced_extent[0], discarded_weight[0]))
self.phys_modes[site_A] = phys_mode_out_A
self.phys_modes[site_B] = phys_mode_out_B
self.desc_tensors[site_A] = desc_tensor_out_A
self.desc_tensors[site_B] = desc_tensor_out_B
extent_out_A, strides_out_A = cutn.get_tensor_details(self.handle, desc_tensor_out_A)[2:]
extent_out_B, strides_out_B = cutn.get_tensor_details(self.handle, desc_tensor_out_B)[2:]
        # Recall that `cutensornet.gate_split` can potentially find a reduced extent during SVD truncation when value-based truncation is used.
        # Therefore we update the containers for output tensors A and B here.
if extent_out_A[2] != extent_Aout_B:
# note strides in cutensornet are in the unit of count and strides in cupy/numpy are in the unit of nbytes
strides_out_A = [i * tensor_out_A.itemsize for i in strides_out_A]
strides_out_B = [i * tensor_out_B.itemsize for i in strides_out_B]
tensor_out_A = cp.ndarray(extent_out_A, dtype=tensor_out_A.dtype, memptr=tensor_out_A.data, strides=strides_out_A)
tensor_out_B = cp.ndarray(extent_out_B, dtype=tensor_out_B.dtype, memptr=tensor_out_B.data, strides=strides_out_B)
self.state_tensors[site_A] = tensor_out_A
self.state_tensors[site_B] = tensor_out_B
cutn.destroy_tensor_descriptor(desc_tensor_in_A)
cutn.destroy_tensor_descriptor(desc_tensor_in_B)
cutn.destroy_tensor_descriptor(desc_tensor_in_G)
def __del__(self):
"""Free all resources owned by the object."""
for desc_tensor in self.desc_tensors:
cutn.destroy_tensor_descriptor(desc_tensor)
        cutn.destroy_workspace_descriptor(self.work_desc)
        cutn.destroy_tensor_svd_config(self.svd_config)
        cutn.destroy_tensor_svd_info(self.svd_info)
        cutn.destroy(self.handle)
if __name__ == '__main__':
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
data_type = cuquantum.cudaDataType.CUDA_C_64F
compute_type = cuquantum.ComputeType.COMPUTE_64F
num_sites = 16
phys_extent = 2
max_virtual_extent = 12
## we initialize the MPS state as a product state |000...000>
initial_state = []
for i in range(num_sites):
        # we create dummy indices for the MPS tensors on the boundary for easier bookkeeping
# we'll use Fortran layout throughout this example
tensor = cp.zeros((1,2,1), dtype=np.complex128, order="F")
tensor[0,0,0] = 1.0
initial_state.append(tensor)
##################################
# Initialize an MPSHelper object
##################################
mps_helper = MPSHelper(num_sites, phys_extent, max_virtual_extent, initial_state, data_type, compute_type)
##################################
# Setup options for gate operation
##################################
abs_cutoff = 1e-2
rel_cutoff = 1e-2
renorm = cutn.TensorSVDNormalization.L2
partition = cutn.TensorSVDPartition.UV_EQUAL
mps_helper.set_svd_config(abs_cutoff, rel_cutoff, renorm, partition)
gate_algo = cutn.GateSplitAlgo.REDUCED
mps_helper.set_gate_algorithm(gate_algo)
#####################################
# Workspace estimation and allocation
#####################################
free_mem, total_mem = dev.mem_info
worksize = free_mem *.7
required_workspace_size = mps_helper.compute_max_workspace_sizes()
    if worksize < required_workspace_size:
        raise MemoryError("Not enough workspace memory is available.")
    work = cp.cuda.alloc(required_workspace_size)
    print(f"Maximal workspace size required: {required_workspace_size / 1024 ** 3:.3f} GB")
    mps_helper.set_workspace(work, required_workspace_size)
###########
# Execution
###########
stream = cp.cuda.Stream()
cp.random.seed(0)
num_layers = 10
for i in range(num_layers):
start_site = i % 2
print(f"Cycle {i}:")
verbose = (i == num_layers-1)
for j in range(start_site, num_sites-1, 2):
# initialize a random 2-qubit gate
gate = cp.random.random([phys_extent,]*4) + 1.j * cp.random.random([phys_extent,]*4)
gate = gate.astype(gate.dtype, order="F")
mps_helper.apply_gate(j, j+1, gate, verbose, stream)
stream.synchronize()
print("========================")
print("After gate application")
for i in range(num_sites):
tensor = mps_helper.get_tensor(i)
modes = mps_helper.get_tensor_modes(i)
print(f"Site {i}, extent: {tensor.shape}, modes: {modes}") | cuQuantum-main | python/samples/cutensornet/approxTN/mps_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
###############################################
# Tensor QR: T_{i,j,m,n} -> Q_{i,x,m} R_{n,x,j}
###############################################
data_type = cuquantum.cudaDataType.CUDA_R_32F
# Create an array of modes
modes_T = [ord(c) for c in ('i','j','m','n')] # input
modes_Q = [ord(c) for c in ('i','x','m')] # QR output
modes_R = [ord(c) for c in ('n','x','j')]
# Create an array of extent (shapes) for each tensor
extent_T = (16, 16, 16, 16)
extent_Q = (16, 256, 16)
extent_R = (16, 256, 16)
############################
# Allocate & initialize data
############################
T_d = cp.random.random(extent_T, dtype=np.float32).astype(np.float32, order='F') # we use fortran layout throughout this example
Q_d = cp.empty(extent_Q, dtype=np.float32, order='F')
R_d = cp.empty(extent_R, dtype=np.float32, order='F')
print("Allocate memory for data and initialize data.")
free_mem, total_mem = dev.mem_info
worksize = free_mem *.7
#############
# cuTensorNet
#############
stream = cp.cuda.Stream()
handle = cutn.create()
nmode_T = len(modes_T)
nmode_Q = len(modes_Q)
nmode_R = len(modes_R)
###############################
# Create tensor descriptors
###############################
# strides are optional; if no stride (0) is provided, then cuTensorNet assumes a generalized column-major data layout
strides = 0
desc_tensor_T = cutn.create_tensor_descriptor(handle, nmode_T, extent_T, strides, modes_T, data_type)
desc_tensor_Q = cutn.create_tensor_descriptor(handle, nmode_Q, extent_Q, strides, modes_Q, data_type)
desc_tensor_R = cutn.create_tensor_descriptor(handle, nmode_R, extent_R, strides, modes_R, data_type)
#######################################
# Query and allocate required workspace
#######################################
work_desc = cutn.create_workspace_descriptor(handle)
cutn.workspace_compute_qr_sizes(handle, desc_tensor_T, desc_tensor_Q, desc_tensor_R, work_desc)
required_workspace_size = cutn.workspace_get_memory_size(handle,
work_desc, cutn.WorksizePref.MIN, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if worksize < required_workspace_size:
raise MemoryError("Not enough workspace memory is available.")
work = cp.cuda.alloc(required_workspace_size)
cutn.workspace_set_memory(
handle, work_desc,
cutn.Memspace.DEVICE,
cutn.WorkspaceKind.SCRATCH,
work.ptr, required_workspace_size)
print("Query and allocate required workspace.")
###########
# Execution
###########
min_time_cutensornet = 1e100
num_runs = 3 # to get stable perf results
e1 = cp.cuda.Event()
e2 = cp.cuda.Event()
for i in range(num_runs):
# restore output
Q_d[:] = 0
R_d[:] = 0
dev.synchronize()
e1.record()
# execution
cutn.tensor_qr(handle, desc_tensor_T, T_d.data.ptr,
desc_tensor_Q, Q_d.data.ptr,
desc_tensor_R, R_d.data.ptr,
work_desc, stream.ptr)
e2.record()
# Synchronize and measure timing
e2.synchronize()
time = cp.cuda.get_elapsed_time(e1, e2) # ms
min_time_cutensornet = min_time_cutensornet if min_time_cutensornet < time else time
print(f"Execution time: {min_time_cutensornet} ms")
out = cp.einsum("ixm,nxj->ijmn", Q_d, R_d)
rtol = atol = 1e-5
if not cp.allclose(out, T_d, rtol=rtol, atol=atol):
raise RuntimeError(f"result is incorrect, max diff {abs(out-T_d).max()}")
print("Check cuTensorNet result.")
################
# Free resources
################
cutn.destroy_tensor_descriptor(desc_tensor_T)
cutn.destroy_tensor_descriptor(desc_tensor_Q)
cutn.destroy_tensor_descriptor(desc_tensor_R)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy(handle)
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/approxTN/tensor_qr_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays. Specify network options.
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract, NetworkOptions
a = np.ones((3,2))
b = np.ones((2,3))
o = NetworkOptions(memory_limit="10kb") # As a value with units.
o = NetworkOptions(memory_limit=12345) # As a number of bytes (int or float).
o = NetworkOptions(memory_limit="10%") # As a percentage of device memory.
r = contract("ij,jk", a, b, options=o)
print(r)
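# Added variant (a sketch): the options may also be passed as a plain dict, from which
# a NetworkOptions object is constructed internally.
r = contract("ij,jk", a, b, options={"memory_limit": "10%"})
print(r)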
| cuQuantum-main | python/samples/cutensornet/coarse/example5.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating ellipsis broadcasting.
"""
import numpy as np
from cuquantum import contract
a = np.arange(3.).reshape(3,1)
b = np.arange(9.).reshape(3,3)
# Double inner product (Frobenuis inner product) of two matrices.
expr = "...,...->"
r = contract(expr, a, b)
print(r)
assert np.allclose(r, 54.), "Incorrect results."
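# (Added note) With these operand shapes the ellipsis expression above is equivalent
# to the explicit form "ij,ij->": the size-1 column of `a` is broadcast against `b`
# before the full reduction.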
| cuQuantum-main | python/samples/cutensornet/coarse/example19.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays with explicit Einstein summation.
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
r = contract("ij,jk->ik", a, b)
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example1.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating ellipsis broadcasting.
"""
import numpy as np
from cuquantum import contract
a = np.random.rand(3,1)
b = np.random.rand(3,3)
# Elementwise product of two matrices.
expr = "...,..."
r = contract(expr, a, b)
s = np.einsum(expr, a, b)
assert np.allclose(r, s), "Incorrect results."
| cuQuantum-main | python/samples/cutensornet/coarse/example18.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating automatically parallelizing slice-based tensor network contraction with cuQuantum using MPI.
Here we use:
- the buffer interface APIs offered by mpi4py v3.1.0+ for communicating ndarray-like objects
- CUDA-aware MPI (note: as of cuTensorNet v2.0.0 using non-CUDA-aware MPI is not supported
  and would cause a segfault).
- cuQuantum 22.11+ (cuTensorNet v2.0.0+) for the new distributed contraction feature
$ mpiexec -n 4 python example22_mpi_auto.py
"""
import os
import cupy as cp
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI # this line initializes MPI
import cuquantum
from cuquantum import cutensornet as cutn
root = 0
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
# Check if the env var is set
if not "CUTENSORNET_COMM_LIB" in os.environ:
raise RuntimeError("need to set CUTENSORNET_COMM_LIB to the path of the MPI wrapper library")
if not os.path.isfile(os.environ["CUTENSORNET_COMM_LIB"]):
raise RuntimeError("CUTENSORNET_COMM_LIB does not point to the path of the MPI wrapper library")
# Assign the device for each process.
device_id = rank % getDeviceCount()
cp.cuda.Device(device_id).use()
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
# Set the operand data on root. Since we use the buffer interface APIs offered by mpi4py for communicating array
# objects, we can directly use device arrays (cupy.ndarray, for example) as we assume mpi4py is built against
# a CUDA-aware MPI.
if rank == root:
operands = [cp.random.rand(*shape) for shape in shapes]
else:
operands = [cp.empty(shape) for shape in shapes]
# Broadcast the operand data. Throughout this sample we take advantage of the upper-case mpi4py APIs
# that support communicating CPU & GPU buffers (without staging) to reduce serialization overhead for
# array-like objects. This capability requires mpi4py v3.1.0+.
for operand in operands:
comm.Bcast(operand, root)
# Bind the communicator to the library handle
handle = cutn.create()
cutn.distributed_reset_configuration(
handle, *cutn.get_mpi_comm_pointer(comm)
)
# Compute the contraction (with distributed path finding & contraction execution)
result = cuquantum.contract(expr, *operands, options={'device_id' : device_id, 'handle': handle})
# Check correctness.
if rank == root:
result_cp = cp.einsum(expr, *operands, optimize=True)
print("Does the cuQuantum parallel contraction result match the cupy.einsum result?", cp.allclose(result, result_cp))
| cuQuantum-main | python/samples/cutensornet/coarse/example22_mpi_auto.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays with interleaved format (explicit form for output indices).
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
r = contract(a, ['first', 'second'], b, ['second', 'third'], ['first', 'third'])
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example4.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating mode broadcasting.
"""
import numpy as np
from cuquantum import contract
a = np.random.rand(3,1)
b = np.random.rand(3,3)
expr = "ij,jk"
r = contract(expr, a, b)
s = np.einsum(expr, a, b)
assert np.allclose(r, s), "Incorrect results."
| cuQuantum-main | python/samples/cutensornet/coarse/example17.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Set sliced modes.
"""
from cuquantum import contract, OptimizerOptions
import numpy as np
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
operands = [np.random.rand(*shape) for shape in shapes]
# Set sliced modes.
o = OptimizerOptions(slicing=(('e', 2), ('h',1)))
r = contract(expr, *operands, optimize=o)
s = np.einsum(expr, *operands)
assert np.allclose(r, s), "Incorrect results."
| cuQuantum-main | python/samples/cutensornet/coarse/example13.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Verify FLOPS and largest intermediate size against NumPy for a given path.
"""
import re
from cuquantum import contract_path, OptimizerOptions
import numpy as np
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
operands = [np.random.rand(*shape) for shape in shapes]
# NumPy path and metrics.
path_np, i = np.einsum_path(expr, *operands)
flops_np = float(re.search("Optimized FLOP count:(.*)\n", i).group(1))
largest_np = float(re.search("Largest intermediate:(.*) elements\n", i).group(1))
flops_np -= 1 # NumPy adds 1 to the FLOP count.
# Set path and obtain metrics.
o = OptimizerOptions(path=path_np[1:])
path, i = contract_path(expr, *operands, optimize=o)
assert list(path) == path_np[1:], "Error: path doesn't match what was set."
flops = i.opt_cost
largest = i.largest_intermediate
if flops != flops_np or largest != largest_np:
message = f""" Results don't match.
path = {path_np}
flops: NumPy = {flops_np}, cuTensorNet = {flops},
largest intermediate: NumPy = {largest_np}, cuTensorNet = {largest}
"""
raise ValueError(message)
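# Added confirmation: reaching this point means both metrics agree with NumPy.
print(f"FLOP count ({flops}) and largest intermediate ({largest} elements) match NumPy.")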
| cuQuantum-main | python/samples/cutensornet/coarse/example12.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating a batched operation.
"""
import numpy as np
from cuquantum import contract
a = np.random.rand(2,4)
b = np.random.rand(2,4)
# Batched inner product.
expr = "ij,ij->i"
r = contract(expr, a, b)
s = np.einsum(expr, a, b)
assert np.allclose(r, s), "Incorrect results."
| cuQuantum-main | python/samples/cutensornet/coarse/example16.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using CuPy ndarrays.
The contraction result is also a CuPy ndarray on the same device.
"""
import cupy as cp
from cuquantum import contract
# dev can be any valid device ID on your system, here let's
# pick the first device
dev = 0
with cp.cuda.Device(dev):
a = cp.ones((3,2))
b = cp.ones((2,3))
r = contract("ij,jk", a, b)
print(f"result type = {type(r)}")
print(f"result device = {r.device}")
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example9.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays. Return contraction path and optimizer information.
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,8))
c = np.ones((8,3))
r, (p, i) = contract("ij,jk,kl->il", a, b, c, return_info=True)
print(f"path = {p}")
print(f"optimizer information = {i}")
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example11.py |
"""
Example illustrating lazy conjugation using tensor qualifiers.
"""
import numpy as np
from cuquantum import contract, tensor_qualifiers_dtype
a = np.random.rand(3, 2) + 1j * np.random.rand(3, 2)
b = np.random.rand(2, 3) + 1j * np.random.rand(2, 3)
# Specify tensor qualifiers for the second tensor operand 'b'.
qualifiers = np.zeros((2,), dtype=tensor_qualifiers_dtype)
qualifiers[1]['is_conjugate'] = True
r = contract("ij,jk", a, b, qualifiers=qualifiers)
s = np.einsum("ij,jk", a, b.conj())
assert np.allclose(r, s), "Incorrect results for a * conjugate(b)"
| cuQuantum-main | python/samples/cutensornet/coarse/example21.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating a generalized Einstein summation expression.
"""
import numpy as np
from cuquantum import contract
a = np.random.rand(3,2)
b = np.random.rand(3,3)
c = np.random.rand(3,2)
d = np.random.rand(3,4)
# A hyperedge example.
expr = "ij,ik,ij,kl->l"
r = contract(expr, a, b, c, d)
s = np.einsum(expr, a, b, c, d)
assert np.allclose(r, s), "Incorrect results."
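# (Added note) Mode 'i' appears in three of the inputs and not in the output, forming
# a "hyperedge" that is summed over; cuTensorNet handles such expressions directly,
# as the check against NumPy above confirms.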
| cuQuantum-main | python/samples/cutensornet/coarse/example15.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating a simple memory manager plugin using a PyTorch tensor as a memory buffer.
"""
import logging
import torch
from cuquantum import contract, MemoryPointer
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
class TorchMemMgr:
def __init__(self, device):
self.device = device
self.logger = logging.getLogger()
def memalloc(self, size):
buffer = torch.empty((size, ), device=self.device, dtype=torch.int8, requires_grad=False)
device_pointer = buffer.data_ptr()
self.logger.info(f"The user memory allocator has allocated {size} bytes at pointer {device_pointer}.")
def create_finalizer():
def finalizer():
buffer # Keep buffer alive for as long as it is needed.
self.logger.info("The memory allocation has been released.")
return finalizer
return MemoryPointer(device_pointer, size, finalizer=create_finalizer())
device_id = 0
a = torch.rand((3,2), device=device_id)
b = torch.rand((2,3), device=device_id)
r = contract("ij,jk", a, b, options={'allocator' : TorchMemMgr(device_id)})
| cuQuantum-main | python/samples/cutensornet/coarse/example20.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating a generalized Einstein summation expression.
"""
import numpy as np
from cuquantum import contract
a = np.arange(16.).reshape(4,4)
b = np.arange(64.).reshape(4,4,4)
# Elementwise multiplication of tensor diagonals.
expr = "ii,iii->i"
r = contract(expr, a, b)
s = np.einsum(expr, a, b)
assert np.allclose(r, s), "Incorrect results."
| cuQuantum-main | python/samples/cutensornet/coarse/example14.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays. Provide contraction path.
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract, OptimizerOptions
a = np.ones((3,2))
b = np.ones((2,3))
c = np.ones((3,3))
o = OptimizerOptions(path=[(0,2), (0,1)])
r = contract("ij,jk,kl->il", a, b, c, optimize=o)
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example8.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using PyTorch tensors.
The contraction result is also a PyTorch tensor on the same device.
"""
import torch
from cuquantum import contract
# dev can be any valid device ID on your system, here let's
# pick the first device
dev = 0
a = torch.ones((3,2), device=f'cuda:{dev}')
b = torch.ones((2,3), device=f'cuda:{dev}')
r = contract("ij,jk", a, b)
print(f"result type = {type(r)}")
print(f"result device = {r.device}")
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example10.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays with explicit Einstein summation (Unicode characters).
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
r = contract("αβ,βγ->αγ", a, b)
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example3.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays. Specify logging options.
The contraction result is also a NumPy ndarray.
"""
import logging
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
r = contract("ij,jk", a, b)
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example7.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays. Specify CUDA stream for the computation.
The contraction result is also a NumPy ndarray.
"""
import cupy as cp
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,3))
s = cp.cuda.Stream()
r = contract("αβ,βγ->αγ", a, b, stream=s)
print(r)
| cuQuantum-main | python/samples/cutensornet/coarse/example6.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using NumPy ndarrays with implicit Einstein summation.
The contraction result is also a NumPy ndarray.
"""
import numpy as np
from cuquantum import contract
a = np.ones((3,2))
b = np.ones((2,4))
r = contract("ij,jh", a, b) # output modes = "hi" (lexicographically sorted in implicit form).
print(r)
n = np.einsum("ij,jh", a, b)
assert np.allclose(r, n), 'Incorrect results for "ij,jh".'
| cuQuantum-main | python/samples/cutensornet/coarse/example2.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
SVD Example using CuPy ndarray.
The decomposition results are also CuPy ndarrays.
"""
import cupy as cp
from cuquantum import tensor
a = cp.ones((3,2,4,5))
u, s, v = tensor.decompose("ijab->ixa,xbj", a, method=tensor.SVDMethod())
print(s)
| cuQuantum-main | python/samples/cutensornet/tensor/example05-svd_cupy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
truncated SVD Example using NumPy ndarray with various SVD algorithms.
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
from cuquantum import tensor
a = np.ones((3,2,4,5))
base_options = {'max_extent': 4,
'abs_cutoff': 0.1,
'rel_cutoff': 0.1}
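# The SVD algorithms map to cuSOLVER routines: gesvd (standard QR-based), gesvdj (Jacobi),
# gesvdr (randomized), and gesvdp (polar-decomposition-based).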
for algorithm in ('gesvd', 'gesvdj', 'gesvdr', 'gesvdp'):
method = tensor.SVDMethod(algorithm=algorithm, **base_options)
u, s, v, info = tensor.decompose("ijab->ixa,xbj", a, method=method, return_info=True)
print(s)
print(info)
| cuQuantum-main | python/samples/cutensornet/tensor/example11-svd_algorithms.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using NumPy ndarray.
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
from cuquantum import tensor
a = np.ones((3,2,4,5))
q, r = tensor.decompose("ijab->ixa,xbj", a)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example01-qr_numpy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
SVD Example using NumPy ndarray.
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
from cuquantum import tensor
a = np.ones((3,2,4,5))
u, s, v = tensor.decompose("ijab->ixa,xbj", a, method=tensor.SVDMethod())
print(s)
| cuQuantum-main | python/samples/cutensornet/tensor/example04-svd_numpy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using NumPy ndarray. Specify the library handle and device ID via decomposition options.
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
from cuquantum import tensor
import cuquantum.cutensornet as cutn
a = np.ones((3,2,4,5))
handle = cutn.create()
options = {'device_id': 0,
'handle': handle}
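# equivalent usage below (assuming the dataclass form is available):
#options = tensor.DecompositionOptions(**options)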
q, r = tensor.decompose("ijab->ixa,xbj", a, options=options)
cutn.destroy(handle)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example10-decomposition_options.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
truncated SVD Example using NumPy ndarray. Return truncation information.
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
from cuquantum import tensor
a = np.ones((3,2,4,5))
method = {'max_extent': 4,
'abs_cutoff': 0.1,
'rel_cutoff': 0.1}
# equivalent usage below:
#method = tensor.SVDMethod(**method)
u, s, v, info = tensor.decompose("ijab->ixa,xbj", a, method=method, return_info=True)
print(s)
print(info)
| cuQuantum-main | python/samples/cutensornet/tensor/example09-svd_truncation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using NumPy ndarray. Specify the logging options
The decomposition results are also NumPy ndarrays.
"""
import logging
import numpy as np
from cuquantum import tensor
a = np.ones((3,2,4,5))
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
q, r = tensor.decompose("ijab->ixa,xbj", a)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example08-logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using CuPy ndarray with ellipsis notation.
The decomposition results are also CuPy ndarrays.
"""
import cupy as cp
from cuquantum import tensor
a = cp.ones((3,2,4,5))
q, r = tensor.decompose("ij...->ix,xj...", a)
q1, r1 = tensor.decompose("ijab->ix,xjab", a)
assert q.shape == q1.shape
assert r.shape == r1.shape
| cuQuantum-main | python/samples/cutensornet/tensor/example11-qr_ellipses.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using NumPy ndarray. Specify the CUDA stream for the computation
The decomposition results are also NumPy ndarrays.
"""
import numpy as np
import cupy as cp
from cuquantum import tensor
a = np.ones((3,2,4,5))
stream = cp.cuda.Stream()
q, r = tensor.decompose("ijab->ixa,xbj", a, stream=stream)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example07-stream.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using CuPy ndarray.
The decomposition results are also CuPy ndarrays.
"""
import cupy as cp
from cuquantum import tensor
a = cp.ones((3,2,4,5))
q, r = tensor.decompose("ijab->ixa,xbj", a)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example02-qr_cupy.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
SVD Example using PyTorch Tensor.
The decomposition results are also PyTorch Tensors.
"""
import torch
from cuquantum import tensor
a = torch.ones((3,2,4,5))
u, s, v = tensor.decompose("ijab->ixa,xbj", a, method=tensor.SVDMethod())
print(s)
| cuQuantum-main | python/samples/cutensornet/tensor/example06-svd_torch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
QR Example using PyTorch Tensor.
The decomposition results are also PyTorch Tensors.
"""
import torch
from cuquantum import tensor
a = torch.ones((3,2,4,5))
q, r = tensor.decompose("ijab->ixa,xbj", a)
print(q)
print(r)
| cuQuantum-main | python/samples/cutensornet/tensor/example03-qr_torch.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example using operations on the Network object with torch tensors. This can be used to
amortize the cost of finding the best contraction path and autotuning the network across
multiple contractions.
The contraction result is also a torch tensor on the same device as the operands.
"""
import torch
from cuquantum import Network
# The parameters of the tensor network.
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
device = 'cuda'
# Create torch tensors.
operands = [torch.rand(*shape, dtype=torch.float64, device=device) for shape in shapes]
# Create the network.
with Network(expr, *operands) as n:
# Find the contraction path.
path, info = n.contract_path({'samples': 500})
# Autotune the network.
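    # Autotuning contracts the network a few times to pick the best kernels;
    # the cost is amortized over the repeated contractions below.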
n.autotune(iterations=5)
# Perform the contraction.
r1 = n.contract()
print("Contract the network (r1):")
print(r1)
# Create new operands.
operands = [i*operand for i, operand in enumerate(operands, start=1)]
# Reset the network operands.
n.reset_operands(*operands)
# Perform the contraction with the new operands.
print("Reset the operands and perform the contraction (r2):")
r2 = n.contract()
print(r2)
from math import factorial
print(f"Is r2 the expected result?: {torch.allclose(r2, factorial(len(operands))*r1)}")
# The operands can also be updated using in-place operations if they are on the GPU.
for i, operand in enumerate(operands, start=1):
operand /= i
    # The operands don't have to be reset for in-place operations. Perform the contraction.
print("Reset the operands in-place and perform the contraction (r3):")
r3 = n.contract()
print(r3)
print(f"Is r3 the expected result?: {torch.allclose(r3, r1)}")
# The context manages the network resources, so n.free() doesn't have to be called.
| cuQuantum-main | python/samples/cutensornet/fine/example1.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating slice-based parallel tensor network contraction with cuQuantum using MPI.
$ mpiexec -n 4 python example2_mpi.py
"""
# Sphinx
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
import numpy as np
from cuquantum import Network
root = 0
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
# Set the operand data on root.
operands = [np.random.rand(*shape) for shape in shapes] if rank == root else None
# Broadcast the operand data.
operands = comm.bcast(operands, root)
# Assign the device for each process.
device_id = rank % getDeviceCount()
# Create network object.
network = Network(expr, *operands, options={'device_id' : device_id})
# Compute the path on all ranks with 8 samples for hyperoptimization. Force slicing to enable parallel contraction.
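# Requiring at least max(16, size) slices ensures every rank has at least one slice to contract.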
path, info = network.contract_path(optimize={'samples': 8, 'slicing': {'min_slices': max(16, size)}})
# Select the best path from all ranks.
opt_cost, sender = comm.allreduce(sendobj=(info.opt_cost, rank), op=MPI.MINLOC)
if rank == root:
print(f"Process {sender} has the path with the lowest FLOP count {opt_cost}.")
# Broadcast info from the sender to all other ranks.
info = comm.bcast(info, sender)
# Set path and slices.
path, info = network.contract_path(optimize={'path': info.path, 'slicing': info.slices})
# Calculate this process's share of the slices.
num_slices = info.num_slices
chunk, extra = num_slices // size, num_slices % size
slice_begin = rank * chunk + min(rank, extra)
slice_end = num_slices if rank == size - 1 else (rank + 1) * chunk + min(rank + 1, extra)
slices = range(slice_begin, slice_end)
print(f"Process {rank} is processing slice range: {slices}.")
# Contract the group of slices the process is responsible for.
result = network.contract(slices=slices)
# Sum the partial contribution from each process on root.
result = comm.reduce(sendobj=result, op=MPI.SUM, root=root)
# Check correctness.
if rank == root:
result_np = np.einsum(expr, *operands, optimize=True)
print("Does the cuQuantum parallel contraction result match the numpy.einsum result?", np.allclose(result, result_np))
| cuQuantum-main | python/samples/cutensornet/fine/example2_mpi.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating slice-based parallel tensor network contraction with cuQuantum using NCCL and MPI. Here
we create the input tensors directly on the GPU using CuPy since NCCL only supports GPU buffers.
The low-level Python wrapper for NCCL is provided by CuPy. MPI (through mpi4py) is only needed to bootstrap
the multiple processes, set up the NCCL communicator, and to communicate data on the CPU. NCCL can be used
without MPI for a "single process multiple GPU" model.
For users who do not have NCCL installed already, CuPy provides detailed instructions on how to install
it for both pip and conda users when "import cupy.cuda.nccl" fails.
We recommend that those using CuPy v10+ use CuPy's high-level "cupyx.distributed" module to avoid having to
manipulate GPU pointers in Python.
Note that with recent NCCL, GPUs cannot be oversubscribed (not more than one process per GPU). Users will
see an NCCL error if the number of processes on a node exceeds the number of GPUs on that node.
$ mpiexec -n 4 python example4_mpi_nccl.py
"""
import cupy as cp
from cupy.cuda import nccl
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
from cuquantum import Network
# Set up the MPI environment.
root = 0
comm_mpi = MPI.COMM_WORLD
rank, size = comm_mpi.Get_rank(), comm_mpi.Get_size()
# Assign the device for each process.
device_id = rank % getDeviceCount()
# Define the tensor network topology.
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
# Note that all NCCL operations must be performed in the correct device context.
cp.cuda.Device(device_id).use()
# Set up the NCCL communicator.
nccl_id = nccl.get_unique_id() if rank == root else None
nccl_id = comm_mpi.bcast(nccl_id, root)
comm_nccl = nccl.NcclCommunicator(size, nccl_id, rank)
# Set the operand data on root.
if rank == root:
operands = [cp.random.rand(*shape) for shape in shapes]
else:
operands = [cp.empty(shape) for shape in shapes]
# Broadcast the operand data. We pass in the CuPy ndarray data pointers to the NCCL APIs.
stream_ptr = cp.cuda.get_current_stream().ptr
for operand in operands:
comm_nccl.broadcast(operand.data.ptr, operand.data.ptr, operand.size, nccl.NCCL_FLOAT64, root, stream_ptr)
# Create network object.
network = Network(expr, *operands)
# Compute the path on all ranks with 8 samples for hyperoptimization. Force slicing to enable parallel contraction.
path, info = network.contract_path(optimize={'samples': 8, 'slicing': {'min_slices': max(16, size)}})
# Select the best path from all ranks. Note that we still use the MPI communicator here for simplicity.
opt_cost, sender = comm_mpi.allreduce(sendobj=(info.opt_cost, rank), op=MPI.MINLOC)
if rank == root:
print(f"Process {sender} has the path with the lowest FLOP count {opt_cost}.")
# Broadcast info from the sender to all other ranks.
info = comm_mpi.bcast(info, sender)
# Set path and slices.
path, info = network.contract_path(optimize={'path': info.path, 'slicing': info.slices})
# Calculate this process's share of the slices.
num_slices = info.num_slices
chunk, extra = num_slices // size, num_slices % size
slice_begin = rank * chunk + min(rank, extra)
slice_end = num_slices if rank == size - 1 else (rank + 1) * chunk + min(rank + 1, extra)
slices = range(slice_begin, slice_end)
print(f"Process {rank} is processing slice range: {slices}.")
# Contract the group of slices the process is responsible for.
result = network.contract(slices=slices)
# Sum the partial contribution from each process on root.
stream_ptr = cp.cuda.get_current_stream().ptr
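# The NCCL reduction is performed in place (the same buffer is used for send and receive);
# only the root rank ends up holding the complete sum.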
comm_nccl.reduce(result.data.ptr, result.data.ptr, result.size, nccl.NCCL_FLOAT64, nccl.NCCL_SUM, root, stream_ptr)
# Check correctness.
if rank == root:
result_cp = cp.einsum(expr, *operands, optimize=True)
print("Does the cuQuantum parallel contraction result match the cupy.einsum result?", cp.allclose(result, result_cp))
| cuQuantum-main | python/samples/cutensornet/fine/example4_mpi_nccl.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
"""
Example illustrating slice-based parallel tensor network contraction with cuQuantum using MPI. Here we use
the buffer interface APIs offered by mpi4py for communicating ndarray-like objects.
$ mpiexec -n 4 python example3_mpi_buffered.py
"""
from cupy.cuda.runtime import getDeviceCount
from mpi4py import MPI
import numpy as np
from cuquantum import Network
root = 0
comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
expr = 'ehl,gj,edhg,bif,d,c,k,iklj,cf,a->ba'
shapes = [(8, 2, 5), (5, 7), (8, 8, 2, 5), (8, 6, 3), (8,), (6,), (5,), (6, 5, 5, 7), (6, 3), (3,)]
# Set the operand data on root. Since we use the buffer interface APIs offered by mpi4py for communicating array
# objects, we can directly use device arrays (cupy.ndarray, for example) here if mpi4py is built against a
# CUDA-aware MPI implementation.
if rank == root:
operands = [np.random.rand(*shape) for shape in shapes]
else:
operands = [np.empty(shape) for shape in shapes]
# Broadcast the operand data. Here and elsewhere in this sample we take advantage of the single-segment buffer
# interface APIs provided by mpi4py to reduce serialization overhead for array-like objects.
for operand in operands:
comm.Bcast(operand, root)
# Assign the device for each process.
device_id = rank % getDeviceCount()
# Create network object.
network = Network(expr, *operands, options={'device_id' : device_id})
# Compute the path on all ranks with 8 samples for hyperoptimization. Force slicing to enable parallel contraction.
path, info = network.contract_path(optimize={'samples': 8, 'slicing': {'min_slices': max(16, size)}})
# Select the best path from all ranks.
opt_cost, sender = comm.allreduce(sendobj=(info.opt_cost, rank), op=MPI.MINLOC)
if rank == root:
print(f"Process {sender} has the path with the lowest FLOP count {opt_cost}.")
# Broadcast info from the sender to all other ranks.
info = comm.bcast(info, sender)
# Set path and slices.
path, info = network.contract_path(optimize={'path': info.path, 'slicing': info.slices})
# Calculate this process's share of the slices.
num_slices = info.num_slices
chunk, extra = num_slices // size, num_slices % size
slice_begin = rank * chunk + min(rank, extra)
slice_end = num_slices if rank == size - 1 else (rank + 1) * chunk + min(rank + 1, extra)
slices = range(slice_begin, slice_end)
print(f"Process {rank} is processing slice range: {slices}.")
# Contract the group of slices the process is responsible for.
result = network.contract(slices=slices)
# Sum the partial contribution from each process on root.
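# On root, MPI.IN_PLACE reuses 'result' as both send and receive buffer so that
# root's own partial contribution is included in the sum.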
if rank == root:
comm.Reduce(sendbuf=MPI.IN_PLACE, recvbuf=result, op=MPI.SUM, root=root)
else:
comm.Reduce(sendbuf=result, recvbuf=None, op=MPI.SUM, root=root)
# Check correctness.
if rank == root:
result_np = np.einsum(expr, *operands, optimize=True)
print("Does the cuQuantum parallel contraction result match the numpy.einsum result?", np.allclose(result, result_np))
| cuQuantum-main | python/samples/cutensornet/fine/example3_mpi_buffered.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
#####################################
# Sampling of a quantum circuit state
#####################################
# Quantum state configuration
num_samples = 100
num_qubits = 16
dim = 2
qubits_dims = (dim, ) * num_qubits # qubit size
print(f"Quantum circuit with {num_qubits} qubits")
#############
# cuTensorNet
#############
handle = cutn.create()
stream = cp.cuda.Stream()
data_type = cuquantum.cudaDataType.CUDA_C_64F
# Define quantum gate tensors in device memory
gate_h = 2**-0.5 * cp.asarray([[1,1], [1,-1]], dtype='complex128', order='F')
gate_h_strides = 0
gate_cx = cp.asarray([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]], dtype='complex128').reshape(2,2,2,2, order='F')
gate_cx_strides = 0
# Allocate host memory for the samples
samples = np.empty((num_qubits, num_samples), dtype='int64', order='F') # samples are stored in F order with shape (num_qubits, num_samples)
# Create the initial quantum state
quantum_state = cutn.create_state(handle, cutn.StatePurity.PURE, num_qubits, qubits_dims, data_type)
print("Created the initial quantum state")
# Construct the quantum circuit state with gate application
tensor_id = cutn.state_apply_tensor(
handle, quantum_state, 1, (0, ),
gate_h.data.ptr, gate_h_strides, 1, 0, 1)
for i in range(1, num_qubits):
tensor_id = cutn.state_apply_tensor(
handle, quantum_state, 2, (i-1, i), # target on i-1 while control on i
gate_cx.data.ptr, gate_cx_strides, 1, 0, 1)
print("Quantum gates applied")
# Create the quantum circuit sampler
sampler = cutn.create_sampler(handle, quantum_state, num_qubits, 0)
free_mem = dev.mem_info[0]
# use half of the total free size
scratch_size = free_mem // 2
scratch_space = cp.cuda.alloc(scratch_size)
print(f"Allocated {scratch_size} bytes of scratch memory on GPU")
num_hyper_samples_dtype = cutn.sampler_get_attribute_dtype(cutn.SamplerAttribute.OPT_NUM_HYPER_SAMPLES)
num_hyper_samples = np.asarray(8, dtype=num_hyper_samples_dtype)
cutn.sampler_configure(handle, sampler,
cutn.SamplerAttribute.OPT_NUM_HYPER_SAMPLES,
num_hyper_samples.ctypes.data, num_hyper_samples.dtype.itemsize)
# Prepare the quantum circuit sampler
work_desc = cutn.create_workspace_descriptor(handle)
cutn.sampler_prepare(handle, sampler, scratch_size, work_desc, stream.ptr)
print("Prepared the specified quantum circuit state sampler")
workspace_size_d = cutn.workspace_get_memory_size(handle,
work_desc, cutn.WorksizePref.RECOMMENDED, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if workspace_size_d <= scratch_size:
cutn.workspace_set_memory(handle, work_desc, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH, scratch_space.ptr, workspace_size_d)
else:
print("Error:Insufficient workspace size on Device")
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_sampler(sampler)
cutn.destroy_state(quantum_state)
cutn.destroy(handle)
    del scratch_space
print("Free resource and exit.")
exit()
print("Set the workspace buffer")
# Sample the quantum circuit state
cutn.sampler_sample(handle, sampler, num_samples, work_desc, samples.ctypes.data, stream.ptr)
stream.synchronize()
print("Performed quantum circuit state sampling")
print("Bit-string samples:")
print(samples.T)
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_sampler(sampler)
cutn.destroy_state(quantum_state)
cutn.destroy(handle)
del scratch_space
print("Free resource and exit.")
| cuQuantum-main | python/samples/cutensornet/high_level/sampling_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import cupy as cp
import numpy as np
import cuquantum
from cuquantum import cutensornet as cutn
print("cuTensorNet-vers:", cutn.get_version())
dev = cp.cuda.Device() # get current device
props = cp.cuda.runtime.getDeviceProperties(dev.id)
print("===== device info ======")
print("GPU-name:", props["name"].decode())
print("GPU-clock:", props["clockRate"])
print("GPU-memoryClock:", props["memoryClockRate"])
print("GPU-nSM:", props["multiProcessorCount"])
print("GPU-major:", props["major"])
print("GPU-minor:", props["minor"])
print("========================")
#################################################
# Marginal computation of a quantum circuit state
#################################################
# Quantum state configuration
num_qubits = 16
dim = 2
qubits_dims = (dim, ) * num_qubits # qubit size
marginal_modes = (0, 1) # open qubits
num_marginal_modes = len(marginal_modes)
print(f"Quantum circuit with {num_qubits} qubits")
#############
# cuTensorNet
#############
handle = cutn.create()
stream = cp.cuda.Stream()
data_type = cuquantum.cudaDataType.CUDA_C_64F
# Define quantum gate tensors on device
gate_h = 2**-0.5 * cp.asarray([[1,1], [1,-1]], dtype='complex128', order='F')
gate_h_strides = 0
gate_cx = cp.asarray([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]], dtype='complex128').reshape(2,2,2,2, order='F')
gate_cx_strides = 0
# Allocate device memory for the reduced density matrix (marginal)
rdm_shape = (dim, ) * 2 * len(marginal_modes)
rdm = cp.empty(rdm_shape, dtype='complex128')
rdm_strides = [stride_in_bytes // rdm.itemsize for stride_in_bytes in rdm.strides]
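# Note: cuTensorNet expects strides in units of elements (not bytes), hence the conversion above.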
# Create the initial quantum state
quantum_state = cutn.create_state(handle, cutn.StatePurity.PURE, num_qubits, qubits_dims, data_type)
print("Created the initial quantum state")
# Construct the quantum circuit state with gate application
tensor_id = cutn.state_apply_tensor(
handle, quantum_state, 1, (0, ),
gate_h.data.ptr, gate_h_strides, 1, 0, 1)
for i in range(1, num_qubits):
tensor_id = cutn.state_apply_tensor(
handle, quantum_state, 2, (i-1, i), # target on i-1 while control on i
gate_cx.data.ptr, gate_cx_strides, 1, 0, 1)
print("Quantum gates applied")
# Specify the desired reduced density matrix (marginal)
marginal = cutn.create_marginal(handle, quantum_state, num_marginal_modes, marginal_modes, 0, 0, rdm_strides)
free_mem = dev.mem_info[0]
# use half of the total free size
scratch_size = free_mem // 2
scratch_space = cp.cuda.alloc(scratch_size)
print(f"Allocated {scratch_size} bytes of scratch memory on GPU")
num_hyper_samples_dtype = cutn.marginal_get_attribute_dtype(cutn.MarginalAttribute.OPT_NUM_HYPER_SAMPLES)
num_hyper_samples = np.asarray(8, dtype=num_hyper_samples_dtype)
cutn.marginal_configure(handle, marginal,
cutn.MarginalAttribute.OPT_NUM_HYPER_SAMPLES,
num_hyper_samples.ctypes.data, num_hyper_samples.dtype.itemsize)
# Prepare the specified quantum circuit reduced density matrix (marginal)
work_desc = cutn.create_workspace_descriptor(handle)
cutn.marginal_prepare(handle, marginal, scratch_size, work_desc, stream.ptr)
print("Prepared the specified quantum circuit reduced density matrix (marginal)")
workspace_size_d = cutn.workspace_get_memory_size(handle,
work_desc, cutn.WorksizePref.RECOMMENDED, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH)
if workspace_size_d <= scratch_size:
cutn.workspace_set_memory(handle, work_desc, cutn.Memspace.DEVICE, cutn.WorkspaceKind.SCRATCH, scratch_space.ptr, workspace_size_d)
else:
print("Error:Insufficient workspace size on Device")
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_marginal(marginal)
cutn.destroy_state(quantum_state)
cutn.destroy(handle)
    del scratch_space
print("Free resource and exit.")
exit()
print("Set the workspace buffer")
# Compute the specified quantum circuit reduced density matrix (marginal)
cutn.marginal_compute(handle, marginal, 0, work_desc, rdm.data.ptr, stream.ptr)
stream.synchronize()
print("Computed the specified quantum circuit reduced density matrix (marginal)")
print(f"Reduced density matrix for {num_marginal_modes} qubits")
print(rdm.reshape(dim**num_marginal_modes, dim**num_marginal_modes))
cutn.destroy_workspace_descriptor(work_desc)
cutn.destroy_marginal(marginal)
cutn.destroy_state(quantum_state)
cutn.destroy(handle)
del scratch_space
print("Free resource and exit.") | cuQuantum-main | python/samples/cutensornet/high_level/marginal_example.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
adjoint = 0
targets = [2]
n_targets = 1
n_controls = 0
d_sv = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv_res = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2-0.2j, 0.3-0.3j, 0.4-0.3j, 0.5-0.4j], dtype=np.complex64)
diagonals = np.asarray([1.0+0.0j, 0.0-1.0j], dtype=np.complex64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
workspaceSize = cusv.apply_generalized_permutation_matrix_get_workspace_size(
handle, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, 0, diagonals.ctypes.data, cuquantum.cudaDataType.CUDA_C_32F,
targets, n_targets, n_controls)
if workspaceSize > 0:
workspace = cp.cuda.memory.alloc(workspaceSize)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# apply the diagonal (generalized permutation) matrix
cusv.apply_generalized_permutation_matrix(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
0, diagonals.ctypes.data, cuquantum.cudaDataType.CUDA_C_32F, adjoint,
targets, n_targets, 0, 0, n_controls,
workspace_ptr, workspaceSize)
# destroy handle
cusv.destroy(handle)
# check result
if not np.allclose(d_sv, d_sv_res):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/diagonal_matrix.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType
nSVs = 2
nIndexBits = 3
svStride = (1 << nIndexBits)
# 2 state vectors are allocated contiguously in single memory chunk.
d_svs = cp.asarray([[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j],
[0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j]], dtype=cp.complex64)
d_svs_res = cp.asarray([[0.0+0.0j, 0.0+1.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[0.0+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j,
0.0+0.0j, 0.0+0.0j, 0.6+0.8j, 0.0+0.0j]], dtype=cp.complex64)
# 2 bitStrings are allocated contiguously in single memory chunk.
# The 1st SV collapses to |001> and the 2nd to |110>
# Note: bitStrings can also live on the host.
bitStrings = cp.asarray([0b001, 0b110], dtype=cp.int64)
# bit ordering should only live on host.
bitOrdering = np.asarray([0, 1, 2], dtype=np.int32)
bitStringLen = bitOrdering.size
# 2 norms are allocated contiguously in single memory chunk.
# Note: norms can also live on the host.
norms = cp.asarray([0.01, 0.25], dtype=cp.float64)
###################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
extraWorkspaceSizeInBytes = cusv.collapse_by_bitstring_batched_get_workspace_size(
handle, nSVs, bitStrings.data.ptr, norms.data.ptr)
# allocate external workspace if necessary
if extraWorkspaceSizeInBytes > 0:
workspace = cp.cuda.alloc(extraWorkspaceSizeInBytes)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# collapse the quantum states to the target bitstrings
cusv.collapse_by_bitstring_batched(
handle, d_svs.data.ptr, cudaDataType.CUDA_C_32F, nIndexBits, nSVs, svStride,
bitStrings.data.ptr, bitOrdering.ctypes.data, bitStringLen, norms.data.ptr,
workspace_ptr, extraWorkspaceSizeInBytes)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(d_svs_res, d_svs):
raise ValueError("results mismatch")
print("test passed")
| cuQuantum-main | python/samples/custatevec/batched_collapse.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
bitOrdering = (2, 1)
maskBitString = (1,)
maskOrdering = (0,)
assert len(maskBitString) == len(maskOrdering)
maskLen = len(maskBitString)
bufferSize = 3
accessBegin = 1
accessEnd = 4
d_sv = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
h_buf = np.empty(bufferSize, dtype=np.complex64)
h_buf_res = np.asarray([0.3+0.3j, 0.1+0.2j, 0.4+0.5j], dtype=np.complex64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# create accessor and check the size of external workspace
accessor, workspace_size = cusv.accessor_create_view(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, bitOrdering, len(bitOrdering),
maskBitString, maskOrdering, maskLen)
if workspace_size > 0:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# set external workspace
cusv.accessor_set_extra_workspace(
handle, accessor, workspace_ptr, workspace_size)
# get state vector components
cusv.accessor_get(
handle, accessor, h_buf.ctypes.data, accessBegin, accessEnd)
# destroy accessor
cusv.accessor_destroy(accessor)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(h_buf, h_buf_res):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/accessor_get.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
bitOrdering = (1, 2, 0)
maskLen = 0
d_sv = cp.zeros(nSvSize, dtype=np.complex64)
d_sv_res = cp.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
h_buf = np.asarray([0.0+0.0j, 0.1+0.1j, 0.2+0.2j, 0.3+0.4j,
0.0+0.1j, 0.1+0.2j, 0.3+0.3j, 0.4+0.5j], dtype=np.complex64)
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# create accessor and check the size of external workspace
accessor, workspace_size = cusv.accessor_create(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, bitOrdering, len(bitOrdering),
0, 0, maskLen)
if workspace_size > 0:
workspace = cp.cuda.alloc(workspace_size)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# set external workspace
cusv.accessor_set_extra_workspace(
handle, accessor, workspace_ptr, workspace_size)
# set state vector components
cusv.accessor_set(
handle, accessor, h_buf.ctypes.data, 0, nSvSize)
# destroy accessor
cusv.accessor_destroy(accessor)
# destroy handle
cusv.destroy(handle)
# check result
if not cp.allclose(d_sv, d_sv_res):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/accessor_set.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import numpy as np
import cupy as cp
import cuquantum
from cuquantum import custatevec as cusv
nIndexBits = 3
nSvSize = (1 << nIndexBits)
nBasisBits = 1
basisBits = np.asarray([1], dtype=np.int32)
h_sv = np.asarray([0.0+0.0j, 0.0+0.1j, 0.1+0.1j, 0.1+0.2j,
0.2+0.2j, 0.3+0.3j, 0.3+0.4j, 0.4+0.5j], dtype=np.complex64)
d_sv = cp.asarray(h_sv)
# the gate matrix can live on either host (np) or device (cp)
matrix = cp.asarray([1.0+0.0j, 2.0+1.0j, 2.0-1.0j, 3.0+0.0j], dtype=np.complex64)
if isinstance(matrix, cp.ndarray):
matrix_ptr = matrix.data.ptr
elif isinstance(matrix, np.ndarray):
matrix_ptr = matrix.ctypes.data
else:
raise ValueError
# expectation values must stay on host
expect = np.empty((2,), dtype=np.float64)
expect_expected = np.asarray([4.1, 0.0], dtype=np.float64)
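# With CUDA_C_64F as the expectation data type, the result is a single complex value:
# expect[0] holds the real part and expect[1] the imaginary part.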
####################################################################################
# cuStateVec handle initialization
handle = cusv.create()
# check the size of external workspace
workspaceSize = cusv.compute_expectation_get_workspace_size(
handle, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits, matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F,
cusv.MatrixLayout.ROW, nBasisBits, cuquantum.ComputeType.COMPUTE_32F)
if workspaceSize > 0:
workspace = cp.cuda.memory.alloc(workspaceSize)
workspace_ptr = workspace.ptr
else:
workspace_ptr = 0
# compute expectation value
cusv.compute_expectation(
handle, d_sv.data.ptr, cuquantum.cudaDataType.CUDA_C_32F, nIndexBits,
expect.ctypes.data, cuquantum.cudaDataType.CUDA_C_64F,
matrix_ptr, cuquantum.cudaDataType.CUDA_C_32F, cusv.MatrixLayout.ROW,
basisBits.ctypes.data, nBasisBits,
cuquantum.ComputeType.COMPUTE_32F, workspace_ptr, workspaceSize)
# destroy handle
cusv.destroy(handle)
# check result
if not np.allclose(expect, expect_expected, atol=1E-6):
raise ValueError("results mismatch")
else:
print("test passed")
| cuQuantum-main | python/samples/custatevec/expectation.py |
# Copyright (c) 2021-2023, NVIDIA CORPORATION & AFFILIATES
#
# SPDX-License-Identifier: BSD-3-Clause
import sys
import cupy as cp
import numpy as np
from cuquantum import custatevec as cusv
from cuquantum import cudaDataType, ComputeType
nGlobalBits = 2
nLocalBits = 2
nSubSvs = (1 << nGlobalBits)
subSvSize = (1 << nLocalBits)
bitStringLen = 2
bitOrdering = (1, 0)
bitString = np.empty(bitStringLen, dtype=np.int32)
bitString_result = np.asarray((0, 0), dtype=np.int32)
# In a real application, a random number in range [0, 1) would be used.
randnum = 0.72
h_sv = np.asarray([[ 0.000+0.000j, 0.000+0.125j, 0.000+0.250j, 0.000+0.375j],
[ 0.000+0.000j, 0.000-0.125j, 0.000-0.250j, 0.000-0.375j],
[ 0.125+0.000j, 0.125-0.125j, 0.125-0.250j, 0.125-0.375j],
[-0.125+0.000j, -0.125-0.125j, -0.125-0.250j, -0.125-0.375j]],
dtype=np.complex128)
h_sv_result = np.asarray([[ 0.0 +0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[ 0.0 +0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[ 0.707107+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j],
[-0.707107+0.0j, 0.0+0.0j, 0.0+0.0j, 0.0+0.0j]],
dtype=np.complex128)
# device allocation
if len(sys.argv) == 1:
numDevices = cp.cuda.runtime.getDeviceCount()
devices = [i % numDevices for i in range(nSubSvs)]
else:
numDevices = min(len(sys.argv) - 1, nSubSvs)
devices = [int(sys.argv[i+1]) for i in range(numDevices)]
for i in range(numDevices, nSubSvs):
devices.append(devices[i % numDevices])
print("The following devices will be used in this sample:")
for iSv in range(nSubSvs):
print(f" sub-SV {iSv} : device id {devices[iSv]}")
d_sv = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]):
d_sv.append(cp.asarray(h_sv[iSv]))
# custatevec handle initialization
handle = []
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]):
handle.append(cusv.create())
# get abs2sum for each sub state vector
abs2SumArray = np.empty((nSubSvs,), dtype=np.float64)
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
cusv.abs2sum_array(
handle[iSv], d_sv[iSv].data.ptr, cudaDataType.CUDA_C_64F, nLocalBits,
# when sliced into a 0D array, NumPy returns a scalar, so we can't do
# abs2SumArray[iSv].ctypes.data and need this workaround
abs2SumArray.ctypes.data + iSv * abs2SumArray.dtype.itemsize,
0, 0, 0, 0, 0)
dev.synchronize()
# get cumulative array
cumulativeArray = np.zeros((nSubSvs + 1,), dtype=np.float64)
cumulativeArray[1:] = np.cumsum(abs2SumArray)
# measurement
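# Find the sub state vector whose cumulative probability interval contains randnum;
# the measurement is then performed within that interval using the offset and the total norm.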
for iSv in range(nSubSvs):
if (cumulativeArray[iSv] <= randnum and randnum < cumulativeArray[iSv + 1]):
norm = cumulativeArray[nSubSvs]
offset = cumulativeArray[iSv]
with cp.cuda.Device(devices[iSv]) as dev:
cusv.batch_measure_with_offset(
handle[iSv], d_sv[iSv].data.ptr, cudaDataType.CUDA_C_64F, nLocalBits,
bitString.ctypes.data, bitOrdering, bitStringLen, randnum,
cusv.Collapse.NONE, offset, norm)
dev.synchronize()
# get abs2Sum after collapse
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
cusv.abs2sum_array(
handle[iSv], d_sv[iSv].data.ptr, cudaDataType.CUDA_C_64F, nLocalBits,
abs2SumArray.ctypes.data + iSv * abs2SumArray.dtype.itemsize, 0, 0,
bitString.ctypes.data, bitOrdering, bitStringLen)
dev.synchronize()
# get norm after collapse
norm = np.sum(abs2SumArray, dtype=np.float64)
# collapse sub state vectors
for iSv in range(nSubSvs):
with cp.cuda.Device(devices[iSv]) as dev:
cusv.collapse_by_bitstring(
handle[iSv], d_sv[iSv].data.ptr, cudaDataType.CUDA_C_64F, nLocalBits,
bitString.ctypes.data, bitOrdering, bitStringLen, norm)
dev.synchronize()
# destroy handle when done
cusv.destroy(handle[iSv])
h_sv[iSv] = cp.asnumpy(d_sv[iSv])
correct = np.allclose(h_sv, h_sv_result)
correct &= np.allclose(bitString, bitString_result)
if correct:
print("mgpu_batch_measure example PASSED");
else:
raise RuntimeError("mgpu_batch_measure example FAILED: wrong result")
| cuQuantum-main | python/samples/custatevec/mgpu_batch_measure.py |