#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .utility.args import parse_arguments
from importlib import import_module
def main():
args, xgboost_args = parse_arguments()
getattr(import_module(args.mainClass), 'main')(args, xgboost_args)
| spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/examples/main.py |
spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/examples/utility/__init__.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from pyspark.ml.evaluation import *
from pyspark.ml.feature import VectorAssembler
from pyspark.sql import DataFrame
from pyspark.sql.functions import col
from pyspark.sql.types import FloatType
from com.nvidia.spark.examples.taxi.pre_process import pre_process
from time import time
def merge_dicts(dict_x, dict_y):
result = dict_x.copy()
result.update(dict_y)
return result
def show_sample(args, data_frame, label):
data_frame = data_frame if args.showFeatures else data_frame.select(label, 'prediction')
data_frame.show(args.numRows)
def vectorize_data_frame(data_frame, label):
features = [x.name for x in data_frame.schema if x.name != label]
to_floats = [col(x.name).cast(FloatType()) for x in data_frame.schema]
return (VectorAssembler()
.setInputCols(features)
.setOutputCol('features')
.transform(data_frame.select(to_floats))
.select(col('features'), col(label)))
def vectorize_data_frames(data_frames, label):
return [vectorize_data_frame(x, label) for x in data_frames]
def with_benchmark(phrase, action):
start = time()
result = action()
end = time()
print('-' * 100)
print('{} takes {} seconds'.format(phrase, round(end - start, 2)))
return result
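# A minimal usage sketch for with_benchmark: wrap any zero-argument callable and the
# elapsed wall-clock time is printed while the callable's result is passed through.
# The summing workload below is purely illustrative.
def _example_with_benchmark():
    return with_benchmark('Summing one million integers', lambda: sum(range(1_000_000)))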
def check_classification_accuracy(data_frame, label):
accuracy = (MulticlassClassificationEvaluator()
.setLabelCol(label)
.evaluate(data_frame))
print('-' * 100)
print('Accuracy is ' + str(accuracy))
def check_regression_accuracy(data_frame, label):
accuracy = (RegressionEvaluator()
.setLabelCol(label)
.evaluate(data_frame))
print('-' * 100)
print('RMSE is ' + str(accuracy))
def prepare_data(spark, args, schema, dataPath):
reader = (spark
.read
.format(args.format))
if args.format == 'csv':
reader.schema(schema).option('header', args.hasHeader)
return reader.load(dataPath)
def extract_paths(paths, prefix):
results = [path[len(prefix):] for path in paths if path.startswith(prefix)]
return results
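# A small illustration of how extract_paths splits role-prefixed --dataPath values
# (the paths below are placeholders): entries that start with the given prefix are
# kept with the prefix stripped, and everything else is dropped.
def _example_extract_paths():
    paths = ['train::/data/taxi/train.parquet', 'eval::/data/taxi/eval.parquet']
    return extract_paths(paths, 'train::')  # ['/data/taxi/train.parquet']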
def transform_data(
df: DataFrame,
label: str,
use_gpu: typing.Optional[bool],
) -> typing.Tuple[DataFrame, typing.Union[str, typing.List[str]]]:
if use_gpu:
features = [x.name for x in df.schema if x.name != label]
else:
df = vectorize_data_frame(df, label)
features = 'features'
return df, features
def valid_input_data(spark, args, raw_schema, final_schema):
e2e = False
for path in args.dataPaths:
if 'raw' in path:
e2e = True
break
raw_train_path = ''
raw_eval_path = ''
raw_trans_path = ''
eval_path = ''
if e2e:
raw_train_path = extract_paths(args.dataPaths, 'rawTrain::')
raw_eval_path = extract_paths(args.dataPaths, 'rawEval::')
raw_trans_path = extract_paths(args.dataPaths, 'rawTrans::')
train_data = ''
eval_data = ''
trans_data = ''
# if this is an e2e run
if raw_train_path or raw_eval_path or raw_trans_path:
raw_train_data = prepare_data(spark, args, raw_schema, raw_train_path)
raw_eval_data = ''
raw_trans_data = ''
if raw_eval_path:
raw_eval_data = prepare_data(spark, args, raw_schema, raw_eval_path)
if raw_trans_path:
raw_trans_data = prepare_data(spark, args, raw_schema, raw_trans_path)
train_data = pre_process(raw_train_data)
if raw_eval_data:
eval_data = pre_process(raw_eval_data)
if raw_trans_data:
trans_data = pre_process(raw_trans_data)
# if this is just a train/transform
else:
train_path = extract_paths(args.dataPaths, 'train::')
eval_path = extract_paths(args.dataPaths, 'eval::')
trans_path = extract_paths(args.dataPaths, 'trans::')
if train_path:
train_data = prepare_data(spark, args, final_schema, train_path)
if eval_path:
eval_data = prepare_data(spark, args, final_schema, eval_path)
if trans_path:
trans_data = prepare_data(spark, args, final_schema, trans_path)
return (train_data, eval_data, trans_data)
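# A hedged sketch of how an example module's main() typically wires these helpers
# together; the args, schemas, and label come from the concrete example, and the
# training data is assumed to be present.
def _example_prepare_and_transform(spark, args, raw_schema, final_schema, label):
    train_data, eval_data, trans_data = valid_input_data(spark, args, raw_schema, final_schema)
    train_data, features = transform_data(train_data, label, args.use_gpu)
    return (train_data, features), eval_data, trans_data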
| spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/examples/utility/utils.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import typing
from argparse import ArgumentParser
from distutils.util import strtobool
from re import match
from sys import exit
def _to_bool(literal):
return bool(strtobool(literal))
def _to_ratio_pair(literal): # e.g., '80:20'
return match(r'^\d+:\d+$', literal) and [int(x) for x in literal.split(':')]
MAX_CHUNK_SIZE = 2 ** 31 - 1
_examples = [
'com.nvidia.spark.examples.agaricus.main',
'com.nvidia.spark.examples.mortgage.main',
'com.nvidia.spark.examples.mortgage.etl_main',
'com.nvidia.spark.examples.mortgage.cross_validator_main',
'com.nvidia.spark.examples.taxi.main',
'com.nvidia.spark.examples.taxi.etl_main',
'com.nvidia.spark.examples.taxi.cross_validator_main',
]
def _validate_args(args):
usage = ''
if not args.dataPaths:
usage += ' --dataPaths is required.\n'
if not (args.dataRatios
and 0 <= args.dataRatios[0] <= 100
and 0 <= args.dataRatios[1] <= 100
and args.dataRatios[0] + args.dataRatios[1] <= 100):
usage += ' --dataRatios should be in format \'Int:Int\', these two ints should be' \
' in range [0, 100] and the sum should be less than or equal to 100.\n'
if not (1 <= args.maxRowsPerChunk <= MAX_CHUNK_SIZE):
usage += ' --maxRowsPerChunk should be in range [1, {}].\n'.format(MAX_CHUNK_SIZE)
if usage:
print('-' * 80)
print('Usage:\n' + usage)
exit(1)
def _attach_derived_args(args):
args.trainRatio = args.dataRatios[0]
args.evalRatio = args.dataRatios[1]
args.trainEvalRatio = 100 - args.trainRatio - args.evalRatio
args.splitRatios = [args.trainRatio, args.trainEvalRatio, args.evalRatio]
def _inspect_xgb_parameters() -> typing.Dict[str, type]:
"""inspect XGBModel parameters from __init__"""
from xgboost import XGBModel
from typing import get_type_hints, get_origin
xgb_parameters = {}
xgb_model_sig = get_type_hints(XGBModel.__init__)
for k, v in xgb_model_sig.items():
if k != "kwargs" and k != "return":
if get_origin(v) == typing.Union:
xgb_parameters[k] = v.__args__[0]
else:
xgb_parameters[k] = v
# some extra parameters used by xgboost pyspark
xgb_parameters['objective'] = str
xgb_parameters['force_repartition'] = _to_bool
xgb_parameters['use_gpu'] = _to_bool
xgb_parameters['num_workers'] = int
xgb_parameters['enable_sparse_data_optim'] = _to_bool
return xgb_parameters
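# A self-contained sketch of the introspection technique used above: typing.get_type_hints
# on an __init__ yields a name -> type mapping, and Optional[X] (i.e. Union[X, None]) is
# unwrapped to X so the type can be handed to argparse. The Booster class here is a toy
# stand-in, not xgboost's.
def _example_inspect_parameters():
    from typing import Optional, Union, get_type_hints, get_origin

    class Booster:
        def __init__(self, max_depth: Optional[int] = None, eta: float = 0.3, **kwargs):
            pass

    params = {}
    for name, hint in get_type_hints(Booster.__init__).items():
        if name in ('kwargs', 'return'):
            continue
        params[name] = hint.__args__[0] if get_origin(hint) == Union else hint
    return params  # {'max_depth': int, 'eta': float}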
def parse_arguments():
parser = ArgumentParser()
# application arguments
parser.add_argument('--mainClass', required=True, choices=_examples)
parser.add_argument('--mode', choices=['all', 'train', 'transform'], default='all')
parser.add_argument('--format', required=True, choices=['csv', 'parquet', 'orc'])
parser.add_argument('--hasHeader', type=_to_bool, default=True)
parser.add_argument('--asFloats', type=_to_bool, default=True)
parser.add_argument('--maxRowsPerChunk', type=int, default=MAX_CHUNK_SIZE)
parser.add_argument('--modelPath')
parser.add_argument('--overwrite', type=_to_bool, default=False)
parser.add_argument('--dataPath', dest='dataPaths', action='append')
parser.add_argument('--dataRatios', type=_to_ratio_pair, default=[80, 20])
parser.add_argument('--numRows', type=int, default=5)
parser.add_argument('--showFeatures', type=_to_bool, default=True)
xgboost_all_args = _inspect_xgb_parameters()
for arg, tp in xgboost_all_args.items():
parser.add_argument('--' + arg, type=tp)
parsed_all = parser.parse_args()
_validate_args(parsed_all)
_attach_derived_args(parsed_all)
parsed_xgboost = {
k: v
for k, v in vars(parsed_all).items()
if k in xgboost_all_args and v is not None
}
return parsed_all, parsed_xgboost
| spark-rapids-examples-main | examples/XGBoost-Examples/utility/python/com/nvidia/spark/examples/utility/args.py |
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) 2022, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyspark
import pyspark.sql
import pyspark.sql.functions as F
from collections import defaultdict
options = defaultdict(lambda: None)
session = None
ETL_VERSION = '0.7'
def register_options(**kwargs):
global options
for k, v in kwargs.items():
options[k] = v
def _register_session(s):
global session
session = s
def _register_views(lvars, *names):
for n in names:
if n in lvars:
lvars[n].createOrReplaceTempView(n)
def withsession(df_arg=0):
def decorate(fn):
def wrapped(*args, **kwargs):
_register_session(args[df_arg].sql_ctx.sparkSession)
fn(*args, **kwargs)
return wrapped
return decorate
def read_df(session, fn):
kwargs = {}
_register_session(session)
input_kind = options["input_kind"]
if input_kind == "csv":
kwargs["header"] = True
return getattr(session.read, input_kind)("%s.%s" % (fn, input_kind), **kwargs)
def find_customers(billing_events_df):
customers = billing_events_df.select("customerID").distinct()
if 'cache_customers' in options:
customers.cache()
customers.createOrReplaceTempView("customers")
return customers
def customers():
global session
return session.table("customers")
def join_billing_data(billing_events_df):
_register_session(billing_events_df.sql_ctx.sparkSession)
billing_events = billing_events_df.withColumn("value", billing_events_df.value)
customers = find_customers(billing_events)
counts_and_charges = billing_events.groupBy("customerID", "kind").agg(
F.count(billing_events.value).alias("event_counts"),
F.sum(billing_events.value).alias("total_charges"),
)
counts_and_charges.createOrReplaceTempView("counts_and_charges")
terminations = billing_events.where(F.col("kind") == "AccountTermination").select(
F.col("customerID").alias("Churn")
)
churned = customers.join(
terminations, customers.customerID == terminations.Churn, how="leftouter"
).select(
"customerID", F.when(F.col("Churn").isNull(), F.lit(False)).otherwise(F.lit(True)).alias("Churn")
)
customer_charges = customers.join(
counts_and_charges.where(F.col("kind") == "Charge"), "customerID", how="leftouter"
).select(
"customerID",
F.col("event_counts").alias("tenure"),
F.col("total_charges").alias("TotalCharges"),
).fillna({'tenure': 0, 'TotalCharges': 0.0})
_register_views(locals(), "counts_and_charges", "terminations", "churned", "customer_charges")
# counts_and_charges.createOrReplaceTempView("counts_and_charges")
# terminations.createOrReplaceTempView("terminations")
# churned.createOrReplaceTempView("churned")
# customer_charges.createOrReplaceTempView("customer_charges")
customer_billing = churned.join(customer_charges, "customerID")
_register_views(locals(), "counts_and_charges", "terminations", "churned", "customer_charges", "customer_billing")
return customer_billing
def join_phone_features(phone_features_df):
phone_features = phone_features_df
phone_service = phone_features.where(F.col("feature") == "PhoneService").select(
"customerID", F.lit("Yes").alias("PhoneService")
)
multiple_lines = phone_features.where(F.col("feature") == "MultipleLines").select(
"customerID", F.lit("Yes").alias("MultipleLines")
)
customer_phone_features = (
customers().join(phone_service, "customerID", how="leftouter")
.join(multiple_lines, "customerID", how="leftouter")
.select(
"customerID",
F.when(F.col("PhoneService").isNull(), "No")
.otherwise("Yes")
.alias("PhoneService"),
"MultipleLines",
)
.select(
"customerID",
"PhoneService",
F.when(F.col("PhoneService") == "No", "No phone service")
.otherwise(F.when(F.col("MultipleLines").isNull(), "No").otherwise("Yes"))
.alias("MultipleLines"),
)
)
_register_views(locals(), "phone_service", "multiple_lines", "customer_phone_features")
return customer_phone_features
def untidy_feature(df, feature):
""" 'untidies' a feature by turning it into a column """
return df.where(F.col("feature") == feature).select(
"customerID", F.col("value").alias(feature)
)
def chained_join(column, base_df, dfs, how="leftouter"):
""" repeatedly joins a sequence of data frames on the same column """
acc = base_df
for df in dfs:
acc = acc.join(df, column, how=how)
return acc
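# A minimal sketch of chained_join with toy data (columns and values are illustrative):
# each per-feature frame is left-joined back onto the base frame on the shared key, so
# customers without a feature row simply come back with nulls.
def _example_chained_join(spark):
    base = spark.createDataFrame([('c1',), ('c2',)], ['customerID'])
    phone = spark.createDataFrame([('c1', 'Yes')], ['customerID', 'PhoneService'])
    contract = spark.createDataFrame([('c2', 'Month-to-month')], ['customerID', 'Contract'])
    return chained_join('customerID', base, [phone, contract])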
def resolve_nullable_column(df, col, null_val="No"):
return F.when(df[col].isNull(), null_val).otherwise(df[col]).alias(col)
def resolve_dependent_column(
df,
col,
parent_col="InternetService",
null_val="No",
null_parent_val="No internet service",
):
return (
F.when((df[parent_col] == "No") | (df[parent_col].isNull()), null_parent_val)
.otherwise(F.when(df[col].isNull(), null_val).otherwise(df[col]))
.alias(col)
)
def join_internet_features(internet_features_df):
internet_features = internet_features_df
internet_service = untidy_feature(internet_features, "InternetService")
online_security = untidy_feature(internet_features, "OnlineSecurity")
online_backup = untidy_feature(internet_features, "OnlineBackup")
device_protection = untidy_feature(internet_features, "DeviceProtection")
tech_support = untidy_feature(internet_features, "TechSupport")
streaming_tv = untidy_feature(internet_features, "StreamingTV")
streaming_movies = untidy_feature(internet_features, "StreamingMovies")
customer_internet_features = chained_join(
"customerID",
customers(),
[
internet_service,
online_security,
online_backup,
device_protection,
tech_support,
streaming_tv,
streaming_movies,
],
)
customer_internet_features = customer_internet_features.select(
"customerID",
resolve_nullable_column(customer_internet_features, "InternetService"),
resolve_dependent_column(
customer_internet_features, "OnlineSecurity", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "OnlineBackup", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "DeviceProtection", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "TechSupport", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "StreamingTV", "InternetService"
),
resolve_dependent_column(
customer_internet_features, "StreamingMovies", "InternetService"
),
)
_register_views(locals(),
"internet_service",
"online_security",
"online_backup",
"device_protection",
"tech_support",
"streaming_tv",
"streaming_movies",
"customer_internet_features"
)
return customer_internet_features
def join_account_features(account_features_df):
account_features = account_features_df
contracts = untidy_feature(account_features, "Contract")
paperless = untidy_feature(account_features, "PaperlessBilling")
payment = untidy_feature(account_features, "PaymentMethod")
customer_account_features = chained_join(
"customerID", customers(), [contracts, paperless, payment]
)
customer_account_features = customer_account_features.select(
"customerID",
"Contract",
resolve_nullable_column(customer_account_features, "PaperlessBilling"),
"PaymentMethod",
)
_register_views(locals(), "contracts", "paperless", "payment", "customer_account_features")
return customer_account_features
def process_account_meta(account_meta_df, usecal=None):
def is_senior_citizen(nowcol, dobcol):
if options['use_calendar_arithmetic']:
return F.when(
F.col("now") >= F.add_months(
F.col("dateOfBirth"), 65 * 12
), F.lit(True)
).otherwise(F.lit(False))
else:
return (F.year(F.col(nowcol)) > (F.year(F.col(dobcol)) + 65)) | \
(F.year(F.col(nowcol)) == (F.year(F.col(dobcol)) + 65)) & \
(
(F.month(F.col(nowcol)) < F.month(F.col(dobcol))) | \
(
(F.month(F.col(nowcol)) == F.month(F.col(dobcol))) & \
(F.dayofmonth(F.col(nowcol)) <= F.dayofmonth(F.col(dobcol)))
)
)
customer_account_meta = account_meta_df.select(
"customerID",
is_senior_citizen("now", "dateOfBirth").alias("SeniorCitizen"),
"Partner",
"Dependents",
"gender",
"MonthlyCharges",
)
_register_views(locals(), "customer_account_meta")
return customer_account_meta
def forcefloat(c):
return F.col(c).cast("float").alias(c)
def join_wide_table(customer_billing, customer_phone_features, customer_internet_features, customer_account_features, customer_account_meta):
wide_data = chained_join(
"customerID",
customers(),
[
customer_billing,
customer_phone_features,
customer_internet_features,
customer_account_features,
customer_account_meta,
],
).select(
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
"MonthlyCharges",
"TotalCharges",
"Churn",
)
return wide_data
def cast_and_coalesce_wide_data(wd):
if options["coalesce_output"] > 0:
wd = wd.coalesce(options["coalesce_output"])
return wd.select(
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
forcefloat("MonthlyCharges"),
forcefloat("TotalCharges"),
"Churn",
)
def write_df(df, name):
output_kind = options["output_kind"]
output_mode = options["output_mode"]
output_prefix = options["output_prefix"]
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
kwargs = {}
if output_kind == "csv":
kwargs["header"] = True
getattr(df.write.mode(output_mode), output_kind)(name, **kwargs)
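# A hedged sketch of how the functions in this module fit together as a driver; the
# option values and input table names below are placeholders, and the exact wiring in
# the accompanying notebooks may differ.
def _example_run_etl(spark):
    register_options(input_kind='csv', output_kind='parquet',
                     output_mode='overwrite', output_prefix='', coalesce_output=1)
    customer_billing = join_billing_data(read_df(spark, 'billing_events'))
    customer_phone = join_phone_features(read_df(spark, 'phone_features'))
    customer_internet = join_internet_features(read_df(spark, 'internet_features'))
    customer_account = join_account_features(read_df(spark, 'account_features'))
    customer_meta = process_account_meta(read_df(spark, 'account_meta'))
    wide = join_wide_table(customer_billing, customer_phone, customer_internet,
                           customer_account, customer_meta)
    write_df(cast_and_coalesce_wide_data(wide), 'churn_wide')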
| spark-rapids-examples-main | examples/SQL+DF-Examples/customer-churn/notebooks/python/churn/etl.py |
# Copyright (c) 2022, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pyspark
from pyspark.sql.types import StructType, StructField, StringType, DoubleType, DecimalType
import pyspark.sql.functions as F
from collections import defaultdict
options = defaultdict(lambda: None)
now = datetime.datetime.now(datetime.timezone.utc)
AUGMENT_VERSION = "0.7"
AUGMENT_CUSTOMER_TAG = "0007"
session = None
currencyType = None
def get_currency_type():
global options
global currencyType
if currencyType is not None:
return currencyType
if "use_decimal" in options and options["use_decimal"]:
if "decimal_precision" in options :
assert options["decimal_precision"] > 5, "Decimal precision is too small; was %d but should be at least 6" % options["decimal_precision"]
currencyType = DecimalType(options["decimal_precision"], 2)
else:
# "999,999.99 should be enough for anyone"
currencyType = DecimalType(8, 2)
else:
currencyType = DoubleType()
return currencyType
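# A small sketch of how the options above select the monetary column type: with
# use_decimal set the charge columns become DecimalType(precision, 2), otherwise
# DoubleType. Note that the choice is cached in the currencyType global after the
# first call.
def _example_currency_type():
    register_options(use_decimal=True, decimal_precision=10)
    return get_currency_type()  # DecimalType(10, 2) under these options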
def _register_session(s):
global session
session = s
def _get_uniques(ct):
global session
table_names = set([table.name for table in session.catalog.listTables()])
if ("uniques_%d" % ct) in table_names:
return session.table("uniques_%d" % ct)
else:
def str_part(seed=0x5CA1AB1E):
"generate the string part of a unique ID"
import random
r = random.Random(seed)
from base64 import b64encode
while True:
yield "%s-%s" % (b64encode(r.getrandbits(72).to_bytes(9, "big"), b"@_").decode(
"utf-8"
), AUGMENT_CUSTOMER_TAG)
sp = str_part()
uniques = (
session.createDataFrame(
schema=StructType([StructField("u_value", StringType())]),
data=[dict(u_value=next(sp)) for _ in range(min(int(ct * 1.02), ct + 2))],
)
.distinct()
.orderBy("u_value")
.limit(ct)
).cache()
uc = uniques.count()
assert (uc == ct), "due to prng collision we had %d instead of %d replicas" % (uc, ct)
uniques.createOrReplaceTempView("uniques_%d" % ct)
return uniques
def register_options(**kwargs):
global options
for k, v in kwargs.items():
options[k] = v
def load_supplied_data(session, input_file):
_register_session(session)
fields = [
"customerID",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"tenure",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
"MonthlyCharges",
"TotalCharges",
"Churn",
]
double_fields = set(["tenure", "MonthlyCharges", "TotalCharges"])
schema = pyspark.sql.types.StructType(
[
pyspark.sql.types.StructField(
f, DoubleType() if f in double_fields else StringType()
)
for f in fields
]
)
df = session.read.csv(input_file, header=True, schema=schema)
source_count = df.count()
df = df.dropna()
nn_count = df.count()
if source_count == nn_count:
print("read %d records from source dataset with no nulls -- is this what you expect?" % source_count)
else:
print("read %d records from source dataset (%d non-null records)" % (source_count, nn_count))
return df
def replicate_df(df, duplicates):
if duplicates > 1:
uniques = _get_uniques(duplicates)
df = (
df.crossJoin(uniques.distinct())
.withColumn("customerID", F.format_string("%s-%s", "customerID", "u_value"))
.drop("u_value")
)
return df
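# A hedged sketch of the replication path: every source row is cross-joined against a
# small table of unique suffixes so customerIDs stay distinct across replicas. The
# session must be registered first because _get_uniques builds (and caches) the suffix
# table through it.
def _example_replicate(spark, df):
    _register_session(spark)
    register_options(dup_times=3)
    return replicate_df(df, options['dup_times'])  # roughly 3x the rows, new customerIDs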
def examine_categoricals(df, columns=None):
""" Returns (to driver memory) a list of tuples consisting of every unique value
for each column in `columns` or for every categorical column in the source
data if no columns are specified """
default_columns = [
"SeniorCitizen",
"Partner",
"Dependents",
"PhoneService",
"MultipleLines",
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
"Contract",
"PaperlessBilling",
"PaymentMethod",
]
columns = columns or default_columns
return [(c, [row[0] for row in df.select(c).distinct().rdd.collect()]) for c in columns]
def billing_events(df):
import datetime
MAX_MONTH = 72
def get_last_month(col):
h = F.abs(F.xxhash64(col))
h1 = (h.bitwiseAND(0xff)) % (MAX_MONTH // 2)
h2 = (F.shiftRight(h, 8).bitwiseAND(0xff)) % (MAX_MONTH // 3)
h3 = (F.shiftRight(h, 16).bitwiseAND(0xff)) % (MAX_MONTH // 5)
h4 = (F.shiftRight(h, 24).bitwiseAND(0xff)) % (MAX_MONTH // 7)
h5 = (F.shiftRight(h, 32).bitwiseAND(0xff)) % (MAX_MONTH // 11)
return -(h1 + h2 + h3 + h4 + h5)
w = pyspark.sql.Window.orderBy(F.lit("")).partitionBy(df.customerID)
charges = (
df.select(
df.customerID,
F.lit("Charge").alias("kind"),
F.explode(
F.array_repeat((df.TotalCharges / df.tenure).cast(get_currency_type()), df.tenure.cast("int"))
).alias("value"),
F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0).alias("last_month")
)
.withColumn("now", F.lit(now).cast("date"))
.withColumn("month_number", -(F.row_number().over(w) + F.col("last_month")))
.withColumn("date", F.expr("add_months(now, month_number)"))
.drop("now", "month_number", "last_month")
)
serviceStarts = (
df.withColumn("last_month", F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0)).select(
df.customerID,
F.lit("AccountCreation").alias("kind"),
F.lit(0.0).cast(get_currency_type()).alias("value"),
F.lit(now).alias("now"),
(-df.tenure - 1 + F.col("last_month")).alias("month_number"),
)
.withColumn("date", F.expr("add_months(now, month_number)"))
.drop("now", "month_number")
)
serviceTerminations = df.withColumn("last_month", F.when(df.Churn == "Yes", get_last_month(df.customerID)).otherwise(0)).where(
df.Churn == "Yes"
).withColumn("now", F.lit(now)).select(
df.customerID,
F.lit("AccountTermination").alias("kind"),
F.lit(0.0).cast(get_currency_type()).alias("value"),
F.expr("add_months(now, last_month)").alias("date")
)
billingEvents = charges.union(serviceStarts).union(serviceTerminations).orderBy("date").withColumn("month", F.substring("date", 0, 7))
return billingEvents
def resolve_path(name):
output_prefix = options["output_prefix"] or ""
output_mode = options["output_mode"] or "overwrite"
output_kind = options["output_kind"] or "parquet"
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
return name
def write_df(df, name, skip_replication=False, partition_by=None):
dup_times = options["dup_times"] or 1
output_prefix = options["output_prefix"] or ""
output_mode = options["output_mode"] or "overwrite"
output_kind = options["output_kind"] or "parquet"
if not skip_replication:
df = replicate_df(df, dup_times)
write = df.write
if partition_by is not None:
if type(partition_by) == str:
partition_by = [partition_by]
write = write.partitionBy(*partition_by)
name = "%s.%s" % (name, output_kind)
if output_prefix != "":
name = "%s%s" % (output_prefix, name)
kwargs = {}
if output_kind == "csv":
kwargs["header"] = True
getattr(write.mode(output_mode), output_kind)(name, **kwargs)
def customer_meta(df):
SENIOR_CUTOFF = 65
ADULT_CUTOFF = 18
DAYS_IN_YEAR = 365.25
EXPONENTIAL_DIST_SCALE = 6.3
augmented_original = replicate_df(df, options["dup_times"] or 1)
customerMetaRaw = augmented_original.select(
"customerID",
F.lit(now).alias("now"),
(F.abs(F.hash(augmented_original.customerID)) % 4096 / 4096).alias("choice"),
"SeniorCitizen",
"gender",
"Partner",
"Dependents",
F.col("MonthlyCharges").cast(get_currency_type()).alias("MonthlyCharges"),
)
customerMetaRaw = customerMetaRaw.withColumn(
"ageInDays",
F.floor(
F.when(
customerMetaRaw.SeniorCitizen == 0,
(
customerMetaRaw.choice
* ((SENIOR_CUTOFF - ADULT_CUTOFF - 1) * DAYS_IN_YEAR)
)
+ (ADULT_CUTOFF * DAYS_IN_YEAR),
).otherwise(
(SENIOR_CUTOFF * DAYS_IN_YEAR)
+ (
DAYS_IN_YEAR
* (-F.log1p(-customerMetaRaw.choice) * EXPONENTIAL_DIST_SCALE)
)
)
).cast("int"),
)
customerMetaRaw = customerMetaRaw.withColumn(
"dateOfBirth", F.expr("date_sub(now, ageInDays)")
)
return customerMetaRaw.select(
"customerID",
"dateOfBirth",
"gender",
"SeniorCitizen",
"Partner",
"Dependents",
"MonthlyCharges",
"now",
).orderBy("customerID")
def phone_features(df):
phoneService = df.select(
"customerID", F.lit("PhoneService").alias("feature"), F.lit("Yes").alias("value")
).where(df.PhoneService == "Yes")
multipleLines = df.select(
"customerID", F.lit("MultipleLines").alias("feature"), F.lit("Yes").alias("value")
).where(df.MultipleLines == "Yes")
return phoneService.union(multipleLines).orderBy("customerID")
def internet_features(df):
internet_service = df.select(
"customerID",
F.lit("InternetService").alias("feature"),
df.InternetService.alias("value"),
).where(df.InternetService != "No")
customerInternetFeatures = internet_service
for feature in [
"InternetService",
"OnlineSecurity",
"OnlineBackup",
"DeviceProtection",
"TechSupport",
"StreamingTV",
"StreamingMovies",
]:
tmpdf = df.select(
"customerID",
F.lit(feature).alias("feature"),
df[feature].alias("value"),
).where(df[feature] == "Yes")
customerInternetFeatures = customerInternetFeatures.union(tmpdf)
return customerInternetFeatures
def account_features(df):
session = df.sql_ctx.sparkSession
accountSchema = pyspark.sql.types.StructType(
[
pyspark.sql.types.StructField(f, StringType())
for f in ["customerID", "feature", "value"]
]
)
customerAccountFeatures = session.createDataFrame(schema=accountSchema, data=[])
for feature in ["Contract", "PaperlessBilling", "PaymentMethod"]:
tmpdf = df.select(
"customerID",
F.lit(feature).alias("feature"),
df[feature].alias("value"),
).where(df[feature] != "No")
customerAccountFeatures = customerAccountFeatures.union(tmpdf)
return customerAccountFeatures
def debug_augmentation(df):
return (
df.select("customerID")
.distinct()
.select(
"customerID",
F.substring("customerID", 0, 10).alias("originalID"),
F.element_at(F.split("customerID", "-", -1), 3).alias("suffix"),
)
)
| spark-rapids-examples-main | examples/SQL+DF-Examples/customer-churn/notebooks/python/churn/augment.py |
# Copyright (c) 2022, NVIDIA Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyspark.sql import types as T
from pyspark.sql import functions as F
eda_options = { 'use_array_ops' : False }
def isnumeric(data_type):
numeric_types = [T.ByteType, T.ShortType, T.IntegerType, T.LongType, T.FloatType, T.DoubleType, T.DecimalType]
return any([isinstance(data_type, t) for t in numeric_types])
def percent_true(df, cols):
denominator = df.count()
return {col : df.where(F.col(col) == True).count() / denominator for col in cols}
def cardinalities(df, cols):
from functools import reduce
counts = df.agg(
F.struct(*[F.countDistinct(F.col(c)).alias(c) for c in cols] + [F.count(F.col(cols[0])).alias('total')]).alias("results")
).select("results").collect()[0][0].asDict()
counts.update({'total' : df.count()})
return counts
def likely_unique(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if k != "total" and abs(total - v) < total * 0.15]
def likely_categoricals(counts):
total = counts["total"]
return [k for (k, v) in counts.items() if v < total * 0.15 or v < 128]
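# A pure-Python illustration of how the two heuristics above read a cardinality map
# (the numbers are illustrative): columns whose distinct count is close to the row
# count are treated as likely-unique keys, while low-cardinality columns are treated
# as categoricals.
def _example_column_heuristics():
    counts = {'customerID': 7032, 'gender': 2, 'PaymentMethod': 4, 'total': 7043}
    return likely_unique(counts), likely_categoricals(counts)
    # -> (['customerID'], ['gender', 'PaymentMethod'])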
def unique_values(df, cols):
if eda_options['use_array_ops']:
return unique_values_array(df, cols)
else:
return unique_values_driver(df, cols)
def unique_values_array(df, cols):
from functools import reduce
counts = df.groupBy(
F.lit(True).alias("drop_me")
).agg(
*[F.array_sort(F.collect_set(F.col(c))).alias(c) for c in cols]
).drop("drop_me").cache()
result = reduce(lambda l, r: l.unionAll(r), [counts.select(F.lit(c).alias("field"), F.col(c).alias("unique_vals")) for c in counts.columns]).collect()
return dict([(r[0],r[1]) for r in result])
def unique_values_driver(df, cols):
return { col : [v[0] for v in df.select(F.col(col).alias('value')).distinct().orderBy(F.col('value')).collect()] for col in cols}
def approx_ecdf(df, cols):
from functools import reduce
quantiles = [0.0, 0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1.0]
qs = df.approxQuantile(cols, quantiles, 0.01)
result = dict(zip(cols, qs))
return {c: dict(zip(quantiles, vs)) for (c, vs) in result.items()}
def gen_summary(df, output_prefix=""):
summary = {}
string_cols = []
boolean_cols = []
numeric_cols = []
other_cols = []
for field in df.schema.fields:
if isinstance(field.dataType, T.StringType):
string_cols.append(field.name)
elif isinstance(field.dataType, T.BooleanType):
boolean_cols.append(field.name)
elif isnumeric(field.dataType):
numeric_cols.append(field.name)
else:
other_cols.append(field.name)
counts = cardinalities(df, string_cols)
uniques = likely_unique(counts)
categoricals = unique_values(df, likely_categoricals(counts))
for span in [2,3,4,6,12]:
thecube = df.cube("Churn", F.ceil(df.tenure / span).alias("%d_month_spans" % span), "gender", "Partner", "SeniorCitizen", "Contract", "PaperlessBilling", "PaymentMethod", F.ceil(F.log2(F.col("MonthlyCharges"))*10).alias("log_charges")).count()
therollup = df.rollup("Churn", F.ceil(df.tenure / span).alias("%d_month_spans" % span), "SeniorCitizen", "Contract", "PaperlessBilling", "PaymentMethod", F.ceil(F.log2(F.col("MonthlyCharges"))*10).alias("log_charges")).agg(F.sum(F.col("TotalCharges")).alias("sum_charges"))
thecube.write.mode("overwrite").parquet("%scube-%d.parquet" % (output_prefix, span))
therollup.write.mode("overwrite").parquet("%srollup-%d.parquet" % (output_prefix, span))
encoding_struct = {
"categorical" : categoricals,
"numeric" : numeric_cols + boolean_cols,
"unique": uniques
}
summary["schema"] = df.schema.jsonValue()
summary["ecdfs"] = approx_ecdf(df, numeric_cols)
summary["true_percentage"] = percent_true(df, boolean_cols)
summary["encoding"] = encoding_struct
summary["distinct_customers"] = df.select(df.customerID).distinct().count()
return summary
def losses_by_month(be):
customer_lifetime_values = be.groupBy("customerID").sum("value").alias("value")
return be.where(be.kind == "AccountTermination").join(customer_lifetime_values, "customerID").groupBy("month").sum("value").alias("value").sort("month").toPandas().to_json()
def output_reports(df, be=None, report_prefix=""):
import json
summary = gen_summary(df, report_prefix)
if be is not None:
summary["losses_by_month"] = losses_by_month(be)
with open("%ssummary.json" % report_prefix, "w") as sf:
json.dump(summary, sf)
with open("%sencodings.json" % report_prefix, "w") as ef:
json.dump(summary["encoding"], ef)
| spark-rapids-examples-main | examples/SQL+DF-Examples/customer-churn/notebooks/python/churn/eda.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pytest_addoption(parser):
"""Pytest hook to define command line options for pytest"""
parser.addoption(
"--mortgage_format", action="store", default="parquet", help="format of Mortgage data"
)
parser.addoption(
"--mortgage_path", action="store", default=None, help="path to Mortgage data"
)
parser.addoption(
"--std_input_path", action="store", default=None, help="path to standard input files"
)
parser.addoption(
"--tmp_path", action="store", default=None, help="path to store tmp files"
)
parser.addoption(
"--debug_tmp_path", action='store_true', default=False, help="if true don't delete tmp_path contents for debugging"
)
parser.addoption(
"--runtime_env", action='store', default="Apache", help="the runtime environment for the tests - apache or databricks"
)
parser.addoption(
"--cudf_udf", action='store_true', default=False, help="if true enable cudf_udf test"
)
parser.addoption(
"--rapids_udf_example_native", action='store_true', default=False,
help="if true enable tests for RAPIDS UDF examples with native code"
)
parser.addoption(
"--test_type", action='store', default="developer",
help="the type of tests that are being run to help check all the correct tests are run - developer, pre-commit, or nightly"
)
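# A hedged example of driving the suite with the options registered above; the test
# directory and option values are placeholders, and the usual entry point is
# runtests.py / spark-submit rather than a direct pytest.main call.
def _example_run_udf_tests():
    import pytest
    return pytest.main([
        'src/main/python',
        '--runtime_env=Apache',
        '--test_type=developer',
    ])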
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/conftest.py |
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pytest import main
#import cProfile
if __name__ == '__main__':
#cProfile.run('main(sys.argv[1:])', 'test_profile')
# arguments are the same as for pytest https://docs.pytest.org/en/latest/usage.html
# or run pytest -h
sys.exit(main(sys.argv[1:]))
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/runtests.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import random
from spark_init_internal import get_spark_i_know_what_i_am_doing
from pyspark.sql.dataframe import DataFrame
_approximate_float_args = None
def get_float_check():
if _approximate_float_args is not None:
return lambda lhs,rhs: lhs == pytest.approx(rhs, **_approximate_float_args)
else:
return lambda lhs,rhs: lhs == rhs
_incompat = False
def is_incompat():
return _incompat
_sort_on_spark = False
_sort_locally = False
def should_sort_on_spark():
return _sort_on_spark
def should_sort_locally():
return _sort_locally
_allow_any_non_gpu = False
_non_gpu_allowed = []
def is_allowing_any_non_gpu():
return _allow_any_non_gpu
def get_non_gpu_allowed():
return _non_gpu_allowed
def get_validate_execs_in_gpu_plan():
return _validate_execs_in_gpu_plan
_runtime_env = "apache"
def runtime_env():
return _runtime_env.lower()
def is_apache_runtime():
return runtime_env() == "apache"
def is_databricks_runtime():
return runtime_env() == "databricks"
def is_emr_runtime():
return runtime_env() == "emr"
def is_dataproc_runtime():
return runtime_env() == "dataproc"
_is_nightly_run = False
_is_precommit_run = False
def is_nightly_run():
return _is_nightly_run
def is_at_least_precommit_run():
return _is_nightly_run or _is_precommit_run
def skip_unless_nightly_tests(description):
if (_is_nightly_run):
raise AssertionError(description + ' during nightly test run')
else:
pytest.skip(description)
def skip_unless_precommit_tests(description):
if (_is_nightly_run):
raise AssertionError(description + ' during nightly test run')
elif (_is_precommit_run):
raise AssertionError(description + ' during pre-commit test run')
else:
pytest.skip(description)
_limit = -1
def get_limit():
return _limit
def _get_limit_from_mark(mark):
if mark.args:
return mark.args[0]
else:
return mark.kwargs.get('num_rows', 100000)
def pytest_runtest_setup(item):
global _sort_on_spark
global _sort_locally
order = item.get_closest_marker('ignore_order')
if order:
if order.kwargs.get('local', False):
_sort_on_spark = False
_sort_locally = True
else:
_sort_on_spark = True
_sort_locally = False
else:
_sort_on_spark = False
_sort_locally = False
global _incompat
if item.get_closest_marker('incompat'):
_incompat = True
else:
_incompat = False
global _approximate_float_args
app_f = item.get_closest_marker('approximate_float')
if app_f:
_approximate_float_args = app_f.kwargs
else:
_approximate_float_args = None
global _allow_any_non_gpu
global _non_gpu_allowed
_non_gpu_allowed_databricks = []
_allow_any_non_gpu_databricks = False
non_gpu_databricks = item.get_closest_marker('allow_non_gpu_databricks')
non_gpu = item.get_closest_marker('allow_non_gpu')
if non_gpu_databricks:
if is_databricks_runtime():
if non_gpu_databricks.kwargs and non_gpu_databricks.kwargs['any']:
_allow_any_non_gpu_databricks = True
elif non_gpu_databricks.args:
_non_gpu_allowed_databricks = non_gpu_databricks.args
else:
pytest.warn('allow_non_gpu_databricks marker without anything allowed')
if non_gpu:
if non_gpu.kwargs and non_gpu.kwargs['any']:
_allow_any_non_gpu = True
_non_gpu_allowed = []
elif non_gpu.args:
_allow_any_non_gpu = False
_non_gpu_allowed = non_gpu.args
else:
pytest.warn('allow_non_gpu marker without anything allowed')
_allow_any_non_gpu = False
_non_gpu_allowed = []
else:
_allow_any_non_gpu = False
_non_gpu_allowed = []
_allow_any_non_gpu = _allow_any_non_gpu | _allow_any_non_gpu_databricks
if _non_gpu_allowed and _non_gpu_allowed_databricks:
_non_gpu_allowed = _non_gpu_allowed + _non_gpu_allowed_databricks
elif _non_gpu_allowed_databricks:
_non_gpu_allowed = _non_gpu_allowed_databricks
global _validate_execs_in_gpu_plan
validate_execs = item.get_closest_marker('validate_execs_in_gpu_plan')
if validate_execs and validate_execs.args:
_validate_execs_in_gpu_plan = validate_execs.args
else:
_validate_execs_in_gpu_plan = []
global _limit
limit_mrk = item.get_closest_marker('limit')
if limit_mrk:
_limit = _get_limit_from_mark(limit_mrk)
else:
_limit = -1
def pytest_configure(config):
global _runtime_env
_runtime_env = config.getoption('runtime_env')
global _is_nightly_run
global _is_precommit_run
test_type = config.getoption('test_type').lower()
if "nightly" == test_type:
_is_nightly_run = True
elif "pre-commit" == test_type:
_is_precommit_run = True
elif "developer" != test_type:
raise Exception("not supported test type {}".format(test_type))
def pytest_collection_modifyitems(config, items):
for item in items:
extras = []
order = item.get_closest_marker('ignore_order')
if order:
if order.kwargs:
extras.append('IGNORE_ORDER(' + str(order.kwargs) + ')')
else:
extras.append('IGNORE_ORDER')
if item.get_closest_marker('incompat'):
extras.append('INCOMPAT')
app_f = item.get_closest_marker('approximate_float')
if app_f:
if app_f.kwargs:
extras.append('APPROXIMATE_FLOAT(' + str(app_f.kwargs) + ')')
else:
extras.append('APPROXIMATE_FLOAT')
non_gpu = item.get_closest_marker('allow_non_gpu')
if non_gpu:
if non_gpu.kwargs and non_gpu.kwargs['any']:
extras.append('ALLOW_NON_GPU(ANY)')
elif non_gpu.args:
extras.append('ALLOW_NON_GPU(' + ','.join(non_gpu.args) + ')')
limit_mrk = item.get_closest_marker('limit')
if limit_mrk:
extras.append('LIMIT({})'.format(_get_limit_from_mark(limit_mrk)))
if extras:
# This is not ideal because we are reaching into an internal value
item._nodeid = item.nodeid + '[' + ', '.join(extras) + ']'
@pytest.fixture(scope="session")
def std_input_path(request):
path = request.config.getoption("std_input_path")
if path is None:
skip_unless_precommit_tests("std_input_path is not configured")
else:
yield path
@pytest.fixture
def spark_tmp_path(request):
debug = request.config.getoption('debug_tmp_path')
ret = request.config.getoption('tmp_path')
if ret is None:
ret = '/tmp/pyspark_tests/'
ret = ret + '/' + str(random.randint(0, 1000000)) + '/'
# Make sure it is there and accessible
sc = get_spark_i_know_what_i_am_doing().sparkContext
config = sc._jsc.hadoopConfiguration()
path = sc._jvm.org.apache.hadoop.fs.Path(ret)
fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(config)
fs.mkdirs(path)
yield ret
if not debug:
fs.delete(path)
class TmpTableFactory:
def __init__(self, base_id):
self.base_id = base_id
self.running_id = 0
def get(self):
ret = '{}_{}'.format(self.base_id, self.running_id)
self.running_id = self.running_id + 1
return ret
@pytest.fixture
def spark_tmp_table_factory(request):
base_id = 'tmp_table_{}'.format(random.randint(0, 1000000))
yield TmpTableFactory(base_id)
sp = get_spark_i_know_what_i_am_doing()
tables = sp.sql("SHOW TABLES").collect()
for row in tables:
t_name = row['tableName']
if (t_name.startswith(base_id)):
sp.sql("DROP TABLE IF EXISTS {}".format(t_name))
def _get_jvm_session(spark):
return spark._jsparkSession
def _get_jvm(spark):
return spark.sparkContext._jvm
def spark_jvm():
return _get_jvm(get_spark_i_know_what_i_am_doing())
class MortgageRunner:
def __init__(self, mortgage_format, mortgage_acq_path, mortgage_perf_path):
self.mortgage_format = mortgage_format
self.mortgage_acq_path = mortgage_acq_path
self.mortgage_perf_path = mortgage_perf_path
def do_test_query(self, spark):
jvm_session = _get_jvm_session(spark)
jvm = _get_jvm(spark)
acq = self.mortgage_acq_path
perf = self.mortgage_perf_path
run = jvm.com.nvidia.spark.rapids.tests.mortgage.Run
if self.mortgage_format == 'csv':
df = run.csv(jvm_session, perf, acq)
elif self.mortgage_format == 'parquet':
df = run.parquet(jvm_session, perf, acq)
elif self.mortgage_format == 'orc':
df = run.orc(jvm_session, perf, acq)
else:
raise AssertionError('Not Supported Format {}'.format(self.mortgage_format))
return DataFrame(df, spark.getActiveSession())
@pytest.fixture(scope="session")
def mortgage(request):
mortgage_format = request.config.getoption("mortgage_format")
mortgage_path = request.config.getoption("mortgage_path")
if mortgage_path is None:
std_path = request.config.getoption("std_input_path")
if std_path is None:
skip_unless_precommit_tests("Mortgage tests are not configured to run")
else:
yield MortgageRunner('parquet', std_path + '/parquet_acq', std_path + '/parquet_perf')
else:
yield MortgageRunner(mortgage_format, mortgage_path + '/acq', mortgage_path + '/perf')
@pytest.fixture(scope="session")
def enable_cudf_udf(request):
enable_udf_cudf = request.config.getoption("cudf_udf")
if not enable_udf_cudf:
# cudf_udf tests are not required for any test runs
pytest.skip("cudf_udf not configured to run")
@pytest.fixture(scope="session")
def enable_rapids_udf_example_native(request):
native_enabled = request.config.getoption("rapids_udf_example_native")
if not native_enabled:
# udf_example_native tests are not required for any test runs
pytest.skip("rapids_udf_example_native is not configured to run")
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/conftest.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from conftest import is_allowing_any_non_gpu, get_non_gpu_allowed, get_validate_execs_in_gpu_plan, is_databricks_runtime
from pyspark.sql import SparkSession, DataFrame
from spark_init_internal import get_spark_i_know_what_i_am_doing, spark_version
def _from_scala_map(scala_map):
ret = {}
# The value we get is a scala map, not a java map, so we need to jump through some hoops
keys = scala_map.keys().iterator()
while keys.hasNext():
key = keys.next()
ret[key] = scala_map.get(key).get()
return ret
_spark = get_spark_i_know_what_i_am_doing()
# Have to reach into a private member to get access to the API we need
_orig_conf = _from_scala_map(_spark.conf._jconf.getAll())
_orig_conf_keys = _orig_conf.keys()
def is_tz_utc(spark=_spark):
"""
true if the tz is UTC else false
"""
# Now we have to do some kind of ugly internal java stuff
jvm = spark.sparkContext._jvm
utc = jvm.java.time.ZoneId.of('UTC').normalized()
sys_tz = jvm.java.time.ZoneId.systemDefault().normalized()
return utc == sys_tz
def _set_all_confs(conf):
for key, value in conf.items():
if _spark.conf.get(key, None) != value:
_spark.conf.set(key, value)
def reset_spark_session_conf():
"""Reset all of the configs for a given spark session."""
_set_all_confs(_orig_conf)
#We should clear the cache
_spark.catalog.clearCache()
# Have to reach into a private member to get access to the API we need
current_keys = _from_scala_map(_spark.conf._jconf.getAll()).keys()
for key in current_keys:
if key not in _orig_conf_keys:
_spark.conf.unset(key)
def _check_for_proper_return_values(something):
"""We don't want to return an DataFrame or Dataset from a with_spark_session. You will not get what you expect"""
if (isinstance(something, DataFrame)):
raise RuntimeError("You should never return a DataFrame from a with_*_session, you will not get the results that you expect")
def with_spark_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set."""
reset_spark_session_conf()
_add_job_description(conf)
_set_all_confs(conf)
ret = func(_spark)
_check_for_proper_return_values(ret)
return ret
def _add_job_description(conf):
is_gpu_job = conf.get('spark.rapids.sql.enabled', False)
job_type = 'GPU' if str(is_gpu_job).lower() == str(True).lower() else 'CPU'
job_desc = '{}[{}]'.format(os.environ.get('PYTEST_CURRENT_TEST'), job_type)
_spark.sparkContext.setJobDescription(job_desc)
def with_cpu_session(func, conf={}):
"""Run func that takes a spark session as input with the given configs set on the CPU."""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'false'
return with_spark_session(func, conf=copy)
def with_gpu_session(func, conf={}):
"""
Run func that takes a spark session as input with the given configs set on the GPU.
Note that this forces you into test mode unless non-GPU execution is explicitly allowed.
It is not a requirement, but it is the simplest approach for now.
"""
copy = dict(conf)
copy['spark.rapids.sql.enabled'] = 'true'
if is_allowing_any_non_gpu():
copy['spark.rapids.sql.test.enabled'] = 'false'
else:
copy['spark.rapids.sql.test.enabled'] = 'true'
copy['spark.rapids.sql.test.allowedNonGpu'] = ','.join(get_non_gpu_allowed())
copy['spark.rapids.sql.test.validateExecsInGpuPlan'] = ','.join(get_validate_execs_in_gpu_plan())
return with_spark_session(func, conf=copy)
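# A minimal usage sketch: the callable receives the shared Spark session with the CPU
# or GPU configs applied, and must not return a DataFrame (collect first). The extra
# conf key shown is illustrative.
def _example_sessions():
    rows_cpu = with_cpu_session(lambda spark: spark.range(10).collect())
    rows_gpu = with_gpu_session(lambda spark: spark.range(10).collect(),
                                conf={'spark.rapids.sql.explain': 'ALL'})
    return rows_cpu, rows_gpu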
def is_before_spark_311():
return spark_version() < "3.1.0"
def is_before_spark_320():
return spark_version() < "3.2.0"
def is_before_spark_330():
return spark_version() < "3.3.0"
def is_databricks91_or_later():
spark = get_spark_i_know_what_i_am_doing()
return spark.conf.get("spark.databricks.clusterUsageTags.sparkVersion", "") >= "9.1"
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/spark_session.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import date, datetime, timedelta, timezone
from decimal import *
import math
from pyspark.context import SparkContext
from pyspark.sql import Row
from pyspark.sql.types import *
import pyspark.sql.functions as f
import pytest
import random
from spark_session import is_tz_utc
import sre_yield
import struct
from conftest import skip_unless_precommit_tests
class DataGen:
"""Base class for data generation"""
def __repr__(self):
if not self.nullable:
return self.__class__.__name__[:-3] + '(not_null)'
return self.__class__.__name__[:-3]
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __init__(self, data_type, nullable=True, special_cases =[]):
self.data_type = data_type
self.list_of_special_cases = special_cases
self._special_cases = []
if isinstance(nullable, tuple):
self.nullable = nullable[0]
weight = nullable[1]
else:
self.nullable = nullable
weight = 5.0
if self.nullable:
self.with_special_case(None, weight)
# Special cases can be a value or a tuple of (value, weight). If the
# special_case itself is a tuple as in the case of StructGen, it MUST be added with a
# weight like : ((special_case_tuple_v1, special_case_tuple_v2), weight).
for element in special_cases:
if isinstance(element, tuple):
self.with_special_case(element[0], element[1])
else:
self.with_special_case(element)
def copy_special_case(self, special_case, weight=1.0):
# it would be good to do a deepcopy, but sre_yield is not happy with that.
c = copy.copy(self)
c._special_cases = copy.deepcopy(self._special_cases)
return c.with_special_case(special_case, weight=weight)
def with_special_case(self, special_case, weight=1.0):
"""
Add in a special case with a given weight. A special case can either be
a function that takes an instance of Random and returns the generated data
or it can be a constant. By default the weight is 1.0, and the default
number generation's weight is 100.0. The number of lines that are generated in
the data set should be proportional to its weight / the sum of all weights.
"""
if callable(special_case):
sc = special_case
else:
sc = lambda rand: special_case
self._special_cases.append((weight, sc))
return self
def get_types(self):
return 'DataType: {}, nullable: {}, special_cases: {}'.format(self.data_type,
self.nullable, self.list_of_special_cases)
def start(self, rand):
"""Start data generation using the given rand"""
raise TypeError('Children should implement this method and call _start')
def _start(self, rand, gen_func):
"""Start internally, but use the given gen_func as the base"""
if not self._special_cases:
self._gen_func = gen_func
else:
weighted_choices = [(100.0, lambda rand: gen_func())]
weighted_choices.extend(self._special_cases)
total = float(sum(weight for weight,gen in weighted_choices))
normalized_choices = [(weight/total, gen) for weight,gen in weighted_choices]
def choose_one():
pick = rand.random()
total = 0
for (weight, gen) in normalized_choices:
total += weight
if total >= pick:
return gen(rand)
raise RuntimeError('Random did not pick something we expected')
self._gen_func = choose_one
def gen(self, force_no_nulls=False):
"""generate the next line"""
if not self._gen_func:
raise RuntimeError('start must be called before generating any data')
v = self._gen_func()
if force_no_nulls:
while v is None:
v = self._gen_func()
return v
def contains_ts(self):
"""Checks if this contains a TimestampGen"""
return False
class ConvertGen(DataGen):
"""Provides a way to modify the data before it is returned"""
def __init__(self, child_gen, func, data_type=None, nullable=True):
if data_type is None:
data_type = child_gen.data_type
super().__init__(data_type, nullable=nullable)
self._child_gen = child_gen
self._func = func
def __repr__(self):
return super().__repr__() + '(' + str(self._child_gen) + ')'
def start(self, rand):
self._child_gen.start(rand)
def modify():
return self._func(self._child_gen.gen())
self._start(rand, modify)
_MAX_CHOICES = 1 << 64
class StringGen(DataGen):
"""Generate strings that match a pattern"""
def __init__(self, pattern="(.|\n){1,30}", flags=0, charset=sre_yield.CHARSET, nullable=True):
super().__init__(StringType(), nullable=nullable)
self.base_strs = sre_yield.AllStrings(pattern, flags=flags, charset=charset, max_count=_MAX_CHOICES)
def with_special_pattern(self, pattern, flags=0, charset=sre_yield.CHARSET, weight=1.0):
"""
Like with_special_case but you can provide a regexp pattern
instead of a hard coded string value.
"""
strs = sre_yield.AllStrings(pattern, flags=flags, charset=charset, max_count=_MAX_CHOICES)
try:
length = int(len(strs))
except OverflowError:
length = _MAX_CHOICES
return self.with_special_case(lambda rand : strs[rand.randrange(0, length)], weight=weight)
def start(self, rand):
strs = self.base_strs
try:
length = int(len(strs))
except OverflowError:
length = _MAX_CHOICES
self._start(rand, lambda : strs[rand.randrange(0, length)])
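# Illustrative usage sketch for with_special_pattern (patterns below are examples only):
# a dedicated regexp of special-case strings is blended into the base pattern.
def _example_string_gen_usage():
    gen = StringGen(pattern='[a-z]{1,5}').with_special_pattern('[0-9]{3}', weight=10.0)
    gen.start(random.Random(0))
    return [gen.gen() for _ in range(5)]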
BYTE_MIN = -(1 << 7)
BYTE_MAX = (1 << 7) - 1
class ByteGen(DataGen):
"""Generate Bytes"""
def __init__(self, nullable=True, min_val = BYTE_MIN, max_val = BYTE_MAX, special_cases=[]):
super().__init__(ByteType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
SHORT_MIN = -(1 << 15)
SHORT_MAX = (1 << 15) - 1
class ShortGen(DataGen):
"""Generate Shorts, which some built in corner cases."""
def __init__(self, nullable=True, min_val = SHORT_MIN, max_val = SHORT_MAX,
special_cases = [SHORT_MIN, SHORT_MAX, 0, 1, -1]):
super().__init__(ShortType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
INT_MIN = -(1 << 31)
INT_MAX = (1 << 31) - 1
class IntegerGen(DataGen):
"""Generate Ints, which some built in corner cases."""
def __init__(self, nullable=True, min_val = INT_MIN, max_val = INT_MAX,
special_cases = [INT_MIN, INT_MAX, 0, 1, -1]):
super().__init__(IntegerType(), nullable=nullable, special_cases=special_cases)
self._min_val = min_val
self._max_val = max_val
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
class DecimalGen(DataGen):
"""Generate Decimals, with some built in corner cases."""
def __init__(self, precision=None, scale=None, nullable=True, special_cases=[]):
if precision is None:
#Maximum number of decimal digits a Long can represent is 18
precision = 18
scale = 0
DECIMAL_MIN = Decimal('-' + ('9' * precision) + 'e' + str(-scale))
DECIMAL_MAX = Decimal(('9'* precision) + 'e' + str(-scale))
super().__init__(DecimalType(precision, scale), nullable=nullable, special_cases=special_cases)
self.scale = scale
self.precision = precision
pattern = "[0-9]{1,"+ str(precision) + "}e" + str(-scale)
self.base_strs = sre_yield.AllStrings(pattern, flags=0, charset=sre_yield.CHARSET, max_count=_MAX_CHOICES)
def __repr__(self):
return super().__repr__() + '(' + str(self.precision) + ',' + str(self.scale) + ')'
def start(self, rand):
strs = self.base_strs
try:
length = int(strs.length)
except OverflowError:
length = _MAX_CHOICES
self._start(rand, lambda : Decimal(strs[rand.randrange(0, length)]))
LONG_MIN = -(1 << 63)
LONG_MAX = (1 << 63) - 1
class LongGen(DataGen):
"""Generate Longs, which some built in corner cases."""
def __init__(self, nullable=True, min_val = LONG_MIN, max_val = LONG_MAX, special_cases = []):
_special_cases = [min_val, max_val, 0, 1, -1] if not special_cases else special_cases
super().__init__(LongType(), nullable=nullable, special_cases=_special_cases)
self._min_val = min_val
self._max_val = max_val
def start(self, rand):
self._start(rand, lambda : rand.randint(self._min_val, self._max_val))
class LongRangeGen(DataGen):
"""Generate Longs in incrementing order."""
def __init__(self, nullable=False, start_val=0, direction="inc"):
super().__init__(LongType(), nullable=nullable)
self._start_val = start_val
self._current_val = start_val
if (direction == "dec"):
def dec_it():
tmp = self._current_val
self._current_val -= 1
return tmp
self._do_it = dec_it
else:
def inc_it():
tmp = self._current_val
self._current_val += 1
return tmp
self._do_it = inc_it
def start(self, rand):
self._current_val = self._start_val
self._start(rand, self._do_it)
class RepeatSeqGen(DataGen):
"""Generate Repeated seq of `length` random items"""
def __init__(self, child, length):
super().__init__(child.data_type, nullable=False)
self.nullable = child.nullable
self._child = child
self._vals = []
self._length = length
self._index = 0
def __repr__(self):
return super().__repr__() + '(' + str(self._child) + ')'
def _loop_values(self):
ret = self._vals[self._index]
self._index = (self._index + 1) % self._length
return ret
def start(self, rand):
self._index = 0
self._child.start(rand)
self._start(rand, self._loop_values)
self._vals = [self._child.gen() for _ in range(0, self._length)]
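# Illustrative usage sketch for RepeatSeqGen (values below are examples only): the same
# `length` child values are cycled, which is handy for producing join or group-by keys.
def _example_repeat_seq_usage():
    gen = RepeatSeqGen(IntegerGen(nullable=False), 3)
    gen.start(random.Random(0))
    return [gen.gen() for _ in range(9)]  # the 3 generated values repeat 3 times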
class SetValuesGen(DataGen):
"""A set of values that are randomly selected"""
def __init__(self, data_type, data):
super().__init__(data_type, nullable=False)
self.nullable = any(x is None for x in data)
self._vals = data
def __repr__(self):
return super().__repr__() + '(' + str(self._vals) + ')'
def start(self, rand):
data = self._vals
length = len(data)
self._start(rand, lambda : data[rand.randrange(0, length)])
FLOAT_MIN = -3.4028235E38
FLOAT_MAX = 3.4028235E38
NEG_FLOAT_NAN_MIN_VALUE = struct.unpack('f', struct.pack('I', 0xffffffff))[0]
NEG_FLOAT_NAN_MAX_VALUE = struct.unpack('f', struct.pack('I', 0xff800001))[0]
POS_FLOAT_NAN_MIN_VALUE = struct.unpack('f', struct.pack('I', 0x7f800001))[0]
POS_FLOAT_NAN_MAX_VALUE = struct.unpack('f', struct.pack('I', 0x7fffffff))[0]
class FloatGen(DataGen):
"""Generate floats, which some built in corner cases."""
def __init__(self, nullable=True,
no_nans=False, special_cases=None):
self._no_nans = no_nans
if special_cases is None:
special_cases = [FLOAT_MIN, FLOAT_MAX, 0.0, -0.0, 1.0, -1.0]
if not no_nans:
special_cases.append(float('inf'))
special_cases.append(float('-inf'))
special_cases.append(float('nan'))
special_cases.append(NEG_FLOAT_NAN_MAX_VALUE)
super().__init__(FloatType(), nullable=nullable, special_cases=special_cases)
def _fixup_nans(self, v):
if self._no_nans and (math.isnan(v) or v == math.inf or v == -math.inf):
v = None if self.nullable else 0.0
return v
def start(self, rand):
def gen_float():
i = rand.randint(INT_MIN, INT_MAX)
p = struct.pack('i', i)
return self._fixup_nans(struct.unpack('f', p)[0])
self._start(rand, gen_float)
DOUBLE_MIN_EXP = -1022
DOUBLE_MAX_EXP = 1023
DOUBLE_MAX_FRACTION = int('1'*52, 2)
DOUBLE_MIN = -1.7976931348623157E308
DOUBLE_MAX = 1.7976931348623157E308
NEG_DOUBLE_NAN_MIN_VALUE = struct.unpack('d', struct.pack('L', 0xffffffffffffffff))[0]
NEG_DOUBLE_NAN_MAX_VALUE = struct.unpack('d', struct.pack('L', 0xfff0000000000001))[0]
POS_DOUBLE_NAN_MIN_VALUE = struct.unpack('d', struct.pack('L', 0x7ff0000000000001))[0]
POS_DOUBLE_NAN_MAX_VALUE = struct.unpack('d', struct.pack('L', 0x7fffffffffffffff))[0]
class DoubleGen(DataGen):
"""Generate doubles, which some built in corner cases."""
def __init__(self, min_exp=DOUBLE_MIN_EXP, max_exp=DOUBLE_MAX_EXP, no_nans=False,
nullable=True, special_cases = None):
self._min_exp = min_exp
self._max_exp = max_exp
self._no_nans = no_nans
self._use_full_range = (self._min_exp == DOUBLE_MIN_EXP) and (self._max_exp == DOUBLE_MAX_EXP)
if special_cases is None:
special_cases = [
self.make_from(1, self._max_exp, DOUBLE_MAX_FRACTION),
self.make_from(0, self._max_exp, DOUBLE_MAX_FRACTION),
self.make_from(1, self._min_exp, DOUBLE_MAX_FRACTION),
self.make_from(0, self._min_exp, DOUBLE_MAX_FRACTION)
]
if self._min_exp <= 0 and self._max_exp >= 0:
special_cases.append(0.0)
special_cases.append(-0.0)
if self._min_exp <= 3 and self._max_exp >= 3:
special_cases.append(1.0)
special_cases.append(-1.0)
if not no_nans:
special_cases.append(float('inf'))
special_cases.append(float('-inf'))
special_cases.append(float('nan'))
special_cases.append(NEG_DOUBLE_NAN_MAX_VALUE)
super().__init__(DoubleType(), nullable=nullable, special_cases=special_cases)
@staticmethod
def make_from(sign, exp, fraction):
sign = sign & 1 # 1 bit
exp = (exp + 1023) & 0x7FF # add bias and 11 bits
fraction = fraction & DOUBLE_MAX_FRACTION
i = (sign << 63) | (exp << 52) | fraction
p = struct.pack('L', i)
ret = struct.unpack('d', p)[0]
return ret
def _fixup_nans(self, v):
if self._no_nans and (math.isnan(v) or v == math.inf or v == -math.inf):
v = None if self.nullable else 0.0
return v
def start(self, rand):
if self._use_full_range:
def gen_double():
i = rand.randint(LONG_MIN, LONG_MAX)
p = struct.pack('l', i)
return self._fixup_nans(struct.unpack('d', p)[0])
self._start(rand, gen_double)
else:
def gen_part_double():
sign = rand.getrandbits(1)
exp = rand.randint(self._min_exp, self._max_exp)
fraction = rand.getrandbits(52)
return self._fixup_nans(self.make_from(sign, exp, fraction))
self._start(rand, gen_part_double)
class BooleanGen(DataGen):
"""Generate Bools (True/False)"""
def __init__(self, nullable=True):
super().__init__(BooleanType(), nullable=nullable)
def start(self, rand):
self._start(rand, lambda : bool(rand.getrandbits(1)))
class StructGen(DataGen):
"""Generate a Struct"""
def __init__(self, children, nullable=True, special_cases=[]):
"""
Initialize the struct with children. The children should be of the form:
[('name', Gen),('name_2', Gen2)]
Where name is the name of the struct field and Gens are Generators of
the type for that entry.
"""
tmp = [StructField(name, child.data_type, nullable=child.nullable) for name, child in children]
super().__init__(StructType(tmp), nullable=nullable, special_cases=special_cases)
self.children = children
def __repr__(self):
return super().__repr__() + '(' + ','.join([str(i) for i in self.children]) + ')'
def start(self, rand):
for name, child in self.children:
child.start(rand)
def make_tuple():
data = [child.gen() for name, child in self.children]
return tuple(data)
self._start(rand, make_tuple)
def contains_ts(self):
return any(child[1].contains_ts() for child in self.children)
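# Illustrative usage sketch for StructGen (field names below are examples only): children
# are (name, generator) pairs, exactly as the __init__ docstring above describes.
def _example_struct_gen_usage():
    gen = StructGen([('id', LongGen(nullable=False)), ('name', StringGen('[A-Z]{1,8}'))],
                    nullable=False)
    gen.start(random.Random(0))
    return [gen.gen() for _ in range(3)]  # each generated row is a tuple of child values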
class DateGen(DataGen):
"""Generate Dates in a given range"""
def __init__(self, start=None, end=None, nullable=True):
super().__init__(DateType(), nullable=nullable)
if start is None:
# Spark supports times starting at
# "0001-01-01 00:00:00.000000"
start = date(1, 1, 1)
elif not isinstance(start, date):
raise RuntimeError('Unsupported type passed in for start {}'.format(start))
if end is None:
# Spark supports time through
# "9999-12-31 23:59:59.999999"
end = date(9999, 12, 31)
elif isinstance(end, timedelta):
end = start + end
elif not isinstance(end, date):
raise RuntimeError('Unsupported type passed in for end {}'.format(end))
self._start_day = self._to_days_since_epoch(start)
self._end_day = self._to_days_since_epoch(end)
self.with_special_case(start)
self.with_special_case(end)
# we want a few around the leap year if possible
step = int((end.year - start.year) / 5.0)
if (step != 0):
years = {self._guess_leap_year(y) for y in range(start.year, end.year, step)}
for y in years:
leap_day = date(y, 2, 29)
if (leap_day > start and leap_day < end):
self.with_special_case(leap_day)
next_day = date(y, 3, 1)
if (next_day > start and next_day < end):
self.with_special_case(next_day)
@staticmethod
def _guess_leap_year(t):
y = int(math.ceil(t/4.0)) * 4
if ((y % 100) == 0) and ((y % 400) != 0):
y = y + 4
if (y == 10000):
y = y - 4
return y
_epoch = date(1970, 1, 1)
_days = timedelta(days=1)
def _to_days_since_epoch(self, val):
return int((val - self._epoch)/self._days)
def _from_days_since_epoch(self, days):
return self._epoch + timedelta(days=days)
def start(self, rand):
start = self._start_day
end = self._end_day
self._start(rand, lambda : self._from_days_since_epoch(rand.randint(start, end)))
class TimestampGen(DataGen):
"""Generate Timestamps in a given range. All timezones are UTC by default."""
def __init__(self, start=None, end=None, nullable=True):
super().__init__(TimestampType(), nullable=nullable)
if start is None:
# Spark supports times starting at
# "0001-01-01 00:00:00.000000"
# but it has issues if you get really close to that because it tries to do things
# in a different format which causes roundoff, so we have to add a few days,
# just to be sure
start = datetime(1, 1, 3, tzinfo=timezone.utc)
elif not isinstance(start, datetime):
raise RuntimeError('Unsupported type passed in for start {}'.format(start))
if end is None:
# Spark supports time through
# "9999-12-31 23:59:59.999999"
end = datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=timezone.utc)
elif isinstance(end, timedelta):
end = start + end
elif not isinstance(end, datetime):
raise RuntimeError('Unsupported type passed in for end {}'.format(end))
self._start_time = self._to_ms_since_epoch(start)
self._end_time = self._to_ms_since_epoch(end)
if (self._epoch >= start and self._epoch <= end):
self.with_special_case(self._epoch)
_epoch = datetime(1970, 1, 1, tzinfo=timezone.utc)
_ms = timedelta(milliseconds=1)
def _to_ms_since_epoch(self, val):
return int((val - self._epoch)/self._ms)
def _from_ms_since_epoch(self, ms):
return self._epoch + timedelta(milliseconds=ms)
def start(self, rand):
start = self._start_time
end = self._end_time
self._start(rand, lambda : self._from_ms_since_epoch(rand.randint(start, end)))
def contains_ts(self):
return True
class ArrayGen(DataGen):
"""Generate Arrays of data."""
def __init__(self, child_gen, min_length=0, max_length=20, nullable=True, all_null=False):
super().__init__(ArrayType(child_gen.data_type, containsNull=child_gen.nullable), nullable=nullable)
self._min_length = min_length
self._max_length = max_length
self._child_gen = child_gen
self.all_null = all_null
def __repr__(self):
return super().__repr__() + '(' + str(self._child_gen) + ')'
def start(self, rand):
self._child_gen.start(rand)
def gen_array():
if self.all_null:
return None
length = rand.randint(self._min_length, self._max_length)
return [self._child_gen.gen() for _ in range(0, length)]
self._start(rand, gen_array)
def contains_ts(self):
return self._child_gen.contains_ts()
class MapGen(DataGen):
"""Generate a Map"""
def __init__(self, key_gen, value_gen, min_length=0, max_length=20, nullable=True, special_cases=[]):
# keys cannot be nullable
assert not key_gen.nullable
self._min_length = min_length
self._max_length = max_length
self._key_gen = key_gen
self._value_gen = value_gen
super().__init__(MapType(key_gen.data_type, value_gen.data_type, valueContainsNull=value_gen.nullable), nullable=nullable, special_cases=special_cases)
def __repr__(self):
return super().__repr__() + '(' + str(self._key_gen) + ',' + str(self._value_gen) + ')'
def start(self, rand):
self._key_gen.start(rand)
self._value_gen.start(rand)
def make_dict():
length = rand.randint(self._min_length, self._max_length)
return {self._key_gen.gen(): self._value_gen.gen() for idx in range(0, length)}
self._start(rand, make_dict)
def contains_ts(self):
return self._key_gen.contains_ts() or self._value_gen.contains_ts()
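# Illustrative usage sketch for MapGen (generators below are examples only): map keys must
# come from a non-nullable generator, per the assert in MapGen.__init__ above.
def _example_map_gen_usage():
    gen = MapGen(IntegerGen(nullable=False), StringGen('[a-z]{1,4}'), max_length=4)
    gen.start(random.Random(0))
    return [gen.gen() for _ in range(3)]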
class NullGen(DataGen):
"""Generate NullType values"""
def __init__(self):
super().__init__(NullType(), nullable=True)
def start(self, rand):
def make_null():
return None
self._start(rand, make_null)
def skip_if_not_utc():
if (not is_tz_utc()):
skip_unless_precommit_tests('The java system time zone is not set to UTC')
def gen_df(spark, data_gen, length=2048, seed=0, num_slices=None):
"""Generate a spark dataframe from the given data generators."""
if isinstance(data_gen, list):
src = StructGen(data_gen, nullable=False)
else:
src = data_gen
# we cannot create a data frame from a nullable struct
assert not data_gen.nullable
# Before we get too far we need to verify that we can run with timestamps
if src.contains_ts():
skip_if_not_utc()
rand = random.Random(seed)
src.start(rand)
data = [src.gen() for index in range(0, length)]
# We use `numSlices` to create an RDD with the specific number of partitions,
# which is then turned into a dataframe. If not specified, it is `None` (default spark value)
return spark.createDataFrame(
SparkContext.getOrCreate().parallelize(data, numSlices=num_slices),
src.data_type)
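# Illustrative usage sketch for gen_df (column names below are examples only, and a
# SparkSession is assumed to be passed in): gen_df accepts either a single generator or a
# list of (name, generator) pairs.
def _example_gen_df_usage(spark):
    gens = [('a', IntegerGen()), ('b', StringGen('[a-z]{1,6}'))]
    return gen_df(spark, gens, length=100, seed=1, num_slices=4)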
def _mark_as_lit(data, data_type):
# To support nested types, 'data_type' is required.
assert data_type is not None
if data is None:
return f.lit(data).cast(data_type)
if isinstance(data_type, ArrayType):
assert isinstance(data, list)
# Sadly you cannot create a literal from just an array in pyspark
return f.array([_mark_as_lit(x, data_type.elementType) for x in data])
elif isinstance(data_type, StructType):
assert isinstance(data, tuple) and len(data) == len(data_type.fields)
# Sadly you cannot create a literal from just a dict/tuple in pyspark
children = zip(data, data_type.fields)
return f.struct([_mark_as_lit(x, fd.dataType).alias(fd.name) for x, fd in children])
elif isinstance(data_type, DateType):
# Due to https://bugs.python.org/issue13305 we need to zero pad for years prior to 1000,
# but this works for all of them
dateString = data.strftime("%Y-%m-%d").zfill(10)
return f.lit(dateString).cast(data_type)
elif isinstance(data_type, MapType):
assert isinstance(data, dict)
# Sadly you cannot create a literal from just a dict/tuple in pyspark
col_array = []
for k in data:
col_array.append(_mark_as_lit(k, data_type.keyType))
col_array.append(_mark_as_lit(data[k], data_type.valueType))
return f.create_map(*col_array)
else:
# lit does not take a data type so we might have to cast it
return f.lit(data).cast(data_type)
def _gen_scalars_common(data_gen, count, seed=0):
if isinstance(data_gen, list):
src = StructGen(data_gen, nullable=False)
else:
src = data_gen
# Before we get too far we need to verify that we can run with timestamps
if src.contains_ts():
skip_if_not_utc()
rand = random.Random(seed)
src.start(rand)
return src
def gen_scalars(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values."""
if force_no_nulls:
assert(not isinstance(data_gen, NullGen))
src = _gen_scalars_common(data_gen, count, seed=seed)
data_type = src.data_type
return (_mark_as_lit(src.gen(force_no_nulls=force_no_nulls), data_type) for i in range(0, count))
def gen_scalar(data_gen, seed=0, force_no_nulls=False):
"""Generate a single scalar value."""
v = list(gen_scalars(data_gen, 1, seed=seed, force_no_nulls=force_no_nulls))
return v[0]
def gen_scalar_values(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values."""
src = _gen_scalars_common(data_gen, count, seed=seed)
return (src.gen(force_no_nulls=force_no_nulls) for i in range(0, count))
def gen_scalar_value(data_gen, seed=0, force_no_nulls=False):
"""Generate a single scalar value."""
v = list(gen_scalar_values(data_gen, 1, seed=seed, force_no_nulls=force_no_nulls))
return v[0]
def debug_df(df, path = None, file_format = 'json', num_parts = 1):
"""Print out or save the contents and the schema of a dataframe for debugging."""
if path is not None:
# Save the dataframe and its schema
# The schema can be re-created by using DataType.fromJson and used
# for loading the dataframe
file_name = f"{path}.{file_format}"
schema_file_name = f"{path}.schema.json"
df.coalesce(num_parts).write.format(file_format).save(file_name)
print(f"SAVED df output for debugging at {file_name}")
schema_json = df.schema.json()
schema_file = open(schema_file_name , 'w')
schema_file.write(schema_json)
schema_file.close()
print(f"SAVED df schema for debugging along in the output dir")
else:
print('COLLECTED\n{}'.format(df.collect()))
df.explain()
df.printSchema()
return df
def print_params(data_gen):
print('Test Datagen Params=' + str([(a, b.get_types()) for a, b in data_gen]))
def idfn(val):
"""Provide an API to provide display names for data type generators."""
return str(val)
def meta_idfn(meta):
def tmp(something):
return meta + idfn(something)
return tmp
def three_col_df(spark, a_gen, b_gen, c_gen, length=2048, seed=0, num_slices=None):
gen = StructGen([('a', a_gen),('b', b_gen),('c', c_gen)], nullable=False)
return gen_df(spark, gen, length=length, seed=seed, num_slices=num_slices)
def two_col_df(spark, a_gen, b_gen, length=2048, seed=0, num_slices=None):
gen = StructGen([('a', a_gen),('b', b_gen)], nullable=False)
return gen_df(spark, gen, length=length, seed=seed, num_slices=num_slices)
def binary_op_df(spark, gen, length=2048, seed=0, num_slices=None):
return two_col_df(spark, gen, gen, length=length, seed=seed, num_slices=num_slices)
def unary_op_df(spark, gen, length=2048, seed=0, num_slices=None):
return gen_df(spark, StructGen([('a', gen)], nullable=False),
length=length, seed=seed, num_slices=num_slices)
def to_cast_string(spark_type):
if isinstance(spark_type, ByteType):
return 'BYTE'
elif isinstance(spark_type, ShortType):
return 'SHORT'
elif isinstance(spark_type, IntegerType):
return 'INT'
elif isinstance(spark_type, LongType):
return 'LONG'
elif isinstance(spark_type, FloatType):
return 'FLOAT'
elif isinstance(spark_type, DoubleType):
return 'DOUBLE'
elif isinstance(spark_type, BooleanType):
return 'BOOLEAN'
elif isinstance(spark_type, DateType):
return 'DATE'
elif isinstance(spark_type, TimestampType):
return 'TIMESTAMP'
elif isinstance(spark_type, StringType):
return 'STRING'
elif isinstance(spark_type, DecimalType):
return 'DECIMAL({}, {})'.format(spark_type.precision, spark_type.scale)
elif isinstance(spark_type, ArrayType):
return 'ARRAY<{}>'.format(to_cast_string(spark_type.elementType))
elif isinstance(spark_type, StructType):
children = [fd.name + ':' + to_cast_string(fd.dataType) for fd in spark_type.fields]
return 'STRUCT<{}>'.format(','.join(children))
else:
raise RuntimeError('CAST TO TYPE {} NOT SUPPORTED YET'.format(spark_type))
def get_null_lit_string(spark_type):
if isinstance(spark_type, NullType):
return 'null'
else:
string_type = to_cast_string(spark_type)
return 'CAST(null as {})'.format(string_type)
def _convert_to_sql(spark_type, data):
if isinstance(data, str):
d = "'" + data.replace("'", "\\'") + "'"
elif isinstance(data, datetime):
d = "'" + data.strftime('%Y-%m-%d T%H:%M:%S.%f').zfill(26) + "'"
elif isinstance(data, date):
d = "'" + data.strftime('%Y-%m-%d').zfill(10) + "'"
elif isinstance(data, list):
assert isinstance(spark_type, ArrayType)
d = "array({})".format(",".join([_convert_to_sql(spark_type.elementType, x) for x in data]))
elif isinstance(data, tuple):
assert isinstance(spark_type, StructType) and len(data) == len(spark_type.fields)
# Format of each child: 'name',data
children = ["'{}'".format(fd.name) + ',' + _convert_to_sql(fd.dataType, x)
for fd, x in zip(spark_type.fields, data)]
d = "named_struct({})".format(','.join(children))
elif data is None:
d = "null"
else:
d = "'{}'".format(str(data))
if isinstance(spark_type, NullType):
return d
else:
return 'CAST({} as {})'.format(d, to_cast_string(spark_type))
def gen_scalars_for_sql(data_gen, count, seed=0, force_no_nulls=False):
"""Generate scalar values, but strings that can be used in selectExpr or SQL"""
src = _gen_scalars_common(data_gen, count, seed=seed)
if isinstance(data_gen, NullGen):
assert not force_no_nulls
return ('null' for i in range(0, count))
spark_type = data_gen.data_type
return (_convert_to_sql(spark_type, src.gen(force_no_nulls=force_no_nulls)) for i in range(0, count))
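# Illustrative usage sketch for gen_scalars_for_sql (the generator choice below is an
# example only): the values come back as SQL literal strings, typically CAST expressions,
# that can be dropped straight into selectExpr or a SQL query.
def _example_gen_scalars_for_sql_usage():
    return list(gen_scalars_for_sql(IntegerGen(nullable=False), 3, seed=0, force_no_nulls=True))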
byte_gen = ByteGen()
short_gen = ShortGen()
int_gen = IntegerGen()
long_gen = LongGen()
float_gen = FloatGen()
double_gen = DoubleGen()
string_gen = StringGen()
boolean_gen = BooleanGen()
date_gen = DateGen()
timestamp_gen = TimestampGen()
decimal_gen_default = DecimalGen()
decimal_gen_neg_scale = DecimalGen(precision=7, scale=-3)
decimal_gen_scale_precision = DecimalGen(precision=7, scale=3)
decimal_gen_same_scale_precision = DecimalGen(precision=7, scale=7)
decimal_gen_64bit = DecimalGen(precision=12, scale=2)
decimal_gen_12_2 = DecimalGen(precision=12, scale=2)
decimal_gen_18_3 = DecimalGen(precision=18, scale=3)
decimal_gen_128bit = DecimalGen(precision=20, scale=2)
decimal_gen_20_2 = DecimalGen(precision=20, scale=2)
decimal_gen_30_2 = DecimalGen(precision=30, scale=2)
decimal_gen_36_5 = DecimalGen(precision=36, scale=5)
decimal_gen_36_neg5 = DecimalGen(precision=36, scale=-5)
decimal_gen_38_0 = DecimalGen(precision=38, scale=0)
decimal_gen_38_10 = DecimalGen(precision=38, scale=10)
decimal_gen_38_neg10 = DecimalGen(precision=38, scale=-10)
null_gen = NullGen()
numeric_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen]
integral_gens = [byte_gen, short_gen, int_gen, long_gen]
# A lot of mathematical expressions only support a double as input
# parametrizing even single-parameter tests keeps the tests consistent
double_gens = [double_gen]
double_n_long_gens = [double_gen, long_gen]
int_n_long_gens = [int_gen, long_gen]
decimal_gens_no_neg = [decimal_gen_default, decimal_gen_scale_precision,
decimal_gen_same_scale_precision, decimal_gen_64bit]
decimal_gens = [decimal_gen_neg_scale] + decimal_gens_no_neg
decimal_128_gens_no_neg = [decimal_gen_20_2, decimal_gen_30_2, decimal_gen_36_5,
decimal_gen_38_0, decimal_gen_38_10]
decimal_128_gens = decimal_128_gens_no_neg + [decimal_gen_36_neg5, decimal_gen_38_neg10]
# all of the basic gens
all_basic_gens_no_null = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen]
all_basic_gens = all_basic_gens_no_null + [null_gen]
all_basic_gens_no_nan = [byte_gen, short_gen, int_gen, long_gen, FloatGen(no_nans=True), DoubleGen(no_nans=True),
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen]
# TODO add in some array generators to this once that is supported for sorting
# a selection of generators that should be orderable (sortable and comparable)
orderable_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen] + decimal_gens
# TODO add in some array generators to this once that is supported for these operations
# a selection of generators that can be compared for equality
eq_gens = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen, timestamp_gen, null_gen]
# Include decimal type while testing equalTo and notEqualTo
eq_gens_with_decimal_gen = eq_gens + decimal_gens
date_gens = [date_gen]
date_n_time_gens = [date_gen, timestamp_gen]
boolean_gens = [boolean_gen]
single_level_array_gens = [ArrayGen(sub_gen) for sub_gen in all_basic_gens + decimal_gens]
single_array_gens_sample_with_decimal128 = [ArrayGen(sub_gen) for sub_gen in decimal_128_gens]
single_level_array_gens_no_null = [ArrayGen(sub_gen) for sub_gen in all_basic_gens_no_null + decimal_gens_no_neg]
single_level_array_gens_no_nan = [ArrayGen(sub_gen) for sub_gen in all_basic_gens_no_nan + decimal_gens]
single_level_array_gens_no_decimal = [ArrayGen(sub_gen) for sub_gen in all_basic_gens]
map_string_string_gen = [MapGen(StringGen(pattern='key_[0-9]', nullable=False), StringGen())]
# Be careful not to make these too large or data generation takes forever
# This is only a few nested array gens, because nesting can be very deep
nested_array_gens_sample = [ArrayGen(ArrayGen(short_gen, max_length=10), max_length=10),
ArrayGen(ArrayGen(string_gen, max_length=10), max_length=10),
ArrayGen(StructGen([['child0', byte_gen], ['child1', string_gen], ['child2', float_gen]]))]
# Some array gens, but not all because of nesting
array_gens_sample = single_level_array_gens + nested_array_gens_sample
array_gens_sample_with_decimal128 = single_level_array_gens + nested_array_gens_sample + single_array_gens_sample_with_decimal128
# all of the basic types in a single struct
all_basic_struct_gen = StructGen([['child'+str(ind), sub_gen] for ind, sub_gen in enumerate(all_basic_gens)])
# Some struct gens, but not all because of nesting
nonempty_struct_gens_sample = [all_basic_struct_gen,
StructGen([['child0', byte_gen], ['child1', all_basic_struct_gen]]),
StructGen([['child0', ArrayGen(short_gen)], ['child1', double_gen]])]
struct_gens_sample = nonempty_struct_gens_sample + [StructGen([])]
struct_gen_decimal128 = StructGen(
[['child' + str(ind), sub_gen] for ind, sub_gen in enumerate(decimal_128_gens)])
struct_gens_sample_with_decimal128 = struct_gens_sample + [
struct_gen_decimal128]
simple_string_to_string_map_gen = MapGen(StringGen(pattern='key_[0-9]', nullable=False),
StringGen(), max_length=10)
all_basic_map_gens = [MapGen(f(nullable=False), f()) for f in [BooleanGen, ByteGen, ShortGen, IntegerGen, LongGen, FloatGen, DoubleGen, DateGen, TimestampGen]] + [simple_string_to_string_map_gen]
decimal_64_map_gens = [MapGen(key_gen=gen, value_gen=gen, nullable=False) for gen in [DecimalGen(7, 3, nullable=False), DecimalGen(12, 2, nullable=False), DecimalGen(18, -3, nullable=False)]]
decimal_128_map_gens = [MapGen(key_gen=gen, value_gen=gen, nullable=False) for gen in [DecimalGen(20, 2, nullable=False), DecimalGen(36, 5, nullable=False), DecimalGen(38, 38, nullable=False),
DecimalGen(36, -5, nullable=False)]]
decimal_128_no_neg_map_gens = [MapGen(key_gen=gen, value_gen=gen, nullable=False) for gen in [DecimalGen(20, 2, nullable=False), DecimalGen(36, 5, nullable=False), DecimalGen(38, 38, nullable=False)]]
# Some map gens, but not all because of nesting
map_gens_sample = all_basic_map_gens + [MapGen(StringGen(pattern='key_[0-9]', nullable=False), ArrayGen(string_gen), max_length=10),
MapGen(RepeatSeqGen(IntegerGen(nullable=False), 10), long_gen, max_length=10),
MapGen(StringGen(pattern='key_[0-9]', nullable=False), simple_string_to_string_map_gen)]
allow_negative_scale_of_decimal_conf = {'spark.sql.legacy.allowNegativeScaleOfDecimal': 'true'}
def copy_and_update(conf, *more_confs):
local_conf = conf.copy()
for more in more_confs:
local_conf.update(more)
return local_conf
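# Illustrative usage sketch for copy_and_update (the conf key below is an example only):
# the input confs are left untouched and a merged copy is returned.
def _example_copy_and_update_usage():
    return copy_and_update(allow_negative_scale_of_decimal_conf,
                           {'spark.rapids.sql.variableFloatAgg.enabled': 'true'})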
all_gen = [StringGen(), ByteGen(), ShortGen(), IntegerGen(), LongGen(),
FloatGen(), DoubleGen(), BooleanGen(), DateGen(), TimestampGen(),
decimal_gen_default, decimal_gen_scale_precision, decimal_gen_same_scale_precision,
decimal_gen_64bit, decimal_gen_128bit, decimal_gen_36_5, decimal_gen_38_10]
# Pyarrow will raise the error below if the timestamp is out of range for both CPU and GPU,
# so narrow down the time range to avoid exceptions causing test failures.
#
# "pyarrow.lib.ArrowInvalid: Casting from timestamp[us, tz=UTC] to timestamp[ns]
# would result in out of bounds timestamp: 51496791452587000"
#
# This issue has been fixed in pyarrow by the PR https://github.com/apache/arrow/pull/7169
# However it still requires PySpark to specify the new argument "timestamp_as_object".
arrow_common_gen = [byte_gen, short_gen, int_gen, long_gen, float_gen, double_gen,
string_gen, boolean_gen, date_gen,
TimestampGen(start=datetime(1970, 1, 1, tzinfo=timezone.utc),
end=datetime(2262, 1, 1, tzinfo=timezone.utc))]
arrow_array_gens = [ArrayGen(subGen) for subGen in arrow_common_gen] + nested_array_gens_sample
arrow_one_level_struct_gen = StructGen([
['child'+str(i), sub_gen] for i, sub_gen in enumerate(arrow_common_gen)])
arrow_struct_gens = [arrow_one_level_struct_gen,
StructGen([['child0', ArrayGen(short_gen)], ['child1', arrow_one_level_struct_gen]])]
# This function adds a new column named uniq_int where each row
# has a new unique integer value. It just starts at 0 and
# increments by 1 for each row.
# This can be used to add a column to a dataframe if you need to
# sort on a column with unique values.
# This collects the data to the driver, though, so it can be expensive.
def append_unique_int_col_to_df(spark, dataframe):
def append_unique_to_rows(rows):
new = []
for item in range(len(rows)):
row_dict = rows[item].asDict()
row_dict['uniq_int'] = item
new_row = Row(**row_dict)
new.append(new_row)
return new
collected = dataframe.collect()
if (len(collected) > INT_MAX):
raise RuntimeError('Too many rows to add unique integer values starting from 0')
existing_schema = dataframe.schema
new_rows = append_unique_to_rows(collected)
new_schema = StructType(existing_schema.fields + [StructField("uniq_int", IntegerType(), False)])
return spark.createDataFrame(new_rows, new_schema)
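# Illustrative usage sketch for append_unique_int_col_to_df (column name 'a' is an example
# only, and a SparkSession is assumed): the added uniq_int column gives a deterministic
# sort key for result comparison.
def _example_append_unique_int_col_usage(spark):
    df = gen_df(spark, [('a', IntegerGen())], length=100)
    return append_unique_int_col_to_df(spark, df).sort('uniq_int')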
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/data_gen.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from asserts import assert_gpu_and_cpu_are_equal_collect, assert_gpu_and_cpu_are_equal_sql
from data_gen import *
from spark_session import with_spark_session
from pyspark.sql.utils import AnalysisException
encoded_url_gen = StringGen('([^%]{0,1}(%[0-9A-F][0-9A-F]){0,1}){0,30}')
def drop_udf(spark, udfname):
spark.sql("DROP TEMPORARY FUNCTION IF EXISTS {}".format(udfname))
def skip_if_no_hive(spark):
if spark.conf.get("spark.sql.catalogImplementation") != "hive":
raise RuntimeError('The Spark session does not have Hive support')
def load_hive_udf_or_skip_test(spark, udfname, udfclass):
drop_udf(spark, udfname)
spark.sql("CREATE TEMPORARY FUNCTION {} AS '{}'".format(udfname, udfclass))
def test_hive_simple_udf():
with_spark_session(skip_if_no_hive)
data_gens = [["i", int_gen], ["s", encoded_url_gen]]
def evalfn(spark):
load_hive_udf_or_skip_test(spark, "urldecode", "com.nvidia.spark.rapids.udf.hive.URLDecode")
return gen_df(spark, data_gens)
assert_gpu_and_cpu_are_equal_sql(
evalfn,
"hive_simple_udf_test_table",
"SELECT i, urldecode(s) FROM hive_simple_udf_test_table")
def test_hive_generic_udf():
with_spark_session(skip_if_no_hive)
def evalfn(spark):
load_hive_udf_or_skip_test(spark, "urlencode", "com.nvidia.spark.rapids.udf.hive.URLEncode")
return gen_df(spark, [["s", StringGen('.{0,30}')]])
assert_gpu_and_cpu_are_equal_sql(
evalfn,
"hive_generic_udf_test_table",
"SELECT urlencode(s) FROM hive_generic_udf_test_table")
def evalfn_decimal(spark):
load_hive_udf_or_skip_test(spark, "fraction", "com.nvidia.spark.rapids.udf.hive.DecimalFraction")
return gen_df(spark, [["dec", DecimalGen(38, 18)]])
assert_gpu_and_cpu_are_equal_sql(
evalfn_decimal,
"hive_generic_udf_test_table",
"SELECT fraction(dec) FROM hive_generic_udf_test_table")
@pytest.mark.rapids_udf_example_native
def test_hive_simple_udf_native():
with_spark_session(skip_if_no_hive)
data_gens = [["s", StringGen('.{0,30}')]]
def evalfn(spark):
load_hive_udf_or_skip_test(spark, "wordcount", "com.nvidia.spark.rapids.udf.hive.StringWordCount")
return gen_df(spark, data_gens)
assert_gpu_and_cpu_are_equal_sql(
evalfn,
"hive_native_udf_test_table",
"SELECT wordcount(s) FROM hive_native_udf_test_table")
def load_java_udf_or_skip_test(spark, udfname, udfclass, udf_return_type=None):
drop_udf(spark, udfname)
spark.udf.registerJavaFunction(udfname, udfclass, udf_return_type)
def test_java_url_decode():
def evalfn(spark):
load_java_udf_or_skip_test(spark, 'urldecode', 'com.nvidia.spark.rapids.udf.java.URLDecode')
return unary_op_df(spark, encoded_url_gen).selectExpr("urldecode(a)")
assert_gpu_and_cpu_are_equal_collect(evalfn)
def test_java_url_encode():
def evalfn(spark):
load_java_udf_or_skip_test(spark, 'urlencode', 'com.nvidia.spark.rapids.udf.java.URLEncode')
return unary_op_df(spark, StringGen('.{0,30}')).selectExpr("urlencode(a)")
assert_gpu_and_cpu_are_equal_collect(evalfn)
def test_java_decimal_fraction():
def evalfn(spark):
from pyspark.sql.types import DecimalType
load_java_udf_or_skip_test(spark, 'fraction',
'com.nvidia.spark.rapids.udf.java.DecimalFraction')
load_java_udf_or_skip_test(spark, 'fraction_dec64_s10',
'com.nvidia.spark.rapids.udf.java.DecimalFraction',
DecimalType(18, 10))
load_java_udf_or_skip_test(spark, 'fraction_dec32_s3',
'com.nvidia.spark.rapids.udf.java.DecimalFraction',
DecimalType(8, 3))
return three_col_df(spark, DecimalGen(38, 18), DecimalGen(18, 10), DecimalGen(8, 3)
).selectExpr("fraction(a)", "fraction_dec64_s10(b)", "fraction_dec32_s3(c)")
assert_gpu_and_cpu_are_equal_collect(evalfn)
@pytest.mark.rapids_udf_example_native
def test_java_cosine_similarity_reasonable_range():
def evalfn(spark):
class RangeFloatGen(FloatGen):
def start(self, rand):
self._start(rand, lambda: rand.uniform(-1000.0, 1000.0))
load_java_udf_or_skip_test(spark, "cosine_similarity", "com.nvidia.spark.rapids.udf.java.CosineSimilarity")
arraygen = ArrayGen(RangeFloatGen(nullable=False, no_nans=True, special_cases=[]), min_length=8, max_length=8)
df = binary_op_df(spark, arraygen)
return df.selectExpr("cosine_similarity(a, b)")
assert_gpu_and_cpu_are_equal_collect(evalfn)
@pytest.mark.rapids_udf_example_native
def test_java_cosine_similarity_with_nans():
def evalfn(spark):
load_java_udf_or_skip_test(spark, "cosine_similarity", "com.nvidia.spark.rapids.udf.java.CosineSimilarity")
arraygen = ArrayGen(FloatGen(nullable=False), min_length=8, max_length=8)
return binary_op_df(spark, arraygen).selectExpr("cosine_similarity(a, b)")
assert_gpu_and_cpu_are_equal_collect(evalfn)
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/rapids_udf_test.py |
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
try:
import pyspark
except ImportError as error:
import findspark
findspark.init()
import pyspark
_DRIVER_ENV = 'PYSP_TEST_spark_driver_extraJavaOptions'
def _spark__init():
#Force the RapidsPlugin to be enabled, so it blows up if the classpath is not set properly
# DO NOT SET ANY OTHER CONFIGS HERE!!!
# due to bugs in pyspark/pytest it looks like any configs set here
# can be reset in the middle of a test if specific operations are done (some types of cast etc)
_sb = pyspark.sql.SparkSession.builder
_sb.config('spark.plugins', 'com.nvidia.spark.SQLPlugin') \
.config("spark.sql.adaptive.enabled", "false") \
.config('spark.sql.queryExecutionListeners', 'org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback')
for key, value in os.environ.items():
if key.startswith('PYSP_TEST_') and key != _DRIVER_ENV:
_sb.config(key[10:].replace('_', '.'), value)
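# e.g. (illustrative) exporting PYSP_TEST_spark_sql_session_timeZone=UTC before running
# the tests becomes the Spark conf spark.sql.session.timeZone=UTC here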
driver_opts = os.environ.get(_DRIVER_ENV, "")
if ('PYTEST_XDIST_WORKER' in os.environ):
wid = os.environ['PYTEST_XDIST_WORKER']
_handle_derby_dir(_sb, driver_opts, wid)
_handle_event_log_dir(_sb, wid)
else:
_sb.config('spark.driver.extraJavaOptions', driver_opts)
_handle_event_log_dir(_sb, 'gw0')
# enableHiveSupport() is needed for parquet bucket tests
_s = _sb.enableHiveSupport() \
.appName('rapids spark plugin integration tests (python)').getOrCreate()
#TODO catch the ClassNotFound error that happens if the classpath is not set up properly and
# make it a better error message
_s.sparkContext.setLogLevel("WARN")
return _s
def _handle_derby_dir(sb, driver_opts, wid):
d = "./derby_{}".format(wid)
if not os.path.exists(d):
os.makedirs(d)
sb.config('spark.driver.extraJavaOptions', driver_opts + ' -Dderby.system.home={}'.format(d))
def _handle_event_log_dir(sb, wid):
if os.environ.get('SPARK_EVENTLOG_ENABLED', str(True)).lower() in [
str(False).lower(), 'off', '0'
]:
print('Automatic configuration for spark event log disabled')
return
spark_conf = pyspark.SparkConf()
master_url = os.environ.get('PYSP_TEST_spark_master',
spark_conf.get("spark.master", 'local'))
event_log_config = os.environ.get('PYSP_TEST_spark_eventLog_enabled',
spark_conf.get('spark.eventLog.enabled', str(False).lower()))
event_log_codec = os.environ.get('PYSP_TEST_spark_eventLog_compression_codec', 'zstd')
if not master_url.startswith('local') or event_log_config != str(False).lower():
print("SPARK_EVENTLOG_ENABLED is ignored for non-local Spark master and when "
"it's pre-configured by the user")
return
d = "./eventlog_{}".format(wid)
if not os.path.exists(d):
os.makedirs(d)
print('Spark event logs will appear under {}. Set the environment variable '
'SPARK_EVENTLOG_ENABLED=false if you want to disable it'.format(d))
sb\
.config('spark.eventLog.dir', "file://{}".format(os.path.abspath(d))) \
.config('spark.eventLog.compress', True) \
.config('spark.eventLog.enabled', True) \
.config('spark.eventLog.compression.codec', event_log_codec)
_spark = _spark__init()
def get_spark_i_know_what_i_am_doing():
"""
Get the current SparkSession.
This should almost never be called directly; instead you should call
with_spark_session, with_cpu_session, or with_gpu_session for spark_session.
This is to guarantee that the session and its config are set up in a repeatable way.
"""
return _spark
def spark_version():
return _spark.version
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/spark_init_internal.py |
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from conftest import is_incompat, should_sort_on_spark, should_sort_locally, get_float_check, get_limit, spark_jvm
from datetime import date, datetime
from decimal import Decimal
import math
from pyspark.sql import Row
from py4j.protocol import Py4JJavaError
import pytest
from spark_session import with_cpu_session, with_gpu_session
import time
import types as pytypes
import data_gen
def _assert_equal(cpu, gpu, float_check, path):
t = type(cpu)
if (t is Row):
assert len(cpu) == len(gpu), "CPU and GPU row have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
if hasattr(cpu, "__fields__") and hasattr(gpu, "__fields__"):
assert cpu.__fields__ == gpu.__fields__, "CPU and GPU row have different fields at {} CPU: {} GPU: {}".format(path, cpu.__fields__, gpu.__fields__)
for field in cpu.__fields__:
_assert_equal(cpu[field], gpu[field], float_check, path + [field])
else:
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is list):
assert len(cpu) == len(gpu), "CPU and GPU list have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is tuple):
assert len(cpu) == len(gpu), "CPU and GPU list have different lengths at {} CPU: {} GPU: {}".format(path, len(cpu), len(gpu))
for index in range(len(cpu)):
_assert_equal(cpu[index], gpu[index], float_check, path + [index])
elif (t is pytypes.GeneratorType):
index = 0
# generator has no zip :( so we have to do this the hard way
done = False
while not done:
sub_cpu = None
sub_gpu = None
try:
sub_cpu = next(cpu)
except StopIteration:
done = True
try:
sub_gpu = next(gpu)
except StopIteration:
done = True
if done:
assert sub_cpu == sub_gpu and sub_cpu == None, "CPU and GPU generators have different lengths at {}".format(path)
else:
_assert_equal(sub_cpu, sub_gpu, float_check, path + [index])
index = index + 1
elif (t is dict):
# The order of key/values is not guaranteed in python dicts, nor are they guaranteed by Spark
# so sort the items to do our best with ignoring the order of dicts
cpu_items = sorted(cpu.items(), key=_RowCmp)
gpu_items = sorted(gpu.items(), key=_RowCmp)
_assert_equal(cpu_items, gpu_items, float_check, path + ["map"])
elif (t is int):
assert cpu == gpu, "GPU and CPU int values are different at {}".format(path)
elif (t is float):
if (math.isnan(cpu)):
assert math.isnan(gpu), "GPU and CPU float values are different at {}".format(path)
else:
assert float_check(cpu, gpu), "GPU and CPU float values are different {}".format(path)
elif isinstance(cpu, str):
assert cpu == gpu, "GPU and CPU string values are different at {}".format(path)
elif isinstance(cpu, datetime):
assert cpu == gpu, "GPU and CPU timestamp values are different at {}".format(path)
elif isinstance(cpu, date):
assert cpu == gpu, "GPU and CPU date values are different at {}".format(path)
elif isinstance(cpu, bool):
assert cpu == gpu, "GPU and CPU boolean values are different at {}".format(path)
elif isinstance(cpu, Decimal):
assert cpu == gpu, "GPU and CPU decimal values are different at {}".format(path)
elif isinstance(cpu, bytearray):
assert cpu == gpu, "GPU and CPU bytearray values are different at {}".format(path)
elif (cpu == None):
assert cpu == gpu, "GPU and CPU are not both null at {}".format(path)
else:
assert False, "Found unexpected type {} at {}".format(t, path)
def assert_equal(cpu, gpu):
"""Verify that the result from the CPU and the GPU are equal"""
try:
_assert_equal(cpu, gpu, float_check=get_float_check(), path=[])
except:
print("CPU OUTPUT: %s" % cpu)
print("GPU OUTPUT: %s" % gpu)
raise
def _has_incompat_conf(conf):
return ('spark.rapids.sql.incompatibleOps.enabled' in conf and
conf['spark.rapids.sql.incompatibleOps.enabled'].lower() == 'true')
class _RowCmp(object):
"""Allows for sorting Rows in a consistent way"""
def __init__(self, wrapped):
if isinstance(wrapped, Row) or isinstance(wrapped, list) or isinstance(wrapped, tuple):
self.wrapped = [_RowCmp(c) for c in wrapped]
elif isinstance(wrapped, dict):
def sort_dict(e):
return _RowCmp(e)
tmp = [(k, v) for k, v in wrapped.items()]
tmp.sort(key=sort_dict)
self.wrapped = [_RowCmp(c) for c in tmp]
else:
self.wrapped = wrapped
if isinstance(wrapped, float):
self.is_nan = math.isnan(wrapped)
else:
self.is_nan = False
def cmp(self, other):
try:
#None comes before anything else
#NaN comes next
if (self.wrapped is None and other.wrapped is None):
return 0
elif (self.wrapped is None):
return -1
elif (other.wrapped is None):
return 1
elif self.is_nan and other.is_nan:
return 0
elif self.is_nan:
return -1
elif other.is_nan:
return 1
elif self.wrapped == other.wrapped:
return 0
elif self.wrapped < other.wrapped:
return -1
else:
return 1
except TypeError as te:
print("ERROR TRYING TO COMPARE {} to {} {}".format(self.wrapped, other.wrapped, te))
raise te
def __lt__(self, other):
return self.cmp(other) < 0
def __gt__(self, other):
return self.cmp(other) > 0
def __eq__(self, other):
return self.cmp(other) == 0
def __le__(self, other):
return self.cmp(other) <= 0
def __ge__(self, other):
return self.cmp(other) >= 0
def __ne__(self, other):
return self.cmp(other) != 0
def _prep_func_for_compare(func, mode):
sort_locally = should_sort_locally()
if should_sort_on_spark():
def with_sorted(spark):
df = func(spark)
return df.sort(df.columns)
sorted_func = with_sorted
else:
sorted_func = func
limit_val = get_limit()
if limit_val > 0:
def with_limit(spark):
df = sorted_func(spark)
return df.limit(limit_val)
limit_func = with_limit
else:
limit_func = sorted_func
if mode == 'COLLECT':
bring_back = lambda spark: limit_func(spark).collect()
collect_type = 'COLLECT'
elif mode == 'COUNT':
bring_back = lambda spark: limit_func(spark).count()
collect_type = 'COUNT'
elif mode == 'COLLECT_WITH_DATAFRAME':
def bring_back(spark):
df = limit_func(spark)
return (df.collect(), df)
collect_type = 'COLLECT'
return (bring_back, collect_type)
else:
bring_back = lambda spark: limit_func(spark).toLocalIterator()
collect_type = 'ITERATOR'
if sort_locally:
raise RuntimeError('Local Sort is only supported on a collect')
return (bring_back, collect_type)
def _prep_incompat_conf(conf):
if is_incompat():
conf = dict(conf) # Make a copy before we change anything
conf['spark.rapids.sql.incompatibleOps.enabled'] = 'true'
elif _has_incompat_conf(conf):
raise AssertionError("incompat must be enabled by the incompat fixture")
return conf
def _assert_gpu_and_cpu_writes_are_equal(
write_func,
read_func,
base_path,
mode,
conf={}):
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
cpu_path = base_path + '/CPU'
with_cpu_session(lambda spark : write_func(spark, cpu_path), conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
gpu_path = base_path + '/GPU'
with_gpu_session(lambda spark : write_func(spark, gpu_path), conf=conf)
gpu_end = time.time()
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
(cpu_bring_back, cpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, cpu_path), mode)
(gpu_bring_back, gpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, gpu_path), mode)
from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_gpu_and_cpu_writes_are_equal_collect(write_func, read_func, base_path, conf={}):
"""
Assert when running write_func on both the CPU and the GPU and reading using read_func
on the CPU that the results are equal.
In this case the data is collected back to the driver and compared here, so be
careful about the amount of data returned.
"""
_assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, 'COLLECT', conf=conf)
def assert_gpu_and_cpu_writes_are_equal_iterator(write_func, read_func, base_path, conf={}):
"""
Assert when running write_func on both the CPU and the GPU and reading using read_func
on the CPU that the results are equal.
In this case the data is pulled back to the driver in chunks and compared here
so any amount of data can work, just be careful about how long it might take.
"""
_assert_gpu_and_cpu_writes_are_equal(write_func, read_func, base_path, 'ITERATOR', conf=conf)
def assert_gpu_fallback_write(write_func,
read_func,
base_path,
cpu_fallback_class_name,
conf={}):
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
cpu_path = base_path + '/CPU'
with_cpu_session(lambda spark : write_func(spark, cpu_path), conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.startCapture()
gpu_start = time.time()
gpu_path = base_path + '/GPU'
with_gpu_session(lambda spark : write_func(spark, gpu_path), conf=conf)
gpu_end = time.time()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertCapturedAndGpuFellBack(cpu_fallback_class_name, 10000)
print('### WRITE: GPU TOOK {} CPU TOOK {} ###'.format(
gpu_end - gpu_start, cpu_end - cpu_start))
(cpu_bring_back, cpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, cpu_path), 'COLLECT')
(gpu_bring_back, gpu_collect_type) = _prep_func_for_compare(
lambda spark: read_func(spark, gpu_path), 'COLLECT')
from_cpu = with_cpu_session(cpu_bring_back, conf=conf)
from_gpu = with_cpu_session(gpu_bring_back, conf=conf)
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_cpu_and_gpu_are_equal_collect_with_capture(func,
exist_classes='',
non_exist_classes='',
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, 'COLLECT_WITH_DATAFRAME')
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
from_cpu, cpu_df = with_cpu_session(bring_back, conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
from_gpu, gpu_df = with_gpu_session(bring_back, conf=conf)
gpu_end = time.time()
jvm = spark_jvm()
if exist_classes:
for clz in exist_classes.split(','):
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertContains(gpu_df._jdf, clz)
if non_exist_classes:
for clz in non_exist_classes.split(','):
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertNotContain(gpu_df._jdf, clz)
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_cpu_and_gpu_are_equal_sql_with_capture(df_fun,
sql,
table_name,
exist_classes='',
non_exist_classes='',
conf=None,
debug=False):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_cpu_and_gpu_are_equal_collect_with_capture(do_it_all, exist_classes, non_exist_classes, conf)
def assert_gpu_fallback_collect(func,
cpu_fallback_class_name,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, 'COLLECT_WITH_DATAFRAME')
conf = _prep_incompat_conf(conf)
print('### CPU RUN ###')
cpu_start = time.time()
from_cpu, cpu_df = with_cpu_session(bring_back, conf=conf)
cpu_end = time.time()
print('### GPU RUN ###')
gpu_start = time.time()
from_gpu, gpu_df = with_gpu_session(bring_back, conf=conf)
gpu_end = time.time()
jvm = spark_jvm()
jvm.org.apache.spark.sql.rapids.ExecutionPlanCaptureCallback.assertDidFallBack(gpu_df._jdf, cpu_fallback_class_name)
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def assert_gpu_sql_fallback_collect(df_fun, cpu_fallback_class_name, table_name, sql, conf=None, debug=False):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_gpu_fallback_collect(do_it_all, cpu_fallback_class_name, conf)
def _assert_gpu_and_cpu_are_equal(func,
mode,
conf={},
is_cpu_first=True):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
def run_on_gpu():
print('### GPU RUN ###')
global gpu_start
gpu_start = time.time()
global from_gpu
from_gpu = with_gpu_session(bring_back, conf=conf)
global gpu_end
gpu_end = time.time()
if is_cpu_first:
run_on_cpu()
run_on_gpu()
else:
run_on_gpu()
run_on_cpu()
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
assert_equal(from_cpu, from_gpu)
def run_with_cpu(func,
mode,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
print("run_with_cpu")
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
run_on_cpu()
print('### {}: CPU TOOK {} ###'.format(collect_type,
cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
return from_cpu
def run_with_cpu_and_gpu(func,
mode,
conf={}):
(bring_back, collect_type) = _prep_func_for_compare(func, mode)
conf = _prep_incompat_conf(conf)
def run_on_cpu():
print('### CPU RUN ###')
global cpu_start
cpu_start = time.time()
global from_cpu
from_cpu = with_cpu_session(bring_back, conf=conf)
global cpu_end
cpu_end = time.time()
def run_on_gpu():
print('### GPU RUN ###')
global gpu_start
gpu_start = time.time()
global from_gpu
from_gpu = with_gpu_session(bring_back, conf=conf)
global gpu_end
gpu_end = time.time()
run_on_cpu()
run_on_gpu()
print('### {}: GPU TOOK {} CPU TOOK {} ###'.format(collect_type,
gpu_end - gpu_start, cpu_end - cpu_start))
if should_sort_locally():
from_cpu.sort(key=_RowCmp)
from_gpu.sort(key=_RowCmp)
return (from_cpu, from_gpu)
def assert_gpu_and_cpu_are_equal_collect(func, conf={}, is_cpu_first=True):
"""
Assert when running func on both the CPU and the GPU that the results are equal.
In this case the data is collected back to the driver and compared here, so be
careful about the amount of data returned.
"""
_assert_gpu_and_cpu_are_equal(func, 'COLLECT', conf=conf, is_cpu_first=is_cpu_first)
def assert_gpu_and_cpu_are_equal_iterator(func, conf={}, is_cpu_first=True):
"""
Assert when running func on both the CPU and the GPU that the results are equal.
In this case the data is pulled back to the driver in chunks and compared here
so any amount of data can work, just be careful about how long it might take.
"""
_assert_gpu_and_cpu_are_equal(func, 'ITERATOR', conf=conf, is_cpu_first=is_cpu_first)
def assert_gpu_and_cpu_row_counts_equal(func, conf={}, is_cpu_first=True):
"""
Assert that the row counts from running the func are the same on both the CPU and GPU.
This function runs count() to only get the number of rows and compares that count
between the CPU and GPU. It does NOT compare any underlying data.
"""
_assert_gpu_and_cpu_are_equal(func, 'COUNT', conf=conf, is_cpu_first=is_cpu_first)
def assert_gpu_and_cpu_are_equal_sql(df_fun, table_name, sql, conf=None, debug=False, is_cpu_first=True, validate_execs_in_gpu_plan=[]):
"""
Assert that the specified SQL query produces equal results on CPU and GPU.
:param df_fun: a function that will create the dataframe
:param table_name: Name of table to be created with the dataframe
:param sql: SQL query to be run on the specified table
:param conf: Any user-specified confs. Empty by default.
:param debug: Boolean to indicate if the SQL output should be printed
:param is_cpu_first: Boolean to indicate if the CPU should be run first or not
:param validate_execs_in_gpu_plan: String list of expressions to be validated in the GPU plan.
:return: Assertion failure, if results from CPU and GPU do not match.
"""
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
# we hold off on setting the validate execs until after creating the temp view
spark.conf.set('spark.rapids.sql.test.validateExecsInGpuPlan', ','.join(validate_execs_in_gpu_plan))
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
assert_gpu_and_cpu_are_equal_collect(do_it_all, conf, is_cpu_first=is_cpu_first)
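# A minimal usage sketch (hypothetical table name and query):
#
#   assert_gpu_and_cpu_are_equal_sql(
#       lambda spark: spark.range(100).selectExpr('id', 'id * 2 as doubled'),
#       'example_tbl',
#       'SELECT SUM(doubled) FROM example_tbl')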
def assert_py4j_exception(func, error_message):
"""
Assert that a specific Java exception is thrown
:param func: a function to be verified
    :param error_message: a string such as the one produced by java.lang.Exception.toString
:return: Assertion failure if no exception matching error_message has occurred.
"""
with pytest.raises(Py4JJavaError) as py4jError:
func()
assert error_message in str(py4jError.value.java_exception)
def assert_gpu_and_cpu_error(df_fun, conf, error_message):
"""
Assert that GPU and CPU execution results in a specific Java exception thrown
:param df_fun: a function to be verified
:param conf: Spark config
    :param error_message: a string such as the one produced by java.lang.Exception.toString
    :return: Assertion failure if either the GPU or the CPU run fails to raise the
        expected error message
"""
assert_py4j_exception(lambda: with_cpu_session(df_fun, conf), error_message)
assert_py4j_exception(lambda: with_gpu_session(df_fun, conf), error_message)
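# A minimal usage sketch (hypothetical; the exact exception text depends on the Spark version):
#
#   assert_gpu_and_cpu_error(
#       lambda spark: spark.sql('SELECT CAST("not a number" AS INT)').collect(),
#       conf={'spark.sql.ansi.enabled': 'true'},
#       error_message='NumberFormatException')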
def with_cpu_sql(df_fun, table_name, sql, conf=None, debug=False):
if conf is None:
conf = {}
def do_it_all(spark):
df = df_fun(spark)
df.createOrReplaceTempView(table_name)
if debug:
return data_gen.debug_df(spark.sql(sql))
else:
return spark.sql(sql)
    assert_gpu_and_cpu_are_equal_collect(do_it_all, conf)
| spark-rapids-examples-main | examples/UDF-Examples/RAPIDS-accelerated-UDFs/src/main/python/asserts.py |
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
from pyspark.sql import SparkSession
if __name__ == '__main__':
    if len(sys.argv) < 3:
        raise Exception("Requires input and output data paths.")
inputPath = sys.argv[1]
outputPath = sys.argv[2]
    # Set the correct shapefile name via "--conf spark.cuspatial.sql.udf.shapeFileName"
spark = SparkSession.builder.getOrCreate()
# register the udf and set its parameters via the runtime config
spark.udf.registerJavaFunction("point_in_polygon", "com.nvidia.spark.rapids.udf.PointInPolygon", None)
# read the points data
df = spark.read.parquet(inputPath)
    # Null rows are not yet supported by the UDF, so filter them out first.
df = df.filter("x is not NULL and y is not NULL")
# test func start
df = df.selectExpr('x', 'y', 'point_in_polygon(x, y) as point_in_polygon')
# test func end
# trigger the test
begin = time.time()
df.write.mode("overwrite").parquet(outputPath)
end = time.time()
print("==> It took {} s".format(round(end-begin, 2)))
spark.stop()
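# An example launch sketch (hypothetical paths and jar name): the jar containing the
# PointInPolygon UDF must be on the classpath and the shapefile location is passed
# through the Spark conf mentioned above, e.g.
#
#   spark-submit \
#       --jars spark-cuspatial-<version>.jar \
#       --conf spark.cuspatial.sql.udf.shapeFileName=/data/polygons/taxi_zones.shp \
#       spatial_join.py /data/points_parquet /data/out_parquet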
| spark-rapids-examples-main | examples/UDF-Examples/Spark-cuSpatial/spatial_join.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import numpy as np
import cupy
import sys
def read_points(path):
print('reading points file:', path)
points = np.fromfile(path, dtype=np.int32)
points = cupy.asarray(points)
    points = points.reshape((len(points) // 4, 4))
points = cudf.DataFrame(points)
points_df = cudf.DataFrame()
points_df['x'] = points[0]
points_df['y'] = points[1]
return points_df
if __name__ == '__main__':
if len(sys.argv) < 3:
raise Exception("Usage: to_parquet <input data path> <output data path>.")
inputPath = sys.argv[1]
outputPath = sys.argv[2]
points_df = read_points(inputPath)
points_df.to_parquet(outputPath)
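# A small worked example of the expected input layout (hypothetical values): the file is a
# flat stream of int32 values grouped in fours, and only the first two of each group are
# kept as x and y. For instance the values [10, 20, 0, 0, 30, 40, 0, 0] become two rows:
#
#   points = cupy.asarray(np.array([10, 20, 0, 0, 30, 40, 0, 0], dtype=np.int32))
#   points = points.reshape((len(points) // 4, 4))  # -> [[10, 20, 0, 0], [30, 40, 0, 0]]
#   # resulting DataFrame columns: x = [10, 30], y = [20, 40]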
| spark-rapids-examples-main | examples/UDF-Examples/Spark-cuSpatial/tools/to_parquet.py |
#
# Copyright (c) 2021-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import math
import pprint
import sys
# This needs to happen first to avoid pyarrow serialization errors.
from pyspark.sql import SparkSession
# Make sure pyarrow is referenced before anything else to avoid segfault due to conflict
# with TensorFlow libraries. Use `pa` package reference to ensure it's loaded before
# functions like `deserialize_model` which are implemented at the top level.
# See https://jira.apache.org/jira/browse/ARROW-3346
import pyarrow as pa
import horovod
import horovod.tensorflow.keras as hvd
import tensorflow as tf
from horovod.spark.common.backend import SparkBackend
from tensorflow.keras.layers import BatchNormalization, Input, Embedding, Concatenate, Dense, Flatten
from tensorflow.keras.layers.experimental.preprocessing import CategoryEncoding
PETASTORM_DATALOADER = 'petastorm'
NVTABULAR_DATALOADER = 'nvtabular'
CONTINUOUS_COLUMNS = [f'i{i}' for i in range(13)]
CATEGORICAL_COLUMNS = [f'c{c}' for c in range(26)]
ALL_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
LABEL_COLUMNS = ['clicked']
def get_category_dimensions(spark, data_dir):
df = spark.read.csv(f'{data_dir}/dimensions/*.csv', header=True).toPandas()
dimensions = df.to_dict('records')[0]
pprint.pprint(dimensions)
return dimensions
def build_model(dimensions, args):
inputs = {
**{i: Input(shape=(1,), name=i, dtype=tf.float32) for i in CONTINUOUS_COLUMNS},
**{c: Input(shape=(1,), name=c, dtype=tf.int32) for c in CATEGORICAL_COLUMNS}
}
one_hots = []
embeddings = []
for c in CATEGORICAL_COLUMNS:
dimension = int(dimensions[c]) + 1
if dimension <= 128:
one_hots.append(CategoryEncoding(num_tokens=dimension, name=f'one_hot_{c}')(inputs[c]))
else:
embedding_size = int(math.floor(0.6 * dimension ** 0.25))
embeddings.append(Embedding(input_dim=dimension,
output_dim=embedding_size,
input_length=1,
name=f'embedding_{c}')(inputs[c]))
x = Concatenate(name='embeddings_concat')(embeddings)
x = Flatten(name='embeddings_flatten')(x)
x = Concatenate(name='inputs_concat')([x] + one_hots + [inputs[i] for i in CONTINUOUS_COLUMNS])
x = BatchNormalization()(x)
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(1024, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(512, activation='relu')(x)
output = Dense(1, activation='sigmoid', name='output')(x)
model = tf.keras.Model(inputs=[inputs[c] for c in ALL_COLUMNS], outputs=output)
if hvd.rank() == 0:
model.summary()
opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
opt = hvd.DistributedOptimizer(opt)
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=[tf.keras.metrics.AUC()])
return model
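# A small worked example of the embedding-size rule used above (hypothetical cardinality):
# a categorical column with dimension 10000 gets
#   embedding_size = int(math.floor(0.6 * 10000 ** 0.25)) = int(math.floor(0.6 * 10.0)) = 6
# while columns with dimension <= 128 are one-hot encoded instead of embedded.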
def train_fn(dimensions, train_rows, val_rows, args):
# Make sure pyarrow is referenced before anything else to avoid segfault due to conflict
# with TensorFlow libraries. Use `pa` package reference to ensure it's loaded before
# functions like `deserialize_model` which are implemented at the top level.
# See https://jira.apache.org/jira/browse/ARROW-3346
pa
import atexit
import horovod.tensorflow.keras as hvd
from horovod.spark.task import get_available_devices
import os
import tempfile
import tensorflow as tf
import tensorflow.keras.backend as K
import shutil
gpus = get_available_devices()
if gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = gpus[0]
if args.dataloader == NVTABULAR_DATALOADER:
os.environ['TF_MEMORY_ALLOCATION'] = '0.85'
from nvtabular.loader.tensorflow import KerasSequenceLoader
# Horovod: initialize Horovod inside the trainer.
hvd.init()
# Horovod: restore from checkpoint, use hvd.load_model under the hood.
model = build_model(dimensions, args)
# Horovod: adjust learning rate based on number of processes.
scaled_lr = K.get_value(model.optimizer.lr) * hvd.size()
K.set_value(model.optimizer.lr, scaled_lr)
# Horovod: print summary logs on the first worker.
verbose = 1 if hvd.rank() == 0 else 0
callbacks = [
# Horovod: broadcast initial variable states from rank 0 to all other processes.
# This is necessary to ensure consistent initialization of all workers when
# training is started with random weights or restored from a checkpoint.
hvd.callbacks.BroadcastGlobalVariablesCallback(root_rank=0),
# Horovod: average metrics among workers at the end of every epoch.
#
# Note: This callback must be in the list before the ReduceLROnPlateau,
# TensorBoard, or other metrics-based callbacks.
hvd.callbacks.MetricAverageCallback(),
# Horovod: using `lr = 1.0 * hvd.size()` from the very beginning leads to worse final
# accuracy. Scale the learning rate `lr = 1.0` ---> `lr = 1.0 * hvd.size()` during
# the first five epochs. See https://arxiv.org/abs/1706.02677 for details.
hvd.callbacks.LearningRateWarmupCallback(initial_lr=scaled_lr, warmup_epochs=5, verbose=verbose),
# Reduce LR if the metric is not improved for 10 epochs, and stop training
# if it has not improved for 20 epochs.
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_auc', mode='max', patience=10, verbose=verbose),
        tf.keras.callbacks.EarlyStopping(monitor='val_auc', mode='max', patience=20, verbose=verbose),
tf.keras.callbacks.TerminateOnNaN(),
# Log Tensorboard events.
tf.keras.callbacks.TensorBoard(log_dir=args.logs_dir, write_steps_per_second=True, update_freq=10)
]
# Horovod: save checkpoints only on the first worker to prevent other workers from corrupting them.
if hvd.rank() == 0:
ckpt_dir = tempfile.mkdtemp()
ckpt_file = os.path.join(ckpt_dir, 'checkpoint.h5')
atexit.register(lambda: shutil.rmtree(ckpt_dir))
callbacks.append(tf.keras.callbacks.ModelCheckpoint(
            ckpt_file, monitor='val_auc', mode='max', save_best_only=True))
if args.dataloader == PETASTORM_DATALOADER:
from petastorm import make_batch_reader
from petastorm.tf_utils import make_petastorm_dataset
# Make Petastorm readers.
with make_batch_reader(f'{args.data_dir}/train',
num_epochs=None,
cur_shard=hvd.rank(),
shard_count=hvd.size(),
hdfs_driver='libhdfs') as train_reader:
with make_batch_reader(f'{args.data_dir}/val',
num_epochs=None,
cur_shard=hvd.rank(),
shard_count=hvd.size(),
hdfs_driver='libhdfs') as val_reader:
# Convert readers to tf.data.Dataset.
train_ds = make_petastorm_dataset(train_reader) \
.unbatch() \
.shuffle(10 * args.batch_size) \
.batch(args.batch_size) \
.map(lambda x: (tuple(getattr(x, c) for c in ALL_COLUMNS), x.clicked))
val_ds = make_petastorm_dataset(val_reader) \
.unbatch() \
.batch(args.batch_size) \
.map(lambda x: (tuple(getattr(x, c) for c in ALL_COLUMNS), x.clicked))
history = model.fit(train_ds,
validation_data=val_ds,
steps_per_epoch=int(train_rows / args.batch_size / hvd.size()),
validation_steps=int(val_rows / args.batch_size / hvd.size()),
callbacks=callbacks,
verbose=verbose,
epochs=args.epochs)
else:
import cupy
def seed_fn():
"""
Generate consistent dataloader shuffle seeds across workers
            Reseeds each worker's dataloader each epoch to get a fresh shuffle
that's consistent across workers.
"""
min_int, max_int = tf.int32.limits
max_rand = max_int // hvd.size()
# Generate a seed fragment on each worker
seed_fragment = cupy.random.randint(0, max_rand).get()
# Aggregate seed fragments from all Horovod workers
seed_tensor = tf.constant(seed_fragment)
reduced_seed = hvd.allreduce(seed_tensor, name="shuffle_seed", op=hvd.Sum)
return reduced_seed % max_rand
train_ds = KerasSequenceLoader(
f'{args.data_dir}/train',
batch_size=args.batch_size,
label_names=LABEL_COLUMNS,
cat_names=CATEGORICAL_COLUMNS,
cont_names=CONTINUOUS_COLUMNS,
engine="parquet",
shuffle=True,
buffer_size=0.06, # how many batches to load at once
parts_per_chunk=1,
global_size=hvd.size(),
global_rank=hvd.rank(),
seed_fn=seed_fn)
val_ds = KerasSequenceLoader(
f'{args.data_dir}/val',
batch_size=args.batch_size,
label_names=LABEL_COLUMNS,
cat_names=CATEGORICAL_COLUMNS,
cont_names=CONTINUOUS_COLUMNS,
engine="parquet",
shuffle=False,
buffer_size=0.06, # how many batches to load at once
parts_per_chunk=1,
global_size=hvd.size(),
global_rank=hvd.rank())
history = model.fit(train_ds,
validation_data=val_ds,
steps_per_epoch=int(train_rows / args.batch_size / hvd.size()),
validation_steps=int(val_rows / args.batch_size / hvd.size()),
callbacks=callbacks,
verbose=verbose,
epochs=args.epochs)
if hvd.rank() == 0:
return history.history
def train(dimensions, train_rows, val_rows, args):
# Horovod: run training.
history = horovod.spark.run(train_fn,
args=(dimensions, train_rows, val_rows, args),
num_proc=args.num_proc,
extra_mpi_args='-mca btl_tcp_if_include enp134s0f0 -x NCCL_IB_GID_INDEX=3',
stdout=sys.stdout,
stderr=sys.stderr,
verbose=2,
nics={},
prefix_output_with_timestamp=True)[0]
best_val_loss = min(history['val_loss'])
print('Best Loss: %f' % best_val_loss)
def main():
parser = argparse.ArgumentParser(description='Criteo Spark Keras Training Example',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', default='file:///opt/data/criteo/parquet',
help='location of the transformed Criteo dataset in Parquet format')
parser.add_argument('--logs-dir', default='/opt/experiments/criteo', help='location of TensorFlow logs')
parser.add_argument('--dataloader', default=PETASTORM_DATALOADER,
choices=[PETASTORM_DATALOADER, NVTABULAR_DATALOADER],
help='dataloader to use')
parser.add_argument('--num-proc', type=int, default=1, help='number of worker processes for training')
parser.add_argument('--learning-rate', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--batch-size', type=int, default=64 * 1024, help='batch size')
parser.add_argument('--epochs', type=int, default=3, help='number of epochs to train')
parser.add_argument('--local-checkpoint-file', default='checkpoint', help='model checkpoint')
args = parser.parse_args()
spark = SparkSession.builder.appName('Criteo Keras Training').getOrCreate()
dimensions = get_category_dimensions(spark, args.data_dir)
train_df = spark.read.parquet(f'{args.data_dir}/train')
val_df = spark.read.parquet(f'{args.data_dir}/val')
test_df = spark.read.parquet(f'{args.data_dir}/test')
train_rows, val_rows, test_rows = train_df.count(), val_df.count(), test_df.count()
print('Training: %d' % train_rows)
print('Validation: %d' % val_rows)
print('Test: %d' % test_rows)
train(dimensions, train_rows, val_rows, args)
spark.stop()
if __name__ == '__main__':
main()
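# An example launch (hypothetical paths and sizes; the flags map directly to the argparse
# options defined in main(), and the script is normally started through spark-submit so a
# Spark cluster is available to horovod.spark):
#
#   spark-submit criteo_keras.py \
#       --data-dir file:///opt/data/criteo/parquet \
#       --dataloader nvtabular \
#       --num-proc 4 --batch-size 65536 --epochs 3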
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/criteo_train/criteo_keras.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
import re
import string
import tensorflow as tf
from tensorflow.keras import layers
print("tf: {}".format(tf.__version__))
def custom_standardization(input_data):
lowercase = tf.strings.lower(input_data)
stripped_html = tf.strings.regex_replace(lowercase, "<br />", " ")
return tf.strings.regex_replace(
stripped_html, "[%s]" % re.escape(string.punctuation), ""
)
max_features = 10000
sequence_length = 250
vectorize_layer = layers.TextVectorization(
standardize=custom_standardization,
max_tokens=max_features,
output_mode="int",
output_sequence_length=sequence_length,
)
custom_objects = {"vectorize_layer": vectorize_layer,
"custom_standardization": custom_standardization}
with tf.keras.utils.custom_object_scope(custom_objects):
self.model = tf.keras.models.load_model(
"/text_model"
)
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get output configuration
pred_config = pb_utils.get_output_config_by_name(model_config, "pred")
# Convert Triton types to numpy types
self.pred_dtype = pb_utils.triton_string_to_numpy(pred_config['data_type'])
def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
pred_dtype = self.pred_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get input numpy
sentence_input = pb_utils.get_input_tensor_by_name(request, "sentence")
sentences = list(sentence_input.as_numpy())
sentences = np.squeeze(sentences).tolist()
sentences = [s.decode('utf-8') for s in sentences]
pred = self.model.predict(sentences)
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
pred_tensor = pb_utils.Tensor("pred", pred.astype(pred_dtype))
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            #   output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(output_tensors=[pred_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
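# A minimal client-side sketch (assumptions: a Triton server is serving this model under the
# name "text_classification" on the default gRPC port 8001, and the model config declares a
# BYTES input named "sentence" and an output named "pred"):
#
#   import numpy as np
#   import tritonclient.grpc as grpcclient
#   client = grpcclient.InferenceServerClient("localhost:8001")
#   inp = grpcclient.InferInput("sentence", [2, 1], "BYTES")
#   inp.set_data_from_numpy(np.array([["great movie"], ["boring movie"]], dtype=object))
#   result = client.infer("text_classification", [inp])
#   print(result.as_numpy("pred"))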
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/dl_inference/tensorflow/models_config/text_classification/1/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
import tensorflow as tf
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
print("tf: {}".format(tf.__version__))
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
self.model = tf.keras.models.load_model("/my_pet_classifier")
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get output configuration
pred_config = pb_utils.get_output_config_by_name(model_config, "pred")
# Convert Triton types to numpy types
self.pred_dtype = pb_utils.triton_string_to_numpy(pred_config['data_type'])
def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
pred_dtype = self.pred_dtype
responses = []
def decode(input_tensor):
return tf.convert_to_tensor([[s[0].decode('utf-8')] for s in input_tensor.as_numpy()])
def identity(input_tensor):
return tf.convert_to_tensor(input_tensor.as_numpy())
input_transforms = {
"Type": decode,
"Age": identity,
"Breed1": decode,
"Gender": decode,
"Color1": decode,
"Color2": decode,
"MaturitySize": decode,
"FurLength": decode,
"Vaccinated": decode,
"Sterilized": decode,
"Health": decode,
"Fee": identity,
"PhotoAmt": identity
}
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get input numpy
inputs = {name: transform(pb_utils.get_input_tensor_by_name(request, name)) for name, transform in input_transforms.items()}
pred = self.model.predict(inputs)
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
pred_tensor = pb_utils.Tensor("pred", np.squeeze(pred).astype(pred_dtype))
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            #   output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(output_tensors=[pred_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/dl_inference/tensorflow/models_config/feature_columns/1/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
import torch
print("torch: {}".format(torch.__version__))
print("cuda: {}".format(torch.cuda.is_available()))
import transformers
print("transformers: {}".format(transformers.__version__))
from sentence_transformers import SentenceTransformer
self.model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get output configuration
embedding_config = pb_utils.get_output_config_by_name(model_config, "embedding")
# Convert Triton types to numpy types
self.embedding_dtype = pb_utils.triton_string_to_numpy(embedding_config['data_type'])
def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
embedding_dtype = self.embedding_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get input numpy
sentence_input = pb_utils.get_input_tensor_by_name(request, "sentence")
sentences = list(sentence_input.as_numpy())
sentences = np.squeeze(sentences, -1).tolist()
sentences = [s.decode('utf-8') for s in sentences]
embedding = self.model.encode(sentences)
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
embedding_tensor = pb_utils.Tensor("embedding", embedding.astype(embedding_dtype))
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            #   output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(output_tensors=[embedding_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/dl_inference/huggingface/models_config/hf_transformer/1/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
import torch
print("torch: {}".format(torch.__version__))
print("cuda: {}".format(torch.cuda.is_available()))
import transformers
print("transformers: {}".format(transformers.__version__))
from transformers import T5Tokenizer, T5ForConditionalGeneration
self.tokenizer = T5Tokenizer.from_pretrained("t5-small")
self.model = T5ForConditionalGeneration.from_pretrained("t5-small")
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get output configuration
output_config = pb_utils.get_output_config_by_name(model_config, "output")
# Convert Triton types to numpy types
self.output_dtype = pb_utils.triton_string_to_numpy(output_config['data_type'])
def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
output_dtype = self.output_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get input numpy
sentence_input = pb_utils.get_input_tensor_by_name(request, "input")
sentences = list(sentence_input.as_numpy())
sentences = np.squeeze(sentences, -1).tolist()
sentences = [s.decode('utf-8') for s in sentences]
input_ids = self.tokenizer(sentences,
padding="longest",
max_length=512,
return_tensors="pt").input_ids
output_ids = self.model.generate(input_ids)
outputs = np.array([self.tokenizer.decode(o, skip_special_tokens=True) for o in output_ids])
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
output_tensor = pb_utils.Tensor("output", outputs.astype(output_dtype))
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            #   output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(output_tensors=[output_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/dl_inference/huggingface/models_config/hf_generation/1/model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import json
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def initialize(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
import torch
print("torch: {}".format(torch.__version__))
print("cuda: {}".format(torch.cuda.is_available()))
import transformers
print("transformers: {}".format(transformers.__version__))
from transformers import pipeline
self.pipe = pipeline("text-classification", device=0)
# You must parse model_config. JSON string is not parsed here
self.model_config = model_config = json.loads(args['model_config'])
# Get output configuration
label_config = pb_utils.get_output_config_by_name(model_config, "label")
score_config = pb_utils.get_output_config_by_name(model_config, "score")
# Convert Triton types to numpy types
self.label_dtype = pb_utils.triton_string_to_numpy(label_config['data_type'])
self.score_dtype = pb_utils.triton_string_to_numpy(score_config['data_type'])
def execute(self, requests):
"""`execute` MUST be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference request is made
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
label_dtype = self.label_dtype
score_dtype = self.score_dtype
responses = []
        # Every Python backend must iterate over every one of the requests
# and create a pb_utils.InferenceResponse for each of them.
for request in requests:
# Get input numpy
sentence_input = pb_utils.get_input_tensor_by_name(request, "sentence")
sentences = list(sentence_input.as_numpy())
sentences = np.squeeze(sentences).tolist()
sentences = [s.decode('utf-8') for s in sentences]
results = self.pipe(sentences)
label = np.array([res['label'] for res in results])
score = np.array([res['score'] for res in results])
# Create output tensors. You need pb_utils.Tensor
# objects to create pb_utils.InferenceResponse.
label_tensor = pb_utils.Tensor("label", label.astype(label_dtype))
score_tensor = pb_utils.Tensor("score", score.astype(score_dtype))
# Create InferenceResponse. You can set an error here in case
# there was a problem with handling this inference request.
# Below is an example of how you can set errors in inference
# response:
#
# pb_utils.InferenceResponse(
            #   output_tensors=..., TritonError("An error occurred"))
inference_response = pb_utils.InferenceResponse(output_tensors=[label_tensor, score_tensor])
responses.append(inference_response)
# You should return a list of pb_utils.InferenceResponse. Length
# of this list must match the length of `requests` list.
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is OPTIONAL. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...')
| spark-rapids-examples-main | examples/ML+DL-Examples/Spark-DL/dl_inference/huggingface/models_config/hf_pipeline/1/model.py |
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
(SparkSession
.builder
.getOrCreate()
.read
.csv(sys.argv[1])
.withColumn('_c1', format_string('%.6f', col('_c1').cast('float')))
.withColumn('_c1', when(col('_c1') == '0.000000', lit('0.0')).otherwise(col('_c1')))
.withColumn('_c1', when(col('_c1') == '1.000000', lit('1.0')).otherwise(col('_c1')))
.repartition(1)
.write
.option('nullValue', None)
.csv(sys.argv[2]))
| spark-rapids-examples-main | scripts/encoding-sample/truncate-model.py |
# Note: Please modify the data source options for your case.
import sys
from pyspark.sql import SparkSession
(SparkSession
.builder
.getOrCreate()
.read
.option('sep', '\t')
.csv(sys.argv[1])
.repartition(int(sys.argv[3]))
.write
.option('sep', '\t')
.option('nullValue', None)
.csv(sys.argv[2]))
| spark-rapids-examples-main | scripts/encoding-sample/repartition.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.encoding.main import main
main()
| spark-rapids-examples-main | scripts/encoding/python/main.py |
spark-rapids-examples-main | scripts/encoding/python/com/__init__.py |
|
spark-rapids-examples-main | scripts/encoding/python/com/nvidia/__init__.py |
|
spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/__init__.py |
|
spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/__init__.py |
|
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.encoding.utility.args import parse_arguments
from importlib import import_module
def main():
args = parse_arguments()
getattr(import_module(args.mainClass), 'main')(args)
| spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/main.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.encoding.criteo.common import *
from com.nvidia.spark.encoding.utility.utils import *
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
from pyspark.sql import functions as F
from pyspark.sql.types import FloatType, DoubleType
import time
def get_dict_df(train_df, target_col, label_col):
'''
    Compute the mean of label_col for each distinct value of target_col (the target-encoding dictionary) for one column.
'''
col_target_df = train_df.groupBy(target_col).agg(F.mean(label_col))
return col_target_df
def encode_df(original_df, dict_df, col_name):
dict_df_rename = dict_df.withColumnRenamed('_c0', 'hash').withColumnRenamed('_c1', col_name+'_mean')
df_mean = (original_df.join(dict_df_rename, original_df[col_name] == dict_df_rename['hash'], how='left').drop('hash').drop(col_name)
.na.fill(-1, [col_name + '_mean']))
return df_mean
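# A small worked example of the mean (target) encoding above (hypothetical data): for rows
# (c0='a', label=1), (c0='a', label=0), (c0='b', label=1), get_dict_df(df, 'c0', 'label')
# yields the per-category means a -> 0.5, b -> 1.0; encode_df then joins those means back,
# replacing 'c0' with a 'c0_mean' column and filling unmatched categories with -1.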
def main(args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
if args.mode == 'train':
for col_name, model_path in zip(args.columns, args.modelPaths):
df = load_data(spark, args.inputPaths, args, customize_reader).cache()
dict_df = get_dict_df(df, col_name, args.labelColumn)
dict_df.repartition(1).write.csv(model_path)
if args.mode == 'transform':
dict_dfs = [
load_dict_df(spark, path).withColumn('_c1', F.col('_c1').cast(DoubleType())).cache()
for path in args.modelPaths
]
for input_path, output_path in zip(args.inputPaths, args.outputPaths):
df = load_data(spark, input_path, args, customize_reader)
for col_name, dict_df in zip(args.columns, dict_dfs):
df = encode_df(df, dict_df, col_name)
save_data(df, output_path, args, customize_writer) | spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/criteo/target_cpu_main.py |
spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/criteo/__init__.py |
|
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from com.nvidia.spark.encoding.criteo.common import *
from com.nvidia.spark.encoding.utility.utils import *
from pyspark.ml.feature import StringIndexer, StringIndexerModel
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
def index(df, column):
column_index = column + '_index'
return (StringIndexer(inputCol=column, outputCol=column_index)
.setHandleInvalid('keep')
.fit(df))
def expand(indexer, df, column):
column_index = column + '_index'
df = (indexer
.transform(df)
.withColumn(column_index, col(column_index).cast('int')))
for i in range(0, len(indexer.labels)):
df = df.withColumn(column + '_' + str(i), (col(column_index) == i).cast('int'))
return df.drop(column, column_index)
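# A small worked example of the expansion above (hypothetical data): if StringIndexer was
# fit on a column 'c0' whose most frequent values are 'cat' then 'dog', it maps cat -> 0 and
# dog -> 1 (unseen values get index 2 via handleInvalid='keep'), and expand() emits integer
# flag columns c0_0 and c0_1 while dropping 'c0' and 'c0_index', so unseen values end up as
# all-zero rows.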
def main(args):
spark = (SparkSession
.builder
.appName(args.mainClass)
.getOrCreate())
if args.mode == 'train':
df = load_data(spark, args.inputPaths, args, customize_reader).cache()
for column, path in zip(args.columns, args.modelPaths):
indexer = index(df, column)
save_model(indexer, path, args)
if args.mode == 'transform':
indexers = list(zip(args.columns, load_models(StringIndexerModel, args.modelPaths)))
for input_path, output_path in zip(args.inputPaths, args.outputPaths):
df = load_data(spark, input_path, args, customize_reader)
for column, indexer in indexers:
df = expand(indexer, df, column)
args.numRows and df.show(args.numRows)
save_data(df, output_path, args, customize_writer)
spark.stop()
| spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/criteo/one_hot_cpu_main.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def customize_reader(reader):
(reader
.option('sep', '\t'))
def customize_writer(writer):
(writer
.option('sep', '\t')
.option('nullValue', None))
| spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/criteo/common.py |
spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/utility/__init__.py |
|
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pickle
def load_data(spark, paths, args, customize=None):
reader = (spark
.read
.format(args.format))
customize and customize(reader)
return reader.load(paths)
def save_data(data_frame, path, args, customize=None):
writer = (data_frame
.write
.format(args.format))
args.overwrite and writer.mode('overwrite')
customize and customize(writer)
writer.save(path)
def load_model(model_class, path):
return model_class.load(path)
def load_models(model_class, paths):
return [load_model(model_class, path) for path in paths]
def save_model(model, path, args):
writer = model.write().overwrite() if args.overwrite else model
writer.save(path)
def save_dict(mean_dict, target_path):
'''
target_path: full path of the target location to save the dict
'''
with open(target_path+'.pkl', 'wb') as f:
pickle.dump(mean_dict, f, pickle.HIGHEST_PROTOCOL)
def load_dict(dict_path):
'''
dict_path: full path of target dict with '.pkl' tail.
'''
with open(dict_path, 'rb') as f:
return pickle.load(f)
def load_dict_df(spark, dict_df_path):
return spark.read.option("header","false").csv(dict_df_path)
| spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/utility/utils.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from argparse import ArgumentParser
from distutils.util import strtobool
def _to_bool(literal):
return bool(strtobool(literal))
def _to_str_list(literal):
return [x for x in literal.split(',') if x]
_examples = [
'com.nvidia.spark.encoding.criteo.one_hot_cpu_main',
'com.nvidia.spark.encoding.criteo.target_cpu_main'
]
def _validate_args(args):
usage = ''
if args.mode == 'transform' and not args.outputPaths:
usage += ' --outputPaths required for transform.\n'
# for production:
# validates that --columns and --inputPaths exists
# validates that --inputPath and --outputPath matches for transform
if (args.mainClass == 'com.nvidia.spark.encoding.criteo.target_cpu_main'
and args.mode == 'train'
and not args.labelColumn):
usage += ' --labelColumn required for target encoding. \n'
if usage:
print('-' * 80)
print('Usage:\n' + usage)
sys.exit(1)
def parse_arguments():
parser = ArgumentParser()
# application arguments
parser.add_argument('--mainClass', required=True, choices=_examples)
parser.add_argument('--mode', choices=['train', 'transform'], required=True)
parser.add_argument('--format', choices=['csv'], default='csv')
parser.add_argument('--columns', type=_to_str_list, required=True)
parser.add_argument('--modelPaths', type=_to_str_list, required=True)
parser.add_argument('--inputPaths', type=_to_str_list, required=True)
parser.add_argument('--outputPaths', type=_to_str_list) # for transform, required
parser.add_argument('--overwrite', type=_to_bool, default=False)
parser.add_argument('--numRows', type=int) # for transform, optional
parser.add_argument('--labelColumn', help='name of the label column') # for target encoding, required
parsed = parser.parse_args()
_validate_args(parsed)
return parsed
| spark-rapids-examples-main | scripts/encoding/python/com/nvidia/spark/encoding/utility/args.py |
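A hedged sketch of driving parse_arguments() programmatically by faking sys.argv (all paths and column names are illustrative):
import sys
from com.nvidia.spark.encoding.utility.args import parse_arguments   # path mirrors the repo layout

sys.argv = [
    'main.py',
    '--mainClass', 'com.nvidia.spark.encoding.criteo.one_hot_cpu_main',
    '--mode', 'transform',
    '--columns', 'c1,c2',
    '--modelPaths', '/tmp/model_c1,/tmp/model_c2',
    '--inputPaths', '/tmp/day_0.tsv',
    '--outputPaths', '/tmp/day_0_encoded',
]
args = parse_arguments()        # passes _validate_args because --outputPaths is present
print(args.columns)             # ['c1', 'c2'] -- _to_str_list splits on commas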
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import numpy as np
import os
opt = TrainOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
opt.instance_feat = True
opt.continue_train = True
name = 'features'
save_path = os.path.join(opt.checkpoints_dir, opt.name)
############ Initialize #########
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
########### Encode features ###########
reencode = True
if reencode:
features = {}
for label in range(opt.label_nc):
features[label] = np.zeros((0, opt.feat_num+1))
for i, data in enumerate(dataset):
feat = model.module.encode_features(data['image'], data['inst'])
for label in range(opt.label_nc):
features[label] = np.append(features[label], feat[label], axis=0)
print('%d / %d images' % (i+1, dataset_size))
save_name = os.path.join(save_path, name + '.npy')
np.save(save_name, features)
############## Clustering ###########
n_clusters = opt.n_clusters
load_name = os.path.join(save_path, name + '.npy')
features = np.load(load_name).item()
from sklearn.cluster import KMeans
centers = {}
for label in range(opt.label_nc):
feat = features[label]
feat = feat[feat[:,-1] > 0.5, :-1]
if feat.shape[0]:
n_clusters = min(feat.shape[0], opt.n_clusters)
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(feat)
centers[label] = kmeans.cluster_centers_
save_name = os.path.join(save_path, name + '_clustered_%03d.npy' % opt.n_clusters)
np.save(save_name, centers)
print('saving to %s' % save_name) | pix2pixHD-master | encode_features.py |
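A minimal sketch of consuming the clustered centers saved above (the checkpoint path matches the script defaults but is still an assumption; allow_pickle=True is needed on recent NumPy because the file stores a Python dict):
import numpy as np

centers = np.load('checkpoints/label2city/features_clustered_010.npy',
                  allow_pickle=True).item()       # dict: label id -> (n_clusters, feat_num) array
for label, feats in sorted(centers.items()):
    print(label, feats.shape)                     # one cluster center per available style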
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import os
import util.util as util
from torch.autograd import Variable
import torch.nn as nn
opt = TrainOptions().parse()
opt.nThreads = 1
opt.batchSize = 1
opt.serial_batches = True
opt.no_flip = True
opt.instance_feat = True
name = 'features'
save_path = os.path.join(opt.checkpoints_dir, opt.name)
############ Initialize #########
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
model = create_model(opt)
util.mkdirs(os.path.join(opt.dataroot, opt.phase + '_feat'))
######## Save precomputed feature maps for 1024p training #######
for i, data in enumerate(dataset):
print('%d / %d images' % (i+1, dataset_size))
feat_map = model.module.netE.forward(Variable(data['image'].cuda(), volatile=True), data['inst'].cuda())
feat_map = nn.Upsample(scale_factor=2, mode='nearest')(feat_map)
image_numpy = util.tensor2im(feat_map.data[0])
save_path = data['path'][0].replace('/train_label/', '/train_feat/')
util.save_image(image_numpy, save_path) | pix2pixHD-master | precompute_feature_maps.py |
import os
import sys
from random import randint
import numpy as np
import tensorrt
try:
from PIL import Image
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.autoinit
import argparse
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have pycuda and the example dependencies installed.
https://wiki.tiker.net/PyCuda/Installation/Linux
pip(3) install tensorrt[examples]
""".format(err))
exit(1)
try:
import tensorrt as trt
from tensorrt.parsers import caffeparser
from tensorrt.parsers import onnxparser
except ImportError as err:
sys.stderr.write("""ERROR: failed to import module ({})
Please make sure you have the TensorRT Library installed
and accessible in your LD_LIBRARY_PATH
""".format(err))
exit(1)
G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
class Profiler(trt.infer.Profiler):
"""
    Example Implementation of a Profiler
Is identical to the Profiler class in trt.infer so it is possible
to just use that instead of implementing this if further
functionality is not needed
"""
def __init__(self, timing_iter):
trt.infer.Profiler.__init__(self)
self.timing_iterations = timing_iter
self.profile = []
def report_layer_time(self, layerName, ms):
record = next((r for r in self.profile if r[0] == layerName), (None, None))
if record == (None, None):
self.profile.append((layerName, ms))
else:
self.profile[self.profile.index(record)] = (record[0], record[1] + ms)
def print_layer_times(self):
totalTime = 0
for i in range(len(self.profile)):
print("{:40.40} {:4.3f}ms".format(self.profile[i][0], self.profile[i][1] / self.timing_iterations))
totalTime += self.profile[i][1]
print("Time over all layers: {:4.2f} ms per iteration".format(totalTime / self.timing_iterations))
def get_input_output_names(trt_engine):
    nbindings = trt_engine.get_nb_bindings()
maps = []
for b in range(0, nbindings):
dims = trt_engine.get_binding_dimensions(b).to_DimsCHW()
name = trt_engine.get_binding_name(b)
type = trt_engine.get_binding_data_type(b)
if (trt_engine.binding_is_input(b)):
maps.append(name)
print("Found input: ", name)
else:
maps.append(name)
print("Found output: ", name)
print("shape=" + str(dims.C()) + " , " + str(dims.H()) + " , " + str(dims.W()))
print("dtype=" + str(type))
return maps
def create_memory(engine, name, buf, mem, batchsize, inp, inp_idx):
binding_idx = engine.get_binding_index(name)
if binding_idx == -1:
raise AttributeError("Not a valid binding")
print("Binding: name={}, bindingIndex={}".format(name, str(binding_idx)))
dims = engine.get_binding_dimensions(binding_idx).to_DimsCHW()
eltCount = dims.C() * dims.H() * dims.W() * batchsize
if engine.binding_is_input(binding_idx):
h_mem = inp[inp_idx]
inp_idx = inp_idx + 1
else:
h_mem = np.random.uniform(0.0, 255.0, eltCount).astype(np.dtype('f4'))
d_mem = cuda.mem_alloc(eltCount * 4)
cuda.memcpy_htod(d_mem, h_mem)
buf.insert(binding_idx, int(d_mem))
mem.append(d_mem)
return inp_idx
#Run inference on device
def time_inference(engine, batch_size, inp):
bindings = []
mem = []
inp_idx = 0
for io in get_input_output_names(engine):
inp_idx = create_memory(engine, io, bindings, mem,
batch_size, inp, inp_idx)
context = engine.create_execution_context()
    timing_iterations = 500  # the original looped over the undefined name `iter`; use an explicit count
    g_prof = Profiler(timing_iterations)
    context.set_profiler(g_prof)
    for i in range(timing_iterations):
context.execute(batch_size, bindings)
g_prof.print_layer_times()
context.destroy()
return
def convert_to_datatype(v):
if v==8:
return trt.infer.DataType.INT8
elif v==16:
return trt.infer.DataType.HALF
elif v==32:
return trt.infer.DataType.FLOAT
else:
print("ERROR: Invalid model data type bit depth: " + str(v))
return trt.infer.DataType.INT8
def run_trt_engine(engine_file, bs, inp):
    engine = trt.utils.load_engine(G_LOGGER, engine_file)
    time_inference(engine, bs, inp)
def run_onnx(onnx_file, data_type, bs, inp):
# Create onnx_config
apex = onnxparser.create_onnxconfig()
apex.set_model_file_name(onnx_file)
apex.set_model_dtype(convert_to_datatype(data_type))
# create parser
trt_parser = onnxparser.create_onnxparser(apex)
assert(trt_parser)
data_type = apex.get_model_dtype()
onnx_filename = apex.get_model_file_name()
trt_parser.parse(onnx_filename, data_type)
trt_parser.report_parsing_info()
trt_parser.convert_to_trtnetwork()
trt_network = trt_parser.get_trtnetwork()
assert(trt_network)
# create infer builder
trt_builder = trt.infer.create_infer_builder(G_LOGGER)
    max_batch_size = bs               # undefined in the original; use the caller-supplied batch size
    max_workspace_size = 1 << 30      # assumed 1 GiB workspace; the original left this undefined
    trt_builder.set_max_batch_size(max_batch_size)
    trt_builder.set_max_workspace_size(max_workspace_size)
if (apex.get_model_dtype() == trt.infer.DataType_kHALF):
print("------------------- Running FP16 -----------------------------")
trt_builder.set_half2_mode(True)
elif (apex.get_model_dtype() == trt.infer.DataType_kINT8):
print("------------------- Running INT8 -----------------------------")
trt_builder.set_int8_mode(True)
else:
print("------------------- Running FP32 -----------------------------")
print("----- Builder is Done -----")
print("----- Creating Engine -----")
trt_engine = trt_builder.build_cuda_engine(trt_network)
print("----- Engine is built -----")
    time_inference(trt_engine, bs, inp)  # `engine` was undefined here; use the engine built above
| pix2pixHD-master | run_engine.py |
import os
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch
opt = TestOptions().parse(save=False)
opt.nThreads = 1 # test code only supports nThreads = 1
opt.batchSize = 1 # test code only supports batchSize = 1
opt.serial_batches = True # no shuffle
opt.no_flip = True # no flip
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
visualizer = Visualizer(opt)
# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))
# test
if not opt.engine and not opt.onnx:
model = create_model(opt)
if opt.data_type == 16:
model.half()
elif opt.data_type == 8:
model.type(torch.uint8)
if opt.verbose:
print(model)
else:
from run_engine import run_trt_engine, run_onnx
for i, data in enumerate(dataset):
if i >= opt.how_many:
break
if opt.data_type == 16:
data['label'] = data['label'].half()
data['inst'] = data['inst'].half()
    elif opt.data_type == 8:
        data['label'] = data['label'].byte()  # tensors have no .uint8() method; .byte() yields uint8
        data['inst'] = data['inst'].byte()
if opt.export_onnx:
print ("Exporting to ONNX: ", opt.export_onnx)
assert opt.export_onnx.endswith("onnx"), "Export model file should end with .onnx"
torch.onnx.export(model, [data['label'], data['inst']],
opt.export_onnx, verbose=True)
exit(0)
minibatch = 1
if opt.engine:
generated = run_trt_engine(opt.engine, minibatch, [data['label'], data['inst']])
elif opt.onnx:
generated = run_onnx(opt.onnx, opt.data_type, minibatch, [data['label'], data['inst']])
else:
generated = model.inference(data['label'], data['inst'], data['image'])
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0]))])
img_path = data['path']
print('process image... %s' % img_path)
visualizer.save_images(webpage, visuals, img_path)
webpage.save()
| pix2pixHD-master | test.py |
import time
import os
import numpy as np
import torch
from torch.autograd import Variable
from collections import OrderedDict
from subprocess import call
import math
def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0  # fractions.gcd was removed in Python 3.9
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
try:
start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
except:
start_epoch, epoch_iter = 1, 0
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
start_epoch, epoch_iter = 1, 0
opt.print_freq = lcm(opt.print_freq, opt.batchSize)
if opt.debug:
opt.display_freq = 1
opt.print_freq = 1
opt.niter = 1
opt.niter_decay = 0
opt.max_dataset_size = 10
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)
model = create_model(opt)
visualizer = Visualizer(opt)
if opt.fp16:
from apex import amp
model, [optimizer_G, optimizer_D] = amp.initialize(model, [model.optimizer_G, model.optimizer_D], opt_level='O1')
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
else:
optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D
total_steps = (start_epoch-1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
if total_steps % opt.print_freq == print_delta:
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
loss_dict = dict(zip(model.module.loss_names, losses))
# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat',0) + loss_dict.get('G_VGG',0)
############### Backward Pass ####################
# update generator weights
optimizer_G.zero_grad()
if opt.fp16:
with amp.scale_loss(loss_G, optimizer_G) as scaled_loss: scaled_loss.backward()
else:
loss_G.backward()
optimizer_G.step()
# update discriminator weights
optimizer_D.zero_grad()
if opt.fp16:
with amp.scale_loss(loss_D, optimizer_D) as scaled_loss: scaled_loss.backward()
else:
loss_D.backward()
optimizer_D.step()
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
t = (time.time() - iter_start_time) / opt.print_freq
visualizer.print_current_errors(epoch, epoch_iter, errors, t)
visualizer.plot_current_errors(errors, total_steps)
#call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
### display output images
if save_fake:
visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
('synthesized_image', util.tensor2im(generated.data[0])),
('real_image', util.tensor2im(data['image'][0]))])
visualizer.display_current_results(visuals, epoch, total_steps)
### save latest model
if total_steps % opt.save_latest_freq == save_delta:
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
model.module.save('latest')
np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
if epoch_iter >= dataset_size:
break
# end of epoch
iter_end_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.module.save('latest')
model.module.save(epoch)
np.savetxt(iter_path, (epoch+1, 0), delimiter=',', fmt='%d')
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.module.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.module.update_learning_rate()
| pix2pixHD-master | train.py |
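The resume logic above round-trips (epoch, iteration) through iter.txt with NumPy; a tiny self-contained sketch of that bookkeeping (the path is illustrative):
import numpy as np

iter_path = '/tmp/iter.txt'
np.savetxt(iter_path, (7, 2976), delimiter=',', fmt='%d')                  # saved after a step or epoch
start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))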
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features')
self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map')
self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file")
self.parser.add_argument("--engine", type=str, help="run serialized TRT engine")
self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT")
self.isTrain = False
| pix2pixHD-master | options/test_options.py |
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
# for displays
self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
self.parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration')
# for training
self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate')
self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero')
self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
# for discriminators
self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN')
self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
self.isTrain = True
| pix2pixHD-master | options/train_options.py |
pix2pixHD-master | options/__init__.py |
|
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser()
self.initialized = False
def initialize(self):
# experiment specifics
self.parser.add_argument('--name', type=str, default='label2city', help='name of the experiment. It decides where to store samples and models')
self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
self.parser.add_argument('--model', type=str, default='pix2pixHD', help='which model to use')
self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
self.parser.add_argument('--data_type', default=32, type=int, choices=[8, 16, 32], help="Supported data type i.e. 8, 16, 32 bit")
self.parser.add_argument('--verbose', action='store_true', default=False, help='toggles verbose')
self.parser.add_argument('--fp16', action='store_true', default=False, help='train with AMP')
self.parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
# input/output sizes
self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
self.parser.add_argument('--loadSize', type=int, default=1024, help='scale images to this size')
self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
self.parser.add_argument('--label_nc', type=int, default=35, help='# of input label channels')
self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
# for setting inputs
self.parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/')
self.parser.add_argument('--resize_or_crop', type=str, default='scale_width', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
# for displays
self.parser.add_argument('--display_winsize', type=int, default=512, help='display window size')
self.parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. Requires tensorflow installed')
# for generator
self.parser.add_argument('--netG', type=str, default='global', help='selects model to use for netG')
self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
self.parser.add_argument('--n_downsample_global', type=int, default=4, help='number of downsampling layers in netG')
self.parser.add_argument('--n_blocks_global', type=int, default=9, help='number of residual blocks in the global generator network')
self.parser.add_argument('--n_blocks_local', type=int, default=3, help='number of residual blocks in the local enhancer network')
self.parser.add_argument('--n_local_enhancers', type=int, default=1, help='number of local enhancers to use')
self.parser.add_argument('--niter_fix_global', type=int, default=0, help='number of epochs that we only train the outmost local enhancer')
# for instance-wise features
self.parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input')
self.parser.add_argument('--instance_feat', action='store_true', help='if specified, add encoded instance features as input')
self.parser.add_argument('--label_feat', action='store_true', help='if specified, add encoded label features as input')
self.parser.add_argument('--feat_num', type=int, default=3, help='vector length for encoded features')
self.parser.add_argument('--load_features', action='store_true', help='if specified, load precomputed feature maps')
self.parser.add_argument('--n_downsample_E', type=int, default=4, help='# of downsampling layers in encoder')
self.parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer')
self.parser.add_argument('--n_clusters', type=int, default=10, help='number of clusters for features')
self.initialized = True
def parse(self, save=True):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.isTrain = self.isTrain # train or test
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
if save and not self.opt.continue_train:
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| pix2pixHD-master | options/base_options.py |
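A hedged sketch of how parse() turns the --gpu_ids string into a list of ints; -1 keeps the example CPU-only so torch.cuda.set_device is never called (argv values are illustrative):
import sys
from options.train_options import TrainOptions

sys.argv = ['train.py', '--name', 'label2city_demo', '--gpu_ids', '-1']
opt = TrainOptions().parse(save=False)   # save=False skips writing opt.txt
print(opt.gpu_ids)                       # [] -- negative ids are dropped, so no GPU is selected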
import random
import torch
from torch.autograd import Variable
class ImagePool():
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images.data:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size-1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
| pix2pixHD-master | util/image_pool.py |
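A short usage sketch: the pool is queried with freshly generated fakes before they are shown to the discriminator, and randomly swaps in cached ones once it is full (sizes are illustrative):
import torch
from torch.autograd import Variable
from util.image_pool import ImagePool   # path mirrors the repo layout

pool = ImagePool(pool_size=50)
for step in range(3):
    fake = Variable(torch.randn(4, 3, 32, 32))   # a freshly generated batch
    fake_for_D = pool.query(fake)                # mix of new and previously cached fakes
    print(fake_for_D.size())                     # torch.Size([4, 3, 32, 32])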
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:,:,0]
return image_numpy.astype(imtype)
# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8):
if n_label == 0:
return tensor2im(label_tensor, imtype)
label_tensor = label_tensor.cpu().float()
if label_tensor.size()[0] > 1:
label_tensor = label_tensor.max(0, keepdim=True)[1]
label_tensor = Colorize(n_label)(label_tensor)
label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
return label_numpy.astype(imtype)
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
###############################################################################
# Code from
# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
# Modified so it complies with the Cityscapes label map colors
###############################################################################
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def labelcolormap(N):
if N == 35: # cityscape
cmap = np.array([( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), ( 0, 0, 0), (111, 74, 0), ( 81, 0, 81),
(128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153),
(180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220, 0),
(107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255, 0, 0), ( 0, 0,142), ( 0, 0, 70),
( 0, 60,100), ( 0, 0, 90), ( 0, 0,110), ( 0, 80,100), ( 0, 0,230), (119, 11, 32), ( 0, 0,142)],
dtype=np.uint8)
else:
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r, g, b = 0, 0, 0
id = i
for j in range(7):
str_id = uint82bin(id)
r = r ^ (np.uint8(str_id[-1]) << (7-j))
g = g ^ (np.uint8(str_id[-2]) << (7-j))
b = b ^ (np.uint8(str_id[-3]) << (7-j))
id = id >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
class Colorize(object):
def __init__(self, n=35):
self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
| pix2pixHD-master | util/util.py |
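A small sketch exercising the tensor helpers above: tensor2im maps a generator output in [-1, 1] to a uint8 image, and tensor2label colorizes an integer label map (shapes and paths are illustrative):
import torch
from util.util import tensor2im, tensor2label, save_image   # path mirrors the repo layout

fake = torch.rand(3, 64, 64) * 2 - 1              # generator outputs live in [-1, 1]
img = tensor2im(fake)                             # HxWx3 uint8 array
save_image(img, '/tmp/fake.jpg')

label_map = torch.randint(0, 35, (1, 64, 64))     # Cityscapes-style label ids
color = tensor2label(label_map, 35)               # colorized HxWx3 uint8 array
print(img.shape, color.shape)                     # (64, 64, 3) (64, 64, 3)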
import dominate
from dominate.tags import *
import os
class HTML:
def __init__(self, web_dir, title, refresh=0):
self.title = title
self.web_dir = web_dir
self.img_dir = os.path.join(self.web_dir, 'images')
if not os.path.exists(self.web_dir):
os.makedirs(self.web_dir)
if not os.path.exists(self.img_dir):
os.makedirs(self.img_dir)
self.doc = dominate.document(title=title)
if refresh > 0:
with self.doc.head:
meta(http_equiv="refresh", content=str(refresh))
def get_image_dir(self):
return self.img_dir
def add_header(self, str):
with self.doc:
h3(str)
def add_table(self, border=1):
self.t = table(border=border, style="table-layout: fixed;")
self.doc.add(self.t)
def add_images(self, ims, txts, links, width=512):
self.add_table()
with self.t:
with tr():
for im, txt, link in zip(ims, txts, links):
with td(style="word-wrap: break-word;", halign="center", valign="top"):
with p():
with a(href=os.path.join('images', link)):
img(style="width:%dpx" % (width), src=os.path.join('images', im))
br()
p(txt)
def save(self):
html_file = '%s/index.html' % self.web_dir
f = open(html_file, 'wt')
f.write(self.doc.render())
f.close()
if __name__ == '__main__':
html = HTML('web/', 'test_html')
html.add_header('hello world')
ims = []
txts = []
links = []
for n in range(4):
ims.append('image_%d.jpg' % n)
txts.append('text_%d' % n)
links.append('image_%d.jpg' % n)
html.add_images(ims, txts, links)
html.save()
| pix2pixHD-master | util/html.py |
pix2pixHD-master | util/__init__.py |
|
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
import scipy.misc
try:
from StringIO import StringIO # Python 2.7
except ImportError:
from io import BytesIO # Python 3.x
class Visualizer():
def __init__(self, opt):
# self.opt = opt
self.tf_log = opt.tf_log
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
if self.tf_log:
import tensorflow as tf
self.tf = tf
self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
self.writer = tf.summary.FileWriter(self.log_dir)
if self.use_html:
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, step):
if self.tf_log: # show images in tensorboard output
img_summaries = []
for label, image_numpy in visuals.items():
# Write the image to a string
try:
s = StringIO()
except:
s = BytesIO()
scipy.misc.toimage(image_numpy).save(s, format="jpeg")
# Create an Image object
img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
# Create a Summary value
img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))
# Create and write Summary
summary = self.tf.Summary(value=img_summaries)
self.writer.add_summary(summary, step)
if self.use_html: # save images to a html file
for label, image_numpy in visuals.items():
if isinstance(image_numpy, list):
for i in range(len(image_numpy)):
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i))
util.save_image(image_numpy[i], img_path)
else:
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=30)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
if isinstance(image_numpy, list):
for i in range(len(image_numpy)):
img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i)
ims.append(img_path)
txts.append(label+str(i))
links.append(img_path)
else:
img_path = 'epoch%.3d_%s.jpg' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
if len(ims) < 10:
webpage.add_images(ims, txts, links, width=self.win_size)
else:
num = int(round(len(ims)/2.0))
webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, errors, step):
if self.tf_log:
for tag, value in errors.items():
summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
self.writer.add_summary(summary, step)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t):
message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
for k, v in errors.items():
if v != 0:
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
# save image to the disk
def save_images(self, webpage, visuals, image_path):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
image_name = '%s_%s.jpg' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(image_numpy, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
| pix2pixHD-master | util/visualizer.py |
import torch
def create_model(opt):
if opt.model == 'pix2pixHD':
from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
if opt.isTrain:
model = Pix2PixHDModel()
else:
model = InferenceModel()
else:
from .ui_model import UIModel
model = UIModel()
model.initialize(opt)
if opt.verbose:
print("model [%s] was created" % (model.name()))
if opt.isTrain and len(opt.gpu_ids) and not opt.fp16:
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
return model
| pix2pixHD-master | models/models.py |
pix2pixHD-master | models/__init__.py |
|
import torch
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import os
from PIL import Image
import util.util as util
from .base_model import BaseModel
from . import networks
class UIModel(BaseModel):
def name(self):
return 'UIModel'
def initialize(self, opt):
assert(not opt.isTrain)
BaseModel.initialize(self, opt)
self.use_features = opt.instance_feat or opt.label_feat
netG_input_nc = opt.label_nc
if not opt.no_instance:
netG_input_nc += 1
if self.use_features:
netG_input_nc += opt.feat_num
self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)
self.load_network(self.netG, 'G', opt.which_epoch)
print('---------- Networks initialized -------------')
def toTensor(self, img, normalize=False):
tensor = torch.from_numpy(np.array(img, np.int32, copy=False))
tensor = tensor.view(1, img.size[1], img.size[0], len(img.mode))
tensor = tensor.transpose(1, 2).transpose(1, 3).contiguous()
if normalize:
return (tensor.float()/255.0 - 0.5) / 0.5
return tensor.float()
def load_image(self, label_path, inst_path, feat_path):
opt = self.opt
# read label map
label_img = Image.open(label_path)
if label_path.find('face') != -1:
label_img = label_img.convert('L')
ow, oh = label_img.size
w = opt.loadSize
h = int(w * oh / ow)
label_img = label_img.resize((w, h), Image.NEAREST)
label_map = self.toTensor(label_img)
# onehot vector input for label map
self.label_map = label_map.cuda()
oneHot_size = (1, opt.label_nc, h, w)
input_label = self.Tensor(torch.Size(oneHot_size)).zero_()
self.input_label = input_label.scatter_(1, label_map.long().cuda(), 1.0)
# read instance map
if not opt.no_instance:
inst_img = Image.open(inst_path)
inst_img = inst_img.resize((w, h), Image.NEAREST)
self.inst_map = self.toTensor(inst_img).cuda()
self.edge_map = self.get_edges(self.inst_map)
self.net_input = Variable(torch.cat((self.input_label, self.edge_map), dim=1), volatile=True)
else:
self.net_input = Variable(self.input_label, volatile=True)
self.features_clustered = np.load(feat_path).item()
self.object_map = self.inst_map if opt.instance_feat else self.label_map
object_np = self.object_map.cpu().numpy().astype(int)
self.feat_map = self.Tensor(1, opt.feat_num, h, w).zero_()
self.cluster_indices = np.zeros(self.opt.label_nc, np.uint8)
for i in np.unique(object_np):
label = i if i < 1000 else i//1000
if label in self.features_clustered:
feat = self.features_clustered[label]
np.random.seed(i+1)
cluster_idx = np.random.randint(0, feat.shape[0])
self.cluster_indices[label] = cluster_idx
idx = (self.object_map == i).nonzero()
self.set_features(idx, feat, cluster_idx)
self.net_input_original = self.net_input.clone()
self.label_map_original = self.label_map.clone()
self.feat_map_original = self.feat_map.clone()
if not opt.no_instance:
self.inst_map_original = self.inst_map.clone()
def reset(self):
self.net_input = self.net_input_prev = self.net_input_original.clone()
self.label_map = self.label_map_prev = self.label_map_original.clone()
self.feat_map = self.feat_map_prev = self.feat_map_original.clone()
if not self.opt.no_instance:
self.inst_map = self.inst_map_prev = self.inst_map_original.clone()
self.object_map = self.inst_map if self.opt.instance_feat else self.label_map
def undo(self):
self.net_input = self.net_input_prev
self.label_map = self.label_map_prev
self.feat_map = self.feat_map_prev
if not self.opt.no_instance:
self.inst_map = self.inst_map_prev
self.object_map = self.inst_map if self.opt.instance_feat else self.label_map
# get boundary map from instance map
def get_edges(self, t):
edge = torch.cuda.ByteTensor(t.size()).zero_()
edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,:,:-1] = edge[:,:,:,:-1] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,1:,:] = edge[:,:,1:,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
edge[:,:,:-1,:] = edge[:,:,:-1,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
return edge.float()
# change the label at the source position to the label at the target position
def change_labels(self, click_src, click_tgt):
y_src, x_src = click_src[0], click_src[1]
y_tgt, x_tgt = click_tgt[0], click_tgt[1]
label_src = int(self.label_map[0, 0, y_src, x_src])
inst_src = self.inst_map[0, 0, y_src, x_src]
label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
idx_src = (self.inst_map == inst_src).nonzero()
# need to change 3 things: label map, instance map, and feature map
if idx_src.shape:
# backup current maps
self.backup_current_state()
# change both the label map and the network input
self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
self.net_input[idx_src[:,0], idx_src[:,1] + label_src, idx_src[:,2], idx_src[:,3]] = 0
self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
# update the instance map (and the network input)
if inst_tgt > 1000:
# if different instances have different ids, give the new object a new id
tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt+1) * 1000)
inst_tgt = self.inst_map[tgt_indices].max() + 1
self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = inst_tgt
self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
# also copy the source features to the target position
idx_tgt = (self.inst_map == inst_tgt).nonzero()
if idx_tgt.shape:
self.copy_features(idx_src, idx_tgt[0,:])
self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add strokes of target label in the image
def add_strokes(self, click_src, label_tgt, bw, save):
# get the region of the new strokes (bw is the brush width)
size = self.net_input.size()
h, w = size[2], size[3]
idx_src = torch.LongTensor(bw**2, 4).fill_(0)
for i in range(bw):
idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
for j in range(bw):
idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
idx_src = idx_src.cuda()
# again, need to update 3 things
if idx_src.shape:
# backup current maps
if save:
self.backup_current_state()
# update the label map (and the network input) in the stroke region
self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
for k in range(self.opt.label_nc):
self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
# update the instance map (and the network input)
self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
# also update the features if available
if self.opt.instance_feat:
feat = self.features_clustered[label_tgt]
#np.random.seed(label_tgt+1)
#cluster_idx = np.random.randint(0, feat.shape[0])
cluster_idx = self.cluster_indices[label_tgt]
self.set_features(idx_src, feat, cluster_idx)
self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
# add an object to the clicked position with selected style
def add_objects(self, click_src, label_tgt, mask, style_id=0):
y, x = click_src[0], click_src[1]
mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]
idx_src = torch.from_numpy(mask).cuda().nonzero()
idx_src[:,2] += y
idx_src[:,3] += x
# backup current maps
self.backup_current_state()
# update label map
self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
for k in range(self.opt.label_nc):
self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1
# update instance map
self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
# update feature map
self.set_features(idx_src, self.feat, style_id)
self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
def single_forward(self, net_input, feat_map):
net_input = torch.cat((net_input, feat_map), dim=1)
fake_image = self.netG.forward(net_input)
if fake_image.size()[0] == 1:
return fake_image.data[0]
return fake_image.data
# generate all outputs for different styles
def style_forward(self, click_pt, style_id=-1):
if click_pt is None:
self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
self.crop = None
self.mask = None
else:
instToChange = int(self.object_map[0, 0, click_pt[0], click_pt[1]])
self.instToChange = instToChange
label = instToChange if instToChange < 1000 else instToChange//1000
self.feat = self.features_clustered[label]
self.fake_image = []
self.mask = self.object_map == instToChange
idx = self.mask.nonzero()
self.get_crop_region(idx)
if idx.size():
if style_id == -1:
(min_y, min_x, max_y, max_x) = self.crop
### original
for cluster_idx in range(self.opt.multiple_output):
self.set_features(idx, self.feat, cluster_idx)
fake_image = self.single_forward(self.net_input, self.feat_map)
fake_image = util.tensor2im(fake_image[:,min_y:max_y,min_x:max_x])
self.fake_image.append(fake_image)
"""### To speed up previewing different style results, either crop or downsample the label maps
if instToChange > 1000:
(min_y, min_x, max_y, max_x) = self.crop
### crop
_, _, h, w = self.net_input.size()
offset = 512
y_start, x_start = max(0, min_y-offset), max(0, min_x-offset)
y_end, x_end = min(h, (max_y + offset)), min(w, (max_x + offset))
y_region = slice(y_start, y_start+(y_end-y_start)//16*16)
x_region = slice(x_start, x_start+(x_end-x_start)//16*16)
net_input = self.net_input[:,:,y_region,x_region]
for cluster_idx in range(self.opt.multiple_output):
self.set_features(idx, self.feat, cluster_idx)
fake_image = self.single_forward(net_input, self.feat_map[:,:,y_region,x_region])
fake_image = util.tensor2im(fake_image[:,min_y-y_start:max_y-y_start,min_x-x_start:max_x-x_start])
self.fake_image.append(fake_image)
else:
### downsample
(min_y, min_x, max_y, max_x) = [crop//2 for crop in self.crop]
net_input = self.net_input[:,:,::2,::2]
size = net_input.size()
net_input_batch = net_input.expand(self.opt.multiple_output, size[1], size[2], size[3])
for cluster_idx in range(self.opt.multiple_output):
self.set_features(idx, self.feat, cluster_idx)
feat_map = self.feat_map[:,:,::2,::2]
if cluster_idx == 0:
feat_map_batch = feat_map
else:
feat_map_batch = torch.cat((feat_map_batch, feat_map), dim=0)
fake_image_batch = self.single_forward(net_input_batch, feat_map_batch)
for i in range(self.opt.multiple_output):
self.fake_image.append(util.tensor2im(fake_image_batch[i,:,min_y:max_y,min_x:max_x]))"""
else:
self.set_features(idx, self.feat, style_id)
self.cluster_indices[label] = style_id
self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
def backup_current_state(self):
self.net_input_prev = self.net_input.clone()
self.label_map_prev = self.label_map.clone()
self.inst_map_prev = self.inst_map.clone()
self.feat_map_prev = self.feat_map.clone()
# crop the ROI and get the mask of the object
def get_crop_region(self, idx):
size = self.net_input.size()
h, w = size[2], size[3]
min_y, min_x = idx[:,2].min(), idx[:,3].min()
max_y, max_x = idx[:,2].max(), idx[:,3].max()
crop_min = 128
if max_y - min_y < crop_min:
min_y = max(0, (max_y + min_y) // 2 - crop_min // 2)
max_y = min(h-1, min_y + crop_min)
if max_x - min_x < crop_min:
min_x = max(0, (max_x + min_x) // 2 - crop_min // 2)
max_x = min(w-1, min_x + crop_min)
self.crop = (min_y, min_x, max_y, max_x)
self.mask = self.mask[:,:, min_y:max_y, min_x:max_x]
# update the feature map once a new object is added or the label is changed
def update_features(self, cluster_idx, mask=None, click_pt=None):
self.feat_map_prev = self.feat_map.clone()
# adding a new object
if mask is not None:
y, x = click_pt[0], click_pt[1]
mask = np.transpose(mask, (2,0,1))[np.newaxis,...]
idx = torch.from_numpy(mask).cuda().nonzero()
idx[:,2] += y
idx[:,3] += x
# changing the label of an existing object
else:
idx = (self.object_map == self.instToChange).nonzero()
# update feature map
self.set_features(idx, self.feat, cluster_idx)
# set the class features to the target feature
def set_features(self, idx, feat, cluster_idx):
for k in range(self.opt.feat_num):
self.feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k]
# copy the features at the target position to the source position
def copy_features(self, idx_src, idx_tgt):
for k in range(self.opt.feat_num):
val = self.feat_map[idx_tgt[0], idx_tgt[1] + k, idx_tgt[2], idx_tgt[3]]
self.feat_map[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = val
def get_current_visuals(self, getLabel=False):
mask = self.mask
if self.mask is not None:
mask = np.transpose(self.mask[0].cpu().float().numpy(), (1,2,0)).astype(np.uint8)
dict_list = [('fake_image', self.fake_image), ('mask', mask)]
if getLabel: # only output label map if needed to save bandwidth
label = util.tensor2label(self.net_input.data[0], self.opt.label_nc)
dict_list += [('label', label)]
return OrderedDict(dict_list) | pix2pixHD-master | models/ui_model.py |
import os
import torch
import sys
class BaseModel(torch.nn.Module):
def name(self):
return 'BaseModel'
def initialize(self, opt):
self.opt = opt
self.gpu_ids = opt.gpu_ids
self.isTrain = opt.isTrain
self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
def set_input(self, input):
self.input = input
def forward(self):
pass
# used in test time, no backprop
def test(self):
pass
def get_image_paths(self):
pass
def optimize_parameters(self):
pass
def get_current_visuals(self):
return self.input
def get_current_errors(self):
return {}
def save(self, label):
pass
# helper saving function that can be used by subclasses
def save_network(self, network, network_label, epoch_label, gpu_ids):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
save_path = os.path.join(self.save_dir, save_filename)
torch.save(network.cpu().state_dict(), save_path)
if len(gpu_ids) and torch.cuda.is_available():
network.cuda()
# helper loading function that can be used by subclasses
def load_network(self, network, network_label, epoch_label, save_dir=''):
save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
if not save_dir:
save_dir = self.save_dir
save_path = os.path.join(save_dir, save_filename)
if not os.path.isfile(save_path):
            print('%s does not exist yet!' % save_path)
            if network_label == 'G':
                raise Exception('Generator must exist!')  # raising a plain string is invalid in Python 3
else:
#network.load_state_dict(torch.load(save_path))
try:
network.load_state_dict(torch.load(save_path))
except:
pretrained_dict = torch.load(save_path)
model_dict = network.state_dict()
try:
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
network.load_state_dict(pretrained_dict)
if self.opt.verbose:
print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
except:
print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
for k, v in pretrained_dict.items():
if v.size() == model_dict[k].size():
model_dict[k] = v
if sys.version_info >= (3,0):
not_initialized = set()
else:
from sets import Set
not_initialized = Set()
for k, v in model_dict.items():
if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
not_initialized.add(k.split('.')[0])
print(sorted(not_initialized))
network.load_state_dict(model_dict)
def update_learning_rate():
pass
| pix2pixHD-master | models/base_model.py |
import torch
import torch.nn as nn
import functools
from torch.autograd import Variable
import numpy as np
###############################################################################
# Functions
###############################################################################
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[]):
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'global':
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
elif netG == 'local':
netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global,
n_local_enhancers, n_blocks_local, norm_layer)
elif netG == 'encoder':
netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)
else:
        raise NotImplementedError('generator not implemented!')
print(netG)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
netG.cuda(gpu_ids[0])
netG.apply(weights_init)
return netG
def define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]):
norm_layer = get_norm_layer(norm_type=norm)
netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat)
print(netD)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
netD.cuda(gpu_ids[0])
netD.apply(weights_init)
return netD
def print_network(net):
if isinstance(net, list):
net = net[0]
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
##############################################################################
# Losses
##############################################################################
class GANLoss(nn.Module):
def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_var = None
self.fake_label_var = None
self.Tensor = tensor
if use_lsgan:
self.loss = nn.MSELoss()
else:
self.loss = nn.BCELoss()
def get_target_tensor(self, input, target_is_real):
target_tensor = None
if target_is_real:
create_label = ((self.real_label_var is None) or
(self.real_label_var.numel() != input.numel()))
if create_label:
real_tensor = self.Tensor(input.size()).fill_(self.real_label)
self.real_label_var = Variable(real_tensor, requires_grad=False)
target_tensor = self.real_label_var
else:
create_label = ((self.fake_label_var is None) or
(self.fake_label_var.numel() != input.numel()))
if create_label:
fake_tensor = self.Tensor(input.size()).fill_(self.fake_label)
self.fake_label_var = Variable(fake_tensor, requires_grad=False)
target_tensor = self.fake_label_var
return target_tensor
def __call__(self, input, target_is_real):
if isinstance(input[0], list):
loss = 0
for input_i in input:
pred = input_i[-1]
target_tensor = self.get_target_tensor(pred, target_is_real)
loss += self.loss(pred, target_tensor)
return loss
else:
target_tensor = self.get_target_tensor(input[-1], target_is_real)
return self.loss(input[-1], target_tensor)
class VGGLoss(nn.Module):
def __init__(self, gpu_ids):
super(VGGLoss, self).__init__()
self.vgg = Vgg19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
def forward(self, x, y):
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
##############################################################################
# Generator
##############################################################################
class LocalEnhancer(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9,
n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):
super(LocalEnhancer, self).__init__()
self.n_local_enhancers = n_local_enhancers
###### global generator model #####
ngf_global = ngf * (2**n_local_enhancers)
model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model
model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers
self.model = nn.Sequential(*model_global)
###### local enhancer layers #####
for n in range(1, n_local_enhancers+1):
### downsample
ngf_global = ngf * (2**(n_local_enhancers-n))
model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0),
norm_layer(ngf_global), nn.ReLU(True),
nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf_global * 2), nn.ReLU(True)]
### residual blocks
model_upsample = []
for i in range(n_blocks_local):
model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]
### upsample
model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(ngf_global), nn.ReLU(True)]
### final convolution
if n == n_local_enhancers:
model_upsample += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def forward(self, input):
### create input pyramid
input_downsampled = [input]
for i in range(self.n_local_enhancers):
input_downsampled.append(self.downsample(input_downsampled[-1]))
        ### output at coarsest level
output_prev = self.model(input_downsampled[-1])
### build up one layer at a time
for n_local_enhancers in range(1, self.n_local_enhancers+1):
model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')
input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]
output_prev = model_upsample(model_downsample(input_i) + output_prev)
return output_prev
class GlobalGenerator(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d,
padding_type='reflect'):
assert(n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = nn.ReLU(True)
model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
### resnet blocks
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)), activation]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
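        # residual connection: add the block's output to its input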
out = x + self.conv_block(x)
return out
class Encoder(nn.Module):
def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
super(Encoder, self).__init__()
self.output_nc = output_nc
model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf), nn.ReLU(True)]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), nn.ReLU(True)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input, inst):
outputs = self.model(input)
# instance-wise average pooling
outputs_mean = outputs.clone()
inst_list = np.unique(inst.cpu().numpy().astype(int))
for i in inst_list:
for b in range(input.size()[0]):
indices = (inst[b:b+1] == int(i)).nonzero() # n x 4
for j in range(self.output_nc):
output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]
mean_feat = torch.mean(output_ins).expand_as(output_ins)
outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat
return outputs_mean
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
use_sigmoid=False, num_D=3, getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
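        # Each discriminator operates at a different image scale; the input is
        # average-pooled (downsampled) between passes.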
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
from torchvision import models
class Vgg19(torch.nn.Module):
def __init__(self, requires_grad=False):
super(Vgg19, self).__init__()
vgg_pretrained_features = models.vgg19(pretrained=True).features
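        # Split the pretrained VGG19 feature extractor into five slices, one per relu
        # level used by the VGG perceptual loss.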
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
for x in range(2):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(2, 7):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(7, 12):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(12, 21):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(21, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h_relu1 = self.slice1(X)
h_relu2 = self.slice2(h_relu1)
h_relu3 = self.slice3(h_relu2)
h_relu4 = self.slice4(h_relu3)
h_relu5 = self.slice5(h_relu4)
out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
return out
| pix2pixHD-master | models/networks.py |
import numpy as np
import torch
import os
from torch.autograd import Variable
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
class Pix2PixHDModel(BaseModel):
def name(self):
return 'Pix2PixHDModel'
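    # Build a helper that drops disabled loss terms (GAN feature matching, VGG)
    # from the fixed-order loss tuple.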
def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss):
flags = (True, use_gan_feat_loss, use_vgg_loss, True, True)
def loss_filter(g_gan, g_gan_feat, g_vgg, d_real, d_fake):
return [l for (l,f) in zip((g_gan,g_gan_feat,g_vgg,d_real,d_fake),flags) if f]
return loss_filter
def initialize(self, opt):
BaseModel.initialize(self, opt)
        if opt.resize_or_crop != 'none' or not opt.isTrain: # cudnn benchmark can cause OOM when training at full resolution, so only enable it otherwise
torch.backends.cudnn.benchmark = True
self.isTrain = opt.isTrain
self.use_features = opt.instance_feat or opt.label_feat
self.gen_features = self.use_features and not self.opt.load_features
input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc
##### define networks
# Generator network
netG_input_nc = input_nc
if not opt.no_instance:
netG_input_nc += 1
if self.use_features:
netG_input_nc += opt.feat_num
self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)
# Discriminator network
if self.isTrain:
use_sigmoid = opt.no_lsgan
netD_input_nc = input_nc + opt.output_nc
if not opt.no_instance:
netD_input_nc += 1
self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid,
opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
### Encoder network
if self.gen_features:
self.netE = networks.define_G(opt.output_nc, opt.feat_num, opt.nef, 'encoder',
opt.n_downsample_E, norm=opt.norm, gpu_ids=self.gpu_ids)
if self.opt.verbose:
print('---------- Networks initialized -------------')
# load networks
if not self.isTrain or opt.continue_train or opt.load_pretrain:
pretrained_path = '' if not self.isTrain else opt.load_pretrain
self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
if self.isTrain:
self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path)
if self.gen_features:
self.load_network(self.netE, 'E', opt.which_epoch, pretrained_path)
# set loss functions and optimizers
if self.isTrain:
if opt.pool_size > 0 and (len(self.gpu_ids)) > 1:
raise NotImplementedError("Fake Pool Not Implemented for MultiGPU")
self.fake_pool = ImagePool(opt.pool_size)
self.old_lr = opt.lr
# define loss functions
self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss)
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
self.criterionFeat = torch.nn.L1Loss()
if not opt.no_vgg_loss:
self.criterionVGG = networks.VGGLoss(self.gpu_ids)
            # Names so we can break out the individual loss terms
self.loss_names = self.loss_filter('G_GAN','G_GAN_Feat','G_VGG','D_real', 'D_fake')
# initialize optimizers
# optimizer G
if opt.niter_fix_global > 0:
import sys
if sys.version_info >= (3,0):
finetune_list = set()
else:
from sets import Set
finetune_list = Set()
params_dict = dict(self.netG.named_parameters())
params = []
for key, value in params_dict.items():
if key.startswith('model' + str(opt.n_local_enhancers)):
params += [value]
finetune_list.add(key.split('.')[0])
print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global)
print('The layers that are finetuned are ', sorted(finetune_list))
else:
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
# optimizer D
params = list(self.netD.parameters())
self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
def encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if self.opt.label_nc == 0:
input_label = label_map.data.cuda()
else:
# create one-hot vector for label map
size = label_map.size()
oneHot_size = (size[0], self.opt.label_nc, size[2], size[3])
input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0)
if self.opt.data_type == 16:
input_label = input_label.half()
# get edges from instance map
if not self.opt.no_instance:
inst_map = inst_map.data.cuda()
edge_map = self.get_edges(inst_map)
input_label = torch.cat((input_label, edge_map), dim=1)
input_label = Variable(input_label, volatile=infer)
# real images for training
if real_image is not None:
real_image = Variable(real_image.data.cuda())
# instance map for feature encoding
if self.use_features:
# get precomputed feature maps
if self.opt.load_features:
feat_map = Variable(feat_map.data.cuda())
if self.opt.label_feat:
inst_map = label_map.cuda()
return input_label, inst_map, real_image, feat_map
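    # Run the discriminator on a concatenated (label, image) pair; fake samples can
    # optionally be drawn from the image history pool.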
def discriminate(self, input_label, test_image, use_pool=False):
input_concat = torch.cat((input_label, test_image.detach()), dim=1)
if use_pool:
fake_query = self.fake_pool.query(input_concat)
return self.netD.forward(fake_query)
else:
return self.netD.forward(input_concat)
def forward(self, label, inst, image, feat, infer=False):
# Encode Inputs
input_label, inst_map, real_image, feat_map = self.encode_input(label, inst, image, feat)
# Fake Generation
if self.use_features:
if not self.opt.load_features:
feat_map = self.netE.forward(real_image, inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
fake_image = self.netG.forward(input_concat)
# Fake Detection and Loss
pred_fake_pool = self.discriminate(input_label, fake_image, use_pool=True)
loss_D_fake = self.criterionGAN(pred_fake_pool, False)
# Real Detection and Loss
pred_real = self.discriminate(input_label, real_image)
loss_D_real = self.criterionGAN(pred_real, True)
# GAN loss (Fake Passability Loss)
pred_fake = self.netD.forward(torch.cat((input_label, fake_image), dim=1))
loss_G_GAN = self.criterionGAN(pred_fake, True)
# GAN feature matching loss
loss_G_GAN_Feat = 0
if not self.opt.no_ganFeat_loss:
feat_weights = 4.0 / (self.opt.n_layers_D + 1)
D_weights = 1.0 / self.opt.num_D
for i in range(self.opt.num_D):
for j in range(len(pred_fake[i])-1):
loss_G_GAN_Feat += D_weights * feat_weights * \
self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat
# VGG feature matching loss
loss_G_VGG = 0
if not self.opt.no_vgg_loss:
loss_G_VGG = self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat
# Only return the fake_B image if necessary to save BW
return [ self.loss_filter( loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_D_real, loss_D_fake ), None if not infer else fake_image ]
def inference(self, label, inst, image=None):
# Encode Inputs
image = Variable(image) if image is not None else None
input_label, inst_map, real_image, _ = self.encode_input(Variable(label), Variable(inst), image, infer=True)
# Fake Generation
if self.use_features:
if self.opt.use_encoded_image:
# encode the real image to get feature map
feat_map = self.netE.forward(real_image, inst_map)
else:
# sample clusters from precomputed features
feat_map = self.sample_features(inst_map)
input_concat = torch.cat((input_label, feat_map), dim=1)
else:
input_concat = input_label
if torch.__version__.startswith('0.4'):
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
else:
fake_image = self.netG.forward(input_concat)
return fake_image
def sample_features(self, inst):
# read precomputed feature clusters
cluster_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, self.opt.cluster_path)
features_clustered = np.load(cluster_path, encoding='latin1').item()
# randomly sample from the feature clusters
inst_np = inst.cpu().numpy().astype(int)
feat_map = self.Tensor(inst.size()[0], self.opt.feat_num, inst.size()[2], inst.size()[3])
for i in np.unique(inst_np):
label = i if i < 1000 else i//1000
if label in features_clustered:
feat = features_clustered[label]
cluster_idx = np.random.randint(0, feat.shape[0])
idx = (inst == int(i)).nonzero()
for k in range(self.opt.feat_num):
feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k]
if self.opt.data_type==16:
feat_map = feat_map.half()
return feat_map
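    # Encode an image and collect one feature vector (plus an area ratio) per
    # instance, keyed by its semantic label; used to precompute feature clusters.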
def encode_features(self, image, inst):
image = Variable(image.cuda(), volatile=True)
feat_num = self.opt.feat_num
h, w = inst.size()[2], inst.size()[3]
block_num = 32
feat_map = self.netE.forward(image, inst.cuda())
inst_np = inst.cpu().numpy().astype(int)
feature = {}
for i in range(self.opt.label_nc):
feature[i] = np.zeros((0, feat_num+1))
for i in np.unique(inst_np):
label = i if i < 1000 else i//1000
idx = (inst == int(i)).nonzero()
num = idx.size()[0]
idx = idx[num//2,:]
val = np.zeros((1, feat_num+1))
for k in range(feat_num):
val[0, k] = feat_map[idx[0], idx[1] + k, idx[2], idx[3]].data[0]
val[0, feat_num] = float(num) / (h * w // block_num)
feature[label] = np.append(feature[label], val, axis=0)
return feature
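    # Mark pixels whose instance ID differs from a horizontal or vertical neighbour,
    # producing the binary edge map that is concatenated to the label input.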
def get_edges(self, t):
edge = torch.cuda.ByteTensor(t.size()).zero_()
edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,:,:-1] = edge[:,:,:,:-1] | (t[:,:,:,1:] != t[:,:,:,:-1])
edge[:,:,1:,:] = edge[:,:,1:,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
edge[:,:,:-1,:] = edge[:,:,:-1,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
if self.opt.data_type==16:
return edge.half()
else:
return edge.float()
def save(self, which_epoch):
self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)
self.save_network(self.netD, 'D', which_epoch, self.gpu_ids)
if self.gen_features:
self.save_network(self.netE, 'E', which_epoch, self.gpu_ids)
def update_fixed_params(self):
# after fixing the global generator for a number of iterations, also start finetuning it
params = list(self.netG.parameters())
if self.gen_features:
params += list(self.netE.parameters())
self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
if self.opt.verbose:
print('------------ Now also finetuning global generator -----------')
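    # Linearly decay the learning rate over opt.niter_decay epochs.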
def update_learning_rate(self):
lrd = self.opt.lr / self.opt.niter_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_D.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = lr
if self.opt.verbose:
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
class InferenceModel(Pix2PixHDModel):
def forward(self, inp):
label, inst = inp
return self.inference(label, inst)
| pix2pixHD-master | models/pix2pixHD_model.py |
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
def get_params(opt, size):
w, h = size
new_h = h
new_w = w
if opt.resize_or_crop == 'resize_and_crop':
new_h = new_w = opt.loadSize
elif opt.resize_or_crop == 'scale_width_and_crop':
new_w = opt.loadSize
new_h = opt.loadSize * h // w
x = random.randint(0, np.maximum(0, new_w - opt.fineSize))
y = random.randint(0, np.maximum(0, new_h - opt.fineSize))
flip = random.random() > 0.5
return {'crop_pos': (x, y), 'flip': flip}
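# Build the torchvision transform pipeline (resize/scale-width, crop, make-power-of-2
# resize, flip, tensor conversion, normalization) according to the options.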
def get_transform(opt, params, method=Image.BICUBIC, normalize=True):
transform_list = []
if 'resize' in opt.resize_or_crop:
osize = [opt.loadSize, opt.loadSize]
transform_list.append(transforms.Scale(osize, method))
elif 'scale_width' in opt.resize_or_crop:
transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))
if 'crop' in opt.resize_or_crop:
transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
if opt.resize_or_crop == 'none':
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
if opt.isTrain and not opt.no_flip:
transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def normalize():
return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
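# Round each side to the nearest multiple of `base` so the image passes through the
# generator's downsampling/upsampling stages without size mismatches.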
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
ow, oh = img.size
if (ow == target_width):
return img
w = target_width
h = int(target_width * oh / ow)
return img.resize((w, h), method)
def __crop(img, pos, size):
ow, oh = img.size
x1, y1 = pos
tw = th = size
if (ow > tw or oh > th):
return img.crop((x1, y1, x1 + tw, y1 + th))
return img
def __flip(img, flip):
if flip:
return img.transpose(Image.FLIP_LEFT_RIGHT)
return img
| pix2pixHD-master | data/base_dataset.py |
def CreateDataLoader(opt):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
| pix2pixHD-master | data/data_loader.py |
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
pass
    def load_data(self):
return None
| pix2pixHD-master | data/base_data_loader.py |
pix2pixHD-master | data/__init__.py |
|
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def make_dataset(dir):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images
def default_loader(path):
return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):
def __init__(self, root, transform=None, return_paths=False,
loader=default_loader):
imgs = make_dataset(root)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in: " + root + "\n"
"Supported image extensions are: " +
",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.transform = transform
self.return_paths = return_paths
self.loader = loader
def __getitem__(self, index):
path = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.return_paths:
return img, path
else:
return img
def __len__(self):
return len(self.imgs)
| pix2pixHD-master | data/image_folder.py |
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
### input A (label maps)
dir_A = '_A' if self.opt.label_nc == 0 else '_label'
self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
self.A_paths = sorted(make_dataset(self.dir_A))
### input B (real images)
if opt.isTrain or opt.use_encoded_image:
dir_B = '_B' if self.opt.label_nc == 0 else '_img'
self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
self.B_paths = sorted(make_dataset(self.dir_B))
### instance maps
if not opt.no_instance:
self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
self.inst_paths = sorted(make_dataset(self.dir_inst))
### load precomputed instance-wise encoded features
if opt.load_features:
self.dir_feat = os.path.join(opt.dataroot, opt.phase + '_feat')
print('----------- loading features from %s ----------' % self.dir_feat)
self.feat_paths = sorted(make_dataset(self.dir_feat))
self.dataset_size = len(self.A_paths)
def __getitem__(self, index):
### input A (label maps)
A_path = self.A_paths[index]
A = Image.open(A_path)
params = get_params(self.opt, A.size)
if self.opt.label_nc == 0:
transform_A = get_transform(self.opt, params)
A_tensor = transform_A(A.convert('RGB'))
else:
transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
A_tensor = transform_A(A) * 255.0
B_tensor = inst_tensor = feat_tensor = 0
### input B (real images)
if self.opt.isTrain or self.opt.use_encoded_image:
B_path = self.B_paths[index]
B = Image.open(B_path).convert('RGB')
transform_B = get_transform(self.opt, params)
B_tensor = transform_B(B)
### if using instance maps
if not self.opt.no_instance:
inst_path = self.inst_paths[index]
inst = Image.open(inst_path)
inst_tensor = transform_A(inst)
if self.opt.load_features:
feat_path = self.feat_paths[index]
feat = Image.open(feat_path).convert('RGB')
norm = normalize()
feat_tensor = norm(transform_A(feat))
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': A_path}
return input_dict
def __len__(self):
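        # Truncate the length to a multiple of the batch size so every batch is full.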
return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize
def name(self):
return 'AlignedDataset' | pix2pixHD-master | data/aligned_dataset.py |
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
from data.aligned_dataset import AlignedDataset
dataset = AlignedDataset()
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
def load_data(self):
return self.dataloader
def __len__(self):
return min(len(self.dataset), self.opt.max_dataset_size)
| pix2pixHD-master | data/custom_dataset_data_loader.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), 'util'))
| otk-pyoptix-master | test/conftest.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import cupy as cp
import array
import pytest
import sample_ptx
import tutil
if tutil.optix_version_gte( (7,2) ):
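    # Bound-value entries are only available from OptiX 7.2 on, hence the version gate above.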
class TestModuleCompileBoundValueEntry:
def test_compile_bound_value_entry( self ):
bound_value_entry_default = optix.ModuleCompileBoundValueEntry(
)
bound_value = array.array( 'f', [0.1, 0.2, 0.3] )
bound_value_entry = optix.ModuleCompileBoundValueEntry(
pipelineParamOffsetInBytes = 4,
boundValue = bound_value,
annotation = "my_bound_value"
)
assert bound_value_entry.pipelineParamOffsetInBytes == 4
with pytest.raises( AttributeError ):
print( bound_value_entry.boundValue )
assert bound_value_entry.annotation == "my_bound_value"
bound_value_entry.pipelineParamOffsetInBytes = 8
assert bound_value_entry.pipelineParamOffsetInBytes == 8
bound_value_entry.annotation = "new_bound_value"
assert bound_value_entry.annotation == "new_bound_value"
if tutil.optix_version_gte( (7,4) ):
class TestModuleCompilePayloadType:
def test_compile_payload_type( self ):
payload_semantics = [ 0, 1 ]
payload_type_default = optix.PayloadType(
)
payload_type_default.payloadSemantics = payload_semantics
payload_type = optix.PayloadType(
payloadSemantics = payload_semantics
)
class TestModule:
if tutil.optix_version_gte( (7,2) ):
def test_options( self ):
mod_opts = optix.ModuleCompileOptions(
maxRegisterCount = 64,
optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1,
debugLevel = tutil.default_debug_level(),
boundValues = []
)
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == tutil.default_debug_level()
# optix.ModuleCompileOptions.boundValues is write-only
with pytest.raises( AttributeError ):
print( mod_opts.boundValues )
mod_opts = optix.ModuleCompileOptions()
assert mod_opts.maxRegisterCount == optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_DEFAULT
assert mod_opts.debugLevel == tutil.default_debug_level()
mod_opts.maxRegisterCount = 64
mod_opts.optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1
mod_opts.debugLevel = tutil.default_debug_level()
mod_opts.boundValues = [ optix.ModuleCompileBoundValueEntry() ];
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == tutil.default_debug_level()
elif tutil.optix_version_gte( (7,1) ):
def test_options( self ):
mod_opts = optix.ModuleCompileOptions(
maxRegisterCount = 64,
optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1,
debugLevel = tutil.default_debug_level()
)
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == tutil.default_debug_level()
mod_opts = optix.ModuleCompileOptions()
assert mod_opts.maxRegisterCount == optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_DEFAULT
assert mod_opts.debugLevel == optix.COMPILE_DEBUG_LEVEL_DEFAULT
mod_opts.maxRegisterCount = 64
mod_opts.optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1
mod_opts.debugLevel = tutil.default_debug_level()
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == tutil.default_debug_level()
else:
def test_options( self ):
mod_opts = optix.ModuleCompileOptions(
maxRegisterCount = 64,
optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1,
debugLevel = tutil.default_debug_level()
)
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == tutil.default_debug_level()
mod_opts = optix.ModuleCompileOptions()
assert mod_opts.maxRegisterCount == optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_DEFAULT
assert mod_opts.debugLevel == tutil.default_debug_level()
mod_opts.maxRegisterCount = 64
mod_opts.optLevel = optix.COMPILE_OPTIMIZATION_LEVEL_1
mod_opts.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
assert mod_opts.maxRegisterCount == 64
assert mod_opts.optLevel == optix.COMPILE_OPTIMIZATION_LEVEL_1
assert mod_opts.debugLevel == optix.COMPILE_DEBUG_LEVEL_FULL
def test_create_destroy( self ):
ctx = optix.deviceContextCreate(0, optix.DeviceContextOptions())
module_opts = optix.ModuleCompileOptions()
pipeline_opts = optix.PipelineCompileOptions()
if tutil.optix_version_gte( (7,7) ):
mod, log = ctx.moduleCreate(
module_opts,
pipeline_opts,
sample_ptx.hello_ptx,
)
else:
mod, log = ctx.moduleCreateFromPTX(
module_opts,
pipeline_opts,
sample_ptx.hello_ptx,
)
assert type(mod) is optix.Module
assert type(log) is str
mod.destroy()
ctx.destroy()
if tutil.optix_version_gte( (7,4) ):
def test_payload_semantics_use( self ):
ctx = optix.deviceContextCreate(0, optix.DeviceContextOptions())
module_opts = optix.ModuleCompileOptions()
pipeline_opts = optix.PipelineCompileOptions()
pipeline_opts.numPayloadValues = 3
payload_sem = (
optix.PAYLOAD_SEMANTICS_TRACE_CALLER_READ_WRITE |
optix.PAYLOAD_SEMANTICS_CH_READ_WRITE |
optix.PAYLOAD_SEMANTICS_MS_READ_WRITE |
optix.PAYLOAD_SEMANTICS_AH_READ_WRITE |
optix.PAYLOAD_SEMANTICS_IS_READ_WRITE
)
payload_type = optix.PayloadType( [ payload_sem, payload_sem, payload_sem ] )
module_opts.payloadTypes = [ payload_type ]
if tutil.optix_version_gte( (7,7 ) ):
mod, log = ctx.moduleCreate(
module_opts,
pipeline_opts,
sample_ptx.triangle_ptx,
)
else:
mod, log = ctx.moduleCreateFromPTX(
module_opts,
pipeline_opts,
sample_ptx.triangle_ptx,
)
mod.destroy()
ctx.destroy()
def test_bound_values_use( self ):
ctx = optix.deviceContextCreate(0, optix.DeviceContextOptions())
module_opts = optix.ModuleCompileOptions()
pipeline_opts = optix.PipelineCompileOptions()
bound_value = array.array( 'f', [0.1, 0.2, 0.3] )
bound_value_entry = optix.ModuleCompileBoundValueEntry(
pipelineParamOffsetInBytes = 4,
boundValue = bound_value,
annotation = "my_bound_value"
)
module_opts.boundValues = [ bound_value_entry ]
if tutil.optix_version_gte( (7,7) ):
mod, log = ctx.moduleCreate(
module_opts,
pipeline_opts,
sample_ptx.hello_ptx,
)
else:
mod, log = ctx.moduleCreateFromPTX(
module_opts,
pipeline_opts,
sample_ptx.hello_ptx,
)
mod.destroy()
ctx.destroy()
if tutil.optix_version_gte( (7,1) ):
def test_builtin_is_module_get( self ):
ctx = optix.deviceContextCreate(0, optix.DeviceContextOptions())
module_opts = optix.ModuleCompileOptions()
pipeline_opts = optix.PipelineCompileOptions()
builtin_is_opts = optix.BuiltinISOptions()
builtin_is_opts.builtinISModuleType = optix.PRIMITIVE_TYPE_TRIANGLE
is_mod = ctx.builtinISModuleGet(
module_opts,
pipeline_opts,
builtin_is_opts
)
assert type( is_mod ) is optix.Module
is_mod.destroy()
ctx.destroy()
| otk-pyoptix-master | test/test_module.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix as ox
import cupy as cp
import array
import pytest
import sample_ptx
import tutil
if tutil.optix_version_gte( (7,4) ):
class TestProgramGroupOptions:
def test_constructor(self):
pgo = ox.ProgramGroupOptions()
assert type(pgo) is ox.ProgramGroupOptions
class TestProgramGroupBase:
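    # Shared fixture: create a device context and compile the sample PTX module for each test.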
def setup_method(self):
self.ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
if tutil.optix_version_gte( (7,7) ):
self.mod, log = self.ctx.moduleCreate(ox.ModuleCompileOptions(),
ox.PipelineCompileOptions(),
sample_ptx.hello_ptx)
else:
self.mod, log = self.ctx.moduleCreateFromPTX(ox.ModuleCompileOptions(),
ox.PipelineCompileOptions(),
sample_ptx.hello_ptx)
def teardown_method(self):
self.mod.destroy()
self.ctx.destroy()
class TestProgramGroupDescriptor(TestProgramGroupBase):
def test_constructor(self):
pgd = ox.ProgramGroupDesc(raygenModule = self.mod,
raygenEntryFunctionName = "__raygen__hello")
assert pgd.raygenModule == self.mod
assert pgd.raygenEntryFunctionName == "__raygen__hello"
def test_attributes(self):
pgd = ox.ProgramGroupDesc()
pgd.raygenModule = self.mod
pgd.raygenEntryFunctionName = "__raygen__hello"
assert pgd.raygenModule == self.mod
assert pgd.raygenEntryFunctionName == "__raygen__hello"
class TestProgramGroup(TestProgramGroupBase):
def test_create_raygen(self):
prog_group_desc = ox.ProgramGroupDesc()
prog_group_desc.raygenModule = self.mod
prog_group_desc.raygenEntryFunctionName = "__raygen__hello"
prog_groups = None
log = None
if tutil.optix_version_gte( (7,4) ):
prog_group_opts = ox.ProgramGroupOptions()
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc], prog_group_opts)
else:
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc] )
assert len(prog_groups) == 1
assert type(prog_groups[0]) is ox.ProgramGroup
prog_groups[0].destroy()
def test_create_miss(self):
prog_group_desc = ox.ProgramGroupDesc()
prog_group_desc.missModule = self.mod
prog_group_desc.missEntryFunctionName = "__miss__noop"
prog_groups = None
log = None
if tutil.optix_version_gte( (7,4) ):
prog_group_opts = ox.ProgramGroupOptions()
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc], prog_group_opts)
else:
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc] )
assert len(prog_groups) == 1
assert type(prog_groups[0]) is ox.ProgramGroup
prog_groups[0].destroy()
def test_create_callables(self):
prog_group_desc = ox.ProgramGroupDesc()
prog_group_desc.callablesModuleDC = self.mod
prog_group_desc.callablesModuleCC = self.mod
prog_group_desc.callablesEntryFunctionNameCC = "__continuation_callable__noop"
prog_group_desc.callablesEntryFunctionNameDC = "__direct_callable__noop"
prog_groups = None
log = None
if tutil.optix_version_gte( (7,4) ):
prog_group_opts = ox.ProgramGroupOptions()
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc], prog_group_opts)
else:
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc] )
assert len(prog_groups) == 1
assert type(prog_groups[0]) is ox.ProgramGroup
prog_groups[0].destroy()
def test_create_hitgroup(self):
prog_group_desc = ox.ProgramGroupDesc()
prog_group_desc.hitgroupModuleCH = self.mod
prog_group_desc.hitgroupModuleAH = self.mod
prog_group_desc.hitgroupModuleIS = self.mod
prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__noop"
prog_group_desc.hitgroupEntryFunctionNameAH = "__anyhit__noop"
prog_group_desc.hitgroupEntryFunctionNameIS = "__intersection__noop"
prog_groups = None
log = None
if tutil.optix_version_gte( (7,4) ):
prog_group_opts = ox.ProgramGroupOptions()
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc], prog_group_opts)
else:
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc] )
assert len(prog_groups) == 1
assert type(prog_groups[0]) is ox.ProgramGroup
prog_groups[0].destroy()
def create_prog_group(self):
prog_group_desc = ox.ProgramGroupDesc()
prog_group_desc.raygenModule = self.mod
prog_group_desc.raygenEntryFunctionName = "__raygen__hello"
prog_groups = None
log = None
if tutil.optix_version_gte( (7,4) ):
prog_group_opts = ox.ProgramGroupOptions()
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc], prog_group_opts)
else:
prog_groups, log = self.ctx.programGroupCreate([prog_group_desc] )
return prog_groups[0]
def test_get_stack_size(self):
if tutil.optix_version_gte( (7,6) ):
print("TODO - newer version requires pipeline arg")
else:
prog_group = self.create_prog_group()
stack_size = prog_group.getStackSize()
assert type(stack_size) is ox.StackSizes
| otk-pyoptix-master | test/test_program_group.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
hello_ptx = '''
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-29373293
// Cuda compilation tools, release 11.2, V11.2.67
// Based on NVVM 7.0.1
//
.version 7.2
.target sm_60
.address_size 64
// .globl __raygen__hello
.visible .const .align 8 .b8 params[16];
.visible .entry __raygen__hello()
{
.reg .pred %p<4>;
.reg .b16 %rs<5>;
.reg .f32 %f<39>;
.reg .b32 %r<12>;
.reg .b64 %rd<6>;
.loc 1 39 0
Lfunc_begin0:
.loc 1 39 0
.loc 1 41 26
.loc 2 5675 5, function_name Linfo_string0, inlined_at 1 41 26
// begin inline asm
call (%r1), _optix_get_launch_index_x, ();
// end inline asm
.loc 2 5676 5, function_name Linfo_string0, inlined_at 1 41 26
// begin inline asm
call (%r2), _optix_get_launch_index_y, ();
// end inline asm
Ltmp0:
.loc 1 42 39
.loc 2 5703 5, function_name Linfo_string1, inlined_at 1 42 39
// begin inline asm
call (%rd1), _optix_get_sbt_data_ptr_64, ();
// end inline asm
Ltmp1:
.loc 1 43 5
ld.const.u64 %rd2, [params];
cvta.to.global.u64 %rd3, %rd2;
ld.const.u32 %r4, [params+8];
mad.lo.s32 %r5, %r4, %r2, %r1;
ld.f32 %f1, [%rd1];
ld.f32 %f2, [%rd1+4];
ld.f32 %f3, [%rd1+8];
.loc 3 121 22
mov.f32 %f4, 0f3F800000;
min.ftz.f32 %f5, %f1, %f4;
.loc 3 121 12
mov.f32 %f6, 0f00000000;
max.ftz.f32 %f7, %f6, %f5;
.loc 3 121 22
min.ftz.f32 %f8, %f2, %f4;
.loc 3 121 12
max.ftz.f32 %f9, %f6, %f8;
.loc 3 121 22
min.ftz.f32 %f10, %f3, %f4;
.loc 3 121 12
max.ftz.f32 %f11, %f6, %f10;
.loc 4 38 33
lg2.approx.ftz.f32 %f12, %f7;
mul.ftz.f32 %f13, %f12, 0f3ED55555;
ex2.approx.ftz.f32 %f14, %f13;
.loc 4 38 56
lg2.approx.ftz.f32 %f15, %f9;
mul.ftz.f32 %f16, %f15, 0f3ED55555;
ex2.approx.ftz.f32 %f17, %f16;
.loc 4 38 79
lg2.approx.ftz.f32 %f18, %f11;
mul.ftz.f32 %f19, %f18, 0f3ED55555;
ex2.approx.ftz.f32 %f20, %f19;
setp.lt.ftz.f32 %p1, %f7, 0f3B4D2E1C;
mul.ftz.f32 %f21, %f7, 0f414EB852;
fma.rn.ftz.f32 %f22, %f14, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f23, %f21, %f22, %p1;
setp.lt.ftz.f32 %p2, %f9, 0f3B4D2E1C;
mul.ftz.f32 %f24, %f9, 0f414EB852;
fma.rn.ftz.f32 %f25, %f17, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f26, %f24, %f25, %p2;
setp.lt.ftz.f32 %p3, %f11, 0f3B4D2E1C;
mul.ftz.f32 %f27, %f11, 0f414EB852;
fma.rn.ftz.f32 %f28, %f20, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f29, %f27, %f28, %p3;
Ltmp2:
.loc 4 61 25
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 25
min.ftz.f32 %f30, %f23, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 25
max.ftz.f32 %f31, %f6, %f30;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 25
mul.ftz.f32 %f32, %f31, 0f43800000;
cvt.rzi.ftz.u32.f32 %r6, %f32;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 25
min.u32 %r7, %r6, 255;
Ltmp3:
.loc 4 61 58
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 58
min.ftz.f32 %f33, %f26, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 58
max.ftz.f32 %f34, %f6, %f33;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 58
mul.ftz.f32 %f35, %f34, 0f43800000;
cvt.rzi.ftz.u32.f32 %r8, %f35;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 58
min.u32 %r9, %r8, 255;
Ltmp4:
.loc 4 61 91
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 91
min.ftz.f32 %f36, %f29, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 91
max.ftz.f32 %f37, %f6, %f36;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 91
mul.ftz.f32 %f38, %f37, 0f43800000;
cvt.rzi.ftz.u32.f32 %r10, %f38;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 91
min.u32 %r11, %r10, 255;
Ltmp5:
.loc 4 61 91
mul.wide.u32 %rd4, %r5, 4;
add.s64 %rd5, %rd3, %rd4;
Ltmp6:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 91
cvt.u16.u32 %rs1, %r11;
Ltmp7:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 58
cvt.u16.u32 %rs2, %r9;
Ltmp8:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 25
cvt.u16.u32 %rs3, %r7;
Ltmp9:
.loc 4 61 91
mov.u16 %rs4, 255;
st.global.v4.u8 [%rd5], {%rs3, %rs2, %rs1, %rs4};
.loc 1 45 1
ret;
Ltmp10:
Lfunc_end0:
}
// .globl __anyhit__noop
.visible .entry __anyhit__noop()
{
.loc 1 48 0
Lfunc_begin1:
.loc 1 48 0
.loc 1 48 48
ret;
Ltmp11:
Lfunc_end1:
}
// .globl __closesthit__noop
.visible .entry __closesthit__noop()
{
.loc 1 51 0
Lfunc_begin2:
.loc 1 51 0
.loc 1 51 52
ret;
Ltmp12:
Lfunc_end2:
}
// .globl __intersection__noop
.visible .entry __intersection__noop()
{
.loc 1 54 0
Lfunc_begin3:
.loc 1 54 0
.loc 1 54 55
ret;
Ltmp13:
Lfunc_end3:
}
// .globl __intersect__noop
.visible .entry __intersect__noop()
{
.loc 1 57 0
Lfunc_begin4:
.loc 1 57 0
.loc 1 57 52
ret;
Ltmp14:
Lfunc_end4:
}
// .globl __miss__noop
.visible .entry __miss__noop()
{
.loc 1 60 0
Lfunc_begin5:
.loc 1 60 0
.loc 1 60 47
ret;
Ltmp15:
Lfunc_end5:
}
// .globl __direct_callable__noop
.visible .entry __direct_callable__noop()
{
.loc 1 63 0
Lfunc_begin6:
.loc 1 63 0
.loc 1 63 58
ret;
Ltmp16:
Lfunc_end6:
}
// .globl __continuation_callable__noop
.visible .entry __continuation_callable__noop()
{
.loc 1 66 0
Lfunc_begin7:
.loc 1 66 0
.loc 1 66 64
ret;
Ltmp17:
Lfunc_end7:
}
.file 1 "optixHello/draw_solid_color.cu"
.file 2 "include/internal/optix_7_device_impl.h"
.file 3 "sutil/vec_math.h"
.file 4 "cuda/helpers.h"
.file 5 "cuda/include/crt/math_functions.hpp"
.section .debug_str
{
Linfo_string0:
.b8 95,90,78,55,51,95,73,78,84,69,82,78,65,76,95,53,49,95,116,109,112,120,102,116,95,48,48,49,48,102,48,57,54,95,48,48,48,48,48,48
.b8 48,48,95,55,95,100,114,97,119,95,115,111,108,105,100,95,99,111,108,111,114,95,99,112,112,49,95,105,105,95,51,101,52,98,52,55,50,54,49,57
.b8 111,112,116,105,120,71,101,116,76,97,117,110,99,104,73,110,100,101,120,69,118,0
Linfo_string1:
.b8 95,90,78,55,51,95,73,78,84,69,82,78,65,76,95,53,49,95,116,109,112,120,102,116,95,48,48,49,48,102,48,57,54,95,48,48,48,48,48,48
.b8 48,48,95,55,95,100,114,97,119,95,115,111,108,105,100,95,99,111,108,111,114,95,99,112,112,49,95,105,105,95,51,101,52,98,52,55,50,54,50,50
.b8 111,112,116,105,120,71,101,116,83,98,116,68,97,116,97,80,111,105,110,116,101,114,69,118,0
Linfo_string2:
.b8 95,90,50,49,113,117,97,110,116,105,122,101,85,110,115,105,103,110,101,100,56,66,105,116,115,102,0
}
'''
triangle_ptx = '''
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-29373293
// Cuda compilation tools, release 11.2, V11.2.67
// Based on NVVM 7.0.1
//
.version 7.2
.target sm_60
.address_size 64
// .globl __raygen__rg
.visible .const .align 8 .b8 params[72];
.visible .entry __raygen__rg()
{
.reg .pred %p<4>;
.reg .b16 %rs<5>;
.reg .f32 %f<89>;
.reg .b32 %r<118>;
.reg .b64 %rd<6>;
.loc 1 64 0
Lfunc_begin0:
.loc 1 64 0
.loc 1 67 23
.loc 2 5711 5, function_name Linfo_string0, inlined_at 1 67 23
// begin inline asm
call (%r1), _optix_get_launch_index_x, ();
// end inline asm
.loc 2 5712 5, function_name Linfo_string0, inlined_at 1 67 23
// begin inline asm
call (%r2), _optix_get_launch_index_y, ();
// end inline asm
Ltmp0:
.loc 1 68 23
.loc 2 5725 5, function_name Linfo_string1, inlined_at 1 68 23
// begin inline asm
call (%r4), _optix_get_launch_dimension_x, ();
// end inline asm
.loc 2 5726 5, function_name Linfo_string1, inlined_at 1 68 23
// begin inline asm
call (%r5), _optix_get_launch_dimension_y, ();
// end inline asm
Ltmp1:
.loc 1 73 5
ld.const.v2.f32 {%f10, %f11}, [params+24];
mov.u32 %r44, 0;
mov.u32 %r43, 1;
ld.const.v2.f32 {%f13, %f14}, [params+32];
ld.const.v2.f32 {%f17, %f18}, [params+40];
ld.const.v2.f32 {%f21, %f22}, [params+48];
ld.const.v2.f32 {%f25, %f26}, [params+56];
cvt.rn.f32.u32 %f29, %r1;
cvt.rn.f32.u32 %f30, %r4;
div.approx.ftz.f32 %f31, %f29, %f30;
cvt.rn.f32.u32 %f32, %r2;
cvt.rn.f32.u32 %f33, %r5;
div.approx.ftz.f32 %f34, %f32, %f33;
fma.rn.ftz.f32 %f35, %f31, 0f40000000, 0fBF800000;
mov.f32 %f36, 0f3F800000;
fma.rn.ftz.f32 %f37, %f34, 0f40000000, 0fBF800000;
ld.const.v2.f32 {%f38, %f39}, [params+16];
mul.ftz.f32 %f40, %f17, %f37;
mul.ftz.f32 %f41, %f18, %f37;
mul.ftz.f32 %f42, %f21, %f37;
fma.rn.ftz.f32 %f43, %f11, %f35, %f40;
fma.rn.ftz.f32 %f44, %f13, %f35, %f41;
fma.rn.ftz.f32 %f45, %f14, %f35, %f42;
add.ftz.f32 %f46, %f22, %f43;
add.ftz.f32 %f47, %f25, %f44;
add.ftz.f32 %f48, %f26, %f45;
mul.ftz.f32 %f49, %f47, %f47;
fma.rn.ftz.f32 %f50, %f46, %f46, %f49;
fma.rn.ftz.f32 %f51, %f48, %f48, %f50;
Ltmp2:
.loc 3 547 25
rsqrt.approx.ftz.f32 %f52, %f51;
mul.ftz.f32 %f4, %f52, %f46;
mul.ftz.f32 %f5, %f52, %f47;
mul.ftz.f32 %f6, %f52, %f48;
Ltmp3:
.loc 1 77 5
ld.const.u64 %rd1, [params+64];
Ltmp4:
.loc 1 77 5
.loc 2 198 5, function_name Linfo_string2, inlined_at 1 77 5
mov.f32 %f8, 0f5A0E1BCA;
mov.f32 %f9, 0f00000000;
mov.u32 %r40, 255;
mov.u32 %r45, 3;
// begin inline asm
call(%r7,%r8,%r9,%r10,%r11,%r12,%r13,%r14,%r15,%r16,%r17,%r18,%r19,%r20,%r21,%r22,%r23,%r24,%r25,%r26,%r27,%r28,%r29,%r30,%r31,%r32,%r33,%r34,%r35,%r36,%r37,%r38),_optix_trace_typed_32,(%r44,%rd1,%f38,%f39,%f10,%f4,%f5,%f6,%f9,%f8,%f9,%r40,%r44,%r44,%r43,%r44,%r45,%r78,%r79,%r80,%r81,%r82,%r83,%r84,%r85,%r86,%r87,%r88,%r89,%r90,%r91,%r92,%r93,%r94,%r95,%r96,%r97,%r98,%r99,%r100,%r101,%r102,%r103,%r104,%r105,%r106,%r107,%r108,%r109);
// end inline asm
Ltmp5:
.loc 4 137 10
mov.b32 %f53, %r7;
mov.b32 %f54, %r8;
mov.b32 %f55, %r9;
.loc 1 96 5
ld.const.u64 %rd2, [params];
cvta.to.global.u64 %rd3, %rd2;
ld.const.u32 %r110, [params+8];
mad.lo.s32 %r111, %r110, %r2, %r1;
.loc 3 121 22
min.ftz.f32 %f56, %f53, %f36;
.loc 3 121 12
max.ftz.f32 %f57, %f9, %f56;
.loc 3 121 22
min.ftz.f32 %f58, %f54, %f36;
.loc 3 121 12
max.ftz.f32 %f59, %f9, %f58;
.loc 3 121 22
min.ftz.f32 %f60, %f55, %f36;
.loc 3 121 12
max.ftz.f32 %f61, %f9, %f60;
.loc 5 38 33
lg2.approx.ftz.f32 %f62, %f57;
mul.ftz.f32 %f63, %f62, 0f3ED55555;
ex2.approx.ftz.f32 %f64, %f63;
.loc 5 38 56
lg2.approx.ftz.f32 %f65, %f59;
mul.ftz.f32 %f66, %f65, 0f3ED55555;
ex2.approx.ftz.f32 %f67, %f66;
.loc 5 38 79
lg2.approx.ftz.f32 %f68, %f61;
mul.ftz.f32 %f69, %f68, 0f3ED55555;
ex2.approx.ftz.f32 %f70, %f69;
setp.lt.ftz.f32 %p1, %f57, 0f3B4D2E1C;
mul.ftz.f32 %f71, %f57, 0f414EB852;
fma.rn.ftz.f32 %f72, %f64, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f73, %f71, %f72, %p1;
setp.lt.ftz.f32 %p2, %f59, 0f3B4D2E1C;
mul.ftz.f32 %f74, %f59, 0f414EB852;
fma.rn.ftz.f32 %f75, %f67, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f76, %f74, %f75, %p2;
setp.lt.ftz.f32 %p3, %f61, 0f3B4D2E1C;
mul.ftz.f32 %f77, %f61, 0f414EB852;
fma.rn.ftz.f32 %f78, %f70, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f79, %f77, %f78, %p3;
Ltmp6:
.loc 5 61 25
.loc 3 121 22, function_name Linfo_string3, inlined_at 5 61 25
min.ftz.f32 %f80, %f73, %f36;
.loc 3 121 12, function_name Linfo_string3, inlined_at 5 61 25
max.ftz.f32 %f81, %f9, %f80;
.loc 5 54 5, function_name Linfo_string3, inlined_at 5 61 25
mul.ftz.f32 %f82, %f81, 0f43800000;
cvt.rzi.ftz.u32.f32 %r112, %f82;
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 25
min.u32 %r113, %r112, 255;
Ltmp7:
.loc 5 61 58
.loc 3 121 22, function_name Linfo_string3, inlined_at 5 61 58
min.ftz.f32 %f83, %f76, %f36;
.loc 3 121 12, function_name Linfo_string3, inlined_at 5 61 58
max.ftz.f32 %f84, %f9, %f83;
.loc 5 54 5, function_name Linfo_string3, inlined_at 5 61 58
mul.ftz.f32 %f85, %f84, 0f43800000;
cvt.rzi.ftz.u32.f32 %r114, %f85;
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 58
min.u32 %r115, %r114, 255;
Ltmp8:
.loc 5 61 91
.loc 3 121 22, function_name Linfo_string3, inlined_at 5 61 91
min.ftz.f32 %f86, %f79, %f36;
.loc 3 121 12, function_name Linfo_string3, inlined_at 5 61 91
max.ftz.f32 %f87, %f9, %f86;
.loc 5 54 5, function_name Linfo_string3, inlined_at 5 61 91
mul.ftz.f32 %f88, %f87, 0f43800000;
cvt.rzi.ftz.u32.f32 %r116, %f88;
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 91
min.u32 %r117, %r116, 255;
Ltmp9:
.loc 5 61 91
mul.wide.u32 %rd4, %r111, 4;
add.s64 %rd5, %rd3, %rd4;
Ltmp10:
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 91
cvt.u16.u32 %rs1, %r117;
Ltmp11:
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 58
cvt.u16.u32 %rs2, %r115;
Ltmp12:
.loc 6 870 10, function_name Linfo_string3, inlined_at 5 61 25
cvt.u16.u32 %rs3, %r113;
Ltmp13:
.loc 5 61 91
mov.u16 %rs4, 255;
st.global.v4.u8 [%rd5], {%rs3, %rs2, %rs1, %rs4};
.loc 1 97 1
ret;
Ltmp14:
Lfunc_end0:
}
// .globl __miss__ms
.visible .entry __miss__ms()
{
.reg .b32 %r<7>;
.reg .b64 %rd<2>;
.loc 1 100 0
Lfunc_begin1:
.loc 1 100 0
.loc 1 102 56
.loc 2 5739 5, function_name Linfo_string4, inlined_at 1 102 56
// begin inline asm
call (%rd1), _optix_get_sbt_data_ptr_64, ();
// end inline asm
Ltmp15:
.loc 1 103 5
ld.u32 %r2, [%rd1];
ld.u32 %r4, [%rd1+4];
ld.u32 %r6, [%rd1+8];
Ltmp16:
.loc 1 43 5
.loc 2 3921 5, function_name Linfo_string5, inlined_at 1 43 5
mov.u32 %r1, 0;
// begin inline asm
call _optix_set_payload, (%r1, %r2);
// end inline asm
Ltmp17:
.loc 1 44 5
.loc 2 3931 5, function_name Linfo_string6, inlined_at 1 44 5
mov.u32 %r3, 1;
// begin inline asm
call _optix_set_payload, (%r3, %r4);
// end inline asm
Ltmp18:
.loc 1 45 5
.loc 2 3941 5, function_name Linfo_string7, inlined_at 1 45 5
mov.u32 %r5, 2;
// begin inline asm
call _optix_set_payload, (%r5, %r6);
// end inline asm
Ltmp19:
.loc 1 104 1
ret;
Ltmp20:
Lfunc_end1:
}
// .globl __closesthit__ch
.visible .entry __closesthit__ch()
{
.reg .f32 %f<3>;
.reg .b32 %r<7>;
.loc 1 107 0
Lfunc_begin2:
.loc 1 107 0
.loc 1 111 33
.loc 2 5699 5, function_name Linfo_string8, inlined_at 1 111 33
// begin inline asm
call (%f1, %f2), _optix_get_triangle_barycentrics, ();
// end inline asm
Ltmp21:
.loc 4 132 10
mov.b32 %r2, %f1;
Ltmp22:
.loc 1 43 5
.loc 2 3921 5, function_name Linfo_string5, inlined_at 1 43 5
mov.u32 %r1, 0;
// begin inline asm
call _optix_set_payload, (%r1, %r2);
// end inline asm
Ltmp23:
.loc 4 132 10
mov.b32 %r4, %f2;
Ltmp24:
.loc 1 44 5
.loc 2 3931 5, function_name Linfo_string6, inlined_at 1 44 5
mov.u32 %r3, 1;
// begin inline asm
call _optix_set_payload, (%r3, %r4);
// end inline asm
Ltmp25:
.loc 1 45 5
.loc 2 3941 5, function_name Linfo_string7, inlined_at 1 45 5
mov.u32 %r5, 2;
mov.u32 %r6, 1065353216;
// begin inline asm
call _optix_set_payload, (%r5, %r6);
// end inline asm
Ltmp26:
.loc 1 114 1
ret;
Ltmp27:
Lfunc_end2:
}
.file 1 "optixTriangle/optixTriangle.cu"
.file 2 "include/internal/optix_7_device_impl.h"
.file 3 "sutil/vec_math.h"
.file 4 "cuda/include/crt/device_functions.hpp"
.file 5 "cuda/helpers.h"
.file 6 "include/crt/math_functions.hpp"
.section .debug_str
{
Linfo_string0:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,49,57,111,112,116
.b8 105,120,71,101,116,76,97,117,110,99,104,73,110,100,101,120,69,118,0
Linfo_string1:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,50,52,111,112,116
.b8 105,120,71,101,116,76,97,117,110,99,104,68,105,109,101,110,115,105,111,110,115,69,118,0
Linfo_string2:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,49,48,111,112,116
.b8 105,120,84,114,97,99,101,69,121,54,102,108,111,97,116,51,83,48,95,102,102,102,106,106,106,106,106,82,106,83,49,95,83,49,95,0
Linfo_string3:
.b8 95,90,50,49,113,117,97,110,116,105,122,101,85,110,115,105,103,110,101,100,56,66,105,116,115,102,0
Linfo_string4:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,50,50,111,112,116
.b8 105,120,71,101,116,83,98,116,68,97,116,97,80,111,105,110,116,101,114,69,118,0
Linfo_string5:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,49,55,111,112,116
.b8 105,120,83,101,116,80,97,121,108,111,97,100,95,48,69,106,0
Linfo_string6:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,49,55,111,112,116
.b8 105,120,83,101,116,80,97,121,108,111,97,100,95,49,69,106,0
Linfo_string7:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,49,55,111,112,116
.b8 105,120,83,101,116,80,97,121,108,111,97,100,95,50,69,106,0
Linfo_string8:
.b8 95,90,78,55,48,95,73,78,84,69,82,78,65,76,95,52,56,95,116,109,112,120,102,116,95,48,48,48,48,99,54,52,101,95,48,48,48,48,48,48
.b8 48,48,95,55,95,111,112,116,105,120,84,114,105,97,110,103,108,101,95,99,112,112,49,95,105,105,95,100,101,98,99,100,99,53,49,50,56,111,112,116
.b8 105,120,71,101,116,84,114,105,97,110,103,108,101,66,97,114,121,99,101,110,116,114,105,99,115,69,118,0
}
'''
| otk-pyoptix-master | test/sample_ptx.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import cupy as cp
import optix as ox
import pytest
import tutil
class Logger:
def __init__(self):
self.num_mssgs = 0
def __call__(self, level, tag, mssg):
print("[{:>2}][{:>12}]: {}".format(level, tag, mssg))
self.num_mssgs += 1
def log_callback(level, tag, mssg):
print("[{:>2}][{:>12}]: {}".format(level, tag, mssg))
class TestDeviceContextOptions:
def test_default_ctor(self):
options = ox.DeviceContextOptions()
assert options.logCallbackFunction is None
assert options.logCallbackLevel == 0
if tutil.optix_version_gte( (7,2) ):
assert options.validationMode == ox.DEVICE_CONTEXT_VALIDATION_MODE_OFF
def test_ctor0(self):
options = ox.DeviceContextOptions(log_callback)
assert options.logCallbackFunction == log_callback
def test_ctor1(self):
logger = Logger()
if tutil.optix_version_gte( (7,2) ):
options = ox.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 3,
validationMode = ox.DEVICE_CONTEXT_VALIDATION_MODE_ALL
)
else:
options = ox.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 3
)
assert options.logCallbackFunction == logger
assert options.logCallbackLevel == 3
if tutil.optix_version_gte( (7,2) ):
assert options.validationMode == ox.DEVICE_CONTEXT_VALIDATION_MODE_ALL
else:
assert options.validationMode == ox.DEVICE_CONTEXT_VALIDATION_MODE_OFF
def test_context_options_props(self):
options = ox.DeviceContextOptions()
options.logCallbackLevel = 1
assert options.logCallbackLevel == 1
options.logCallbackFunction = log_callback
assert options.logCallbackFunction == log_callback
class TestContext:
def test_create_destroy( self ):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
ctx.destroy()
def test_get_property( self ):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
v = ctx.getProperty( ox.DEVICE_PROPERTY_LIMIT_NUM_BITS_INSTANCE_VISIBILITY_MASK )
assert type( v ) is int
assert v > 1 and v <= 16 # at time of writing, was 8
ctx.destroy()
def test_set_log_callback( self ):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
logger = Logger()
ctx.setLogCallback( logger, 3 )
ctx.setLogCallback( None, 2 )
ctx.setLogCallback( log_callback, 1 )
ctx.destroy()
def test_cache_default(self):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
assert ctx.getCacheEnabled()
ctx.destroy()
def test_cache_enable_disable(self):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
        ctx.setCacheEnabled(False)
        assert not ctx.getCacheEnabled()
        ctx.setCacheEnabled(True)
assert ctx.getCacheEnabled()
ctx.destroy()
def test_cache_database_sizes(self):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
db_sizes = ( 1024, 1024*1024 )
ctx.setCacheDatabaseSizes( *db_sizes )
assert ctx.getCacheDatabaseSizes() == db_sizes
ctx.destroy()
def test_set_get_cache( self ):
ctx = ox.deviceContextCreate(0, ox.DeviceContextOptions())
v = ctx.getCacheLocation()
assert type(v) is str
loc = "/dev/null"
with pytest.raises( RuntimeError ):
ctx.setCacheLocation( loc ) # not valid dir
ctx.destroy()
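# Illustrative addition (not part of the original suite): a version sanity check.
# It assumes optix.version() returns a (major, minor, ...) tuple, as relied on by
# tutil.optix_version_gte.
def test_version_is_optix7_or_newer():
    v = optix.version()
    assert v[0] >= 7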
| otk-pyoptix-master | test/test_context.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import pytest
import cupy as cp
import tutil
class TestPipeline:
def test_pipeline_options( self ):
pipeline_options = optix.PipelineCompileOptions()
pipeline_options.usesMotionBlur = False
pipeline_options.traversableGraphFlags = optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING
pipeline_options.numPayloadValues = 2
pipeline_options.numAttributeValues = 2
pipeline_options.exceptionFlags = optix.EXCEPTION_FLAG_NONE
pipeline_options.pipelineLaunchParamsVariableName = "params1"
assert pipeline_options.pipelineLaunchParamsVariableName == "params1"
pipeline_options = optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING,
numPayloadValues = 3,
numAttributeValues = 4,
exceptionFlags = optix.EXCEPTION_FLAG_NONE,
pipelineLaunchParamsVariableName = "params2"
)
assert pipeline_options.pipelineLaunchParamsVariableName == "params2"
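    def test_pipeline_options_roundtrip( self ):
        # Minimal illustrative addition: the values below are arbitrary and only
        # exercise property set/get on the compile-options struct.
        options = optix.PipelineCompileOptions()
        options.numPayloadValues = 5
        options.numAttributeValues = 6
        assert options.numPayloadValues == 5
        assert options.numAttributeValues == 6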
| otk-pyoptix-master | test/test_pipeline.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import cupy as cp
ptx_string_old = '''
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-29069683
// Cuda compilation tools, release 11.1, V11.1.74
// Based on LLVM 3.4svn
//
.version 7.1
.target sm_52
.address_size 64
// .globl __raygen__hello
.const .align 8 .b8 params[16];
.visible .entry __raygen__hello(
)
{
.reg .pred %p<4>;
.reg .b16 %rs<5>;
.reg .f32 %f<39>;
.reg .b32 %r<13>;
.reg .b64 %rd<6>;
// inline asm
call (%r1), _optix_get_launch_index_x, ();
// inline asm
// inline asm
call (%r2), _optix_get_launch_index_y, ();
// inline asm
// inline asm
call (%rd1), _optix_get_sbt_data_ptr_64, ();
// inline asm
ld.const.u64 %rd2, [params];
cvta.to.global.u64 %rd3, %rd2;
ld.const.u32 %r4, [params+8];
mad.lo.s32 %r5, %r4, %r2, %r1;
ld.f32 %f1, [%rd1];
ld.f32 %f2, [%rd1+4];
ld.f32 %f3, [%rd1+8];
mov.f32 %f4, 0f3F800000;
min.ftz.f32 %f5, %f1, %f4;
mov.f32 %f6, 0f00000000;
max.ftz.f32 %f7, %f6, %f5;
min.ftz.f32 %f8, %f2, %f4;
max.ftz.f32 %f9, %f6, %f8;
min.ftz.f32 %f10, %f3, %f4;
max.ftz.f32 %f11, %f6, %f10;
lg2.approx.ftz.f32 %f12, %f7;
mul.ftz.f32 %f13, %f12, 0f3ED55555;
ex2.approx.ftz.f32 %f14, %f13;
lg2.approx.ftz.f32 %f15, %f9;
mul.ftz.f32 %f16, %f15, 0f3ED55555;
ex2.approx.ftz.f32 %f17, %f16;
lg2.approx.ftz.f32 %f18, %f11;
mul.ftz.f32 %f19, %f18, 0f3ED55555;
ex2.approx.ftz.f32 %f20, %f19;
setp.lt.ftz.f32 %p1, %f7, 0f3B4D2E1C;
mul.ftz.f32 %f21, %f7, 0f414EB852;
fma.rn.ftz.f32 %f22, %f14, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f23, %f21, %f22, %p1;
setp.lt.ftz.f32 %p2, %f9, 0f3B4D2E1C;
mul.ftz.f32 %f24, %f9, 0f414EB852;
fma.rn.ftz.f32 %f25, %f17, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f26, %f24, %f25, %p2;
setp.lt.ftz.f32 %p3, %f11, 0f3B4D2E1C;
mul.ftz.f32 %f27, %f11, 0f414EB852;
fma.rn.ftz.f32 %f28, %f20, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f29, %f27, %f28, %p3;
min.ftz.f32 %f30, %f23, %f4;
max.ftz.f32 %f31, %f6, %f30;
mul.ftz.f32 %f32, %f31, 0f43800000;
cvt.rzi.ftz.u32.f32 %r6, %f32;
mov.u32 %r7, 255;
min.u32 %r8, %r6, %r7;
min.ftz.f32 %f33, %f26, %f4;
max.ftz.f32 %f34, %f6, %f33;
mul.ftz.f32 %f35, %f34, 0f43800000;
cvt.rzi.ftz.u32.f32 %r9, %f35;
min.u32 %r10, %r9, %r7;
min.ftz.f32 %f36, %f29, %f4;
max.ftz.f32 %f37, %f6, %f36;
mul.ftz.f32 %f38, %f37, 0f43800000;
cvt.rzi.ftz.u32.f32 %r11, %f38;
min.u32 %r12, %r11, %r7;
mul.wide.u32 %rd4, %r5, 4;
add.s64 %rd5, %rd3, %rd4;
cvt.u16.u32 %rs1, %r12;
cvt.u16.u32 %rs2, %r10;
cvt.u16.u32 %rs3, %r8;
mov.u16 %rs4, 255;
st.global.v4.u8 [%rd5], {%rs3, %rs2, %rs1, %rs4};
ret;
}
'''
ptx_string = '''
//
// Generated by NVIDIA NVVM Compiler
//
// Compiler Build ID: CL-29373293
// Cuda compilation tools, release 11.2, V11.2.67
// Based on NVVM 7.0.1
//
.version 7.2
.target sm_60
.address_size 64
// .globl __raygen__hello
.visible .const .align 8 .b8 params[16];
.visible .entry __raygen__hello()
{
.reg .pred %p<4>;
.reg .b16 %rs<5>;
.reg .f32 %f<39>;
.reg .b32 %r<12>;
.reg .b64 %rd<6>;
.loc 1 39 0
Lfunc_begin0:
.loc 1 39 0
.loc 1 41 26
.loc 2 5675 5, function_name Linfo_string0, inlined_at 1 41 26
// begin inline asm
call (%r1), _optix_get_launch_index_x, ();
// end inline asm
.loc 2 5676 5, function_name Linfo_string0, inlined_at 1 41 26
// begin inline asm
call (%r2), _optix_get_launch_index_y, ();
// end inline asm
Ltmp0:
.loc 1 42 39
.loc 2 5703 5, function_name Linfo_string1, inlined_at 1 42 39
// begin inline asm
call (%rd1), _optix_get_sbt_data_ptr_64, ();
// end inline asm
Ltmp1:
.loc 1 43 5
ld.const.u64 %rd2, [params];
cvta.to.global.u64 %rd3, %rd2;
ld.const.u32 %r4, [params+8];
mad.lo.s32 %r5, %r4, %r2, %r1;
ld.f32 %f1, [%rd1];
ld.f32 %f2, [%rd1+4];
ld.f32 %f3, [%rd1+8];
.loc 3 121 22
mov.f32 %f4, 0f3F800000;
min.ftz.f32 %f5, %f1, %f4;
.loc 3 121 12
mov.f32 %f6, 0f00000000;
max.ftz.f32 %f7, %f6, %f5;
.loc 3 121 22
min.ftz.f32 %f8, %f2, %f4;
.loc 3 121 12
max.ftz.f32 %f9, %f6, %f8;
.loc 3 121 22
min.ftz.f32 %f10, %f3, %f4;
.loc 3 121 12
max.ftz.f32 %f11, %f6, %f10;
.loc 4 38 33
lg2.approx.ftz.f32 %f12, %f7;
mul.ftz.f32 %f13, %f12, 0f3ED55555;
ex2.approx.ftz.f32 %f14, %f13;
.loc 4 38 56
lg2.approx.ftz.f32 %f15, %f9;
mul.ftz.f32 %f16, %f15, 0f3ED55555;
ex2.approx.ftz.f32 %f17, %f16;
.loc 4 38 79
lg2.approx.ftz.f32 %f18, %f11;
mul.ftz.f32 %f19, %f18, 0f3ED55555;
ex2.approx.ftz.f32 %f20, %f19;
setp.lt.ftz.f32 %p1, %f7, 0f3B4D2E1C;
mul.ftz.f32 %f21, %f7, 0f414EB852;
fma.rn.ftz.f32 %f22, %f14, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f23, %f21, %f22, %p1;
setp.lt.ftz.f32 %p2, %f9, 0f3B4D2E1C;
mul.ftz.f32 %f24, %f9, 0f414EB852;
fma.rn.ftz.f32 %f25, %f17, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f26, %f24, %f25, %p2;
setp.lt.ftz.f32 %p3, %f11, 0f3B4D2E1C;
mul.ftz.f32 %f27, %f11, 0f414EB852;
fma.rn.ftz.f32 %f28, %f20, 0f3F870A3D, 0fBD6147AE;
selp.f32 %f29, %f27, %f28, %p3;
Ltmp2:
.loc 4 61 25
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 25
min.ftz.f32 %f30, %f23, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 25
max.ftz.f32 %f31, %f6, %f30;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 25
mul.ftz.f32 %f32, %f31, 0f43800000;
cvt.rzi.ftz.u32.f32 %r6, %f32;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 25
min.u32 %r7, %r6, 255;
Ltmp3:
.loc 4 61 58
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 58
min.ftz.f32 %f33, %f26, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 58
max.ftz.f32 %f34, %f6, %f33;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 58
mul.ftz.f32 %f35, %f34, 0f43800000;
cvt.rzi.ftz.u32.f32 %r8, %f35;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 58
min.u32 %r9, %r8, 255;
Ltmp4:
.loc 4 61 91
.loc 3 121 22, function_name Linfo_string2, inlined_at 4 61 91
min.ftz.f32 %f36, %f29, %f4;
.loc 3 121 12, function_name Linfo_string2, inlined_at 4 61 91
max.ftz.f32 %f37, %f6, %f36;
.loc 4 54 5, function_name Linfo_string2, inlined_at 4 61 91
mul.ftz.f32 %f38, %f37, 0f43800000;
cvt.rzi.ftz.u32.f32 %r10, %f38;
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 91
min.u32 %r11, %r10, 255;
Ltmp5:
.loc 4 61 91
mul.wide.u32 %rd4, %r5, 4;
add.s64 %rd5, %rd3, %rd4;
Ltmp6:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 91
cvt.u16.u32 %rs1, %r11;
Ltmp7:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 58
cvt.u16.u32 %rs2, %r9;
Ltmp8:
.loc 5 870 10, function_name Linfo_string2, inlined_at 4 61 25
cvt.u16.u32 %rs3, %r7;
Ltmp9:
.loc 4 61 91
mov.u16 %rs4, 255;
st.global.v4.u8 [%rd5], {%rs3, %rs2, %rs1, %rs4};
.loc 1 45 1
ret;
Ltmp10:
Lfunc_end0:
}
// .globl __anyhit__noop
.visible .entry __anyhit__noop()
{
.loc 1 48 0
Lfunc_begin1:
.loc 1 48 0
.loc 1 48 48
ret;
Ltmp11:
Lfunc_end1:
}
// .globl __closesthit__noop
.visible .entry __closesthit__noop()
{
.loc 1 51 0
Lfunc_begin2:
.loc 1 51 0
.loc 1 51 52
ret;
Ltmp12:
Lfunc_end2:
}
// .globl __intersection__noop
.visible .entry __intersection__noop()
{
.loc 1 54 0
Lfunc_begin3:
.loc 1 54 0
.loc 1 54 55
ret;
Ltmp13:
Lfunc_end3:
}
// .globl __intersect__noop
.visible .entry __intersect__noop()
{
.loc 1 57 0
Lfunc_begin4:
.loc 1 57 0
.loc 1 57 52
ret;
Ltmp14:
Lfunc_end4:
}
// .globl __miss__noop
.visible .entry __miss__noop()
{
.loc 1 60 0
Lfunc_begin5:
.loc 1 60 0
.loc 1 60 47
ret;
Ltmp15:
Lfunc_end5:
}
// .globl __direct_callable__noop
.visible .entry __direct_callable__noop()
{
.loc 1 63 0
Lfunc_begin6:
.loc 1 63 0
.loc 1 63 58
ret;
Ltmp16:
Lfunc_end6:
}
// .globl __continuation_callable__noop
.visible .entry __continuation_callable__noop()
{
.loc 1 66 0
Lfunc_begin7:
.loc 1 66 0
.loc 1 66 64
ret;
Ltmp17:
Lfunc_end7:
}
.file 1 "/home/kmorley/Code/optix_sdk/samples_exp/optixHello/draw_solid_color.cu"
.file 2 "/home/kmorley/Code/optix_sdk/include/internal/optix_7_device_impl.h"
.file 3 "/home/kmorley/Code/optix_sdk/samples_exp/sutil/vec_math.h"
.file 4 "/home/kmorley/Code/optix_sdk/samples_exp/cuda/helpers.h"
.file 5 "/usr/local/cuda/include/crt/math_functions.hpp"
.section .debug_str
{
Linfo_string0:
.b8 95,90,78,55,51,95,73,78,84,69,82,78,65,76,95,53,49,95,116,109,112,120,102,116,95,48,48,49,48,102,48,57,54,95,48,48,48,48,48,48
.b8 48,48,95,55,95,100,114,97,119,95,115,111,108,105,100,95,99,111,108,111,114,95,99,112,112,49,95,105,105,95,51,101,52,98,52,55,50,54,49,57
.b8 111,112,116,105,120,71,101,116,76,97,117,110,99,104,73,110,100,101,120,69,118,0
Linfo_string1:
.b8 95,90,78,55,51,95,73,78,84,69,82,78,65,76,95,53,49,95,116,109,112,120,102,116,95,48,48,49,48,102,48,57,54,95,48,48,48,48,48,48
.b8 48,48,95,55,95,100,114,97,119,95,115,111,108,105,100,95,99,111,108,111,114,95,99,112,112,49,95,105,105,95,51,101,52,98,52,55,50,54,50,50
.b8 111,112,116,105,120,71,101,116,83,98,116,68,97,116,97,80,111,105,110,116,101,114,69,118,0
Linfo_string2:
.b8 95,90,50,49,113,117,97,110,116,105,122,101,85,110,115,105,103,110,101,100,56,66,105,116,115,102,0
}
'''
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def create_default_ctx():
ctx_options = optix.DeviceContextOptions()
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def default_debug_level():
if optix_version_gte( (7,1) ):
return optix.COMPILE_DEBUG_LEVEL_DEFAULT
else:
return optix.COMPILE_DEBUG_LEVEL_LINEINFO
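def default_module_compile_options():
    # Convenience sketch (an addition): module compile options mirroring the
    # defaults used by the examples. Assumes ModuleCompileOptions exposes
    # optLevel/debugLevel as settable properties, as it does elsewhere in this repo.
    opts = optix.ModuleCompileOptions()
    opts.optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT
    opts.debugLevel = default_debug_level()
    return opts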
def create_default_module():
    ctx = create_default_ctx()
module_opts = optix.ModuleCompileOptions()
pipeline_opts = optix.PipelineCompileOptions()
mod, log = ctx.moduleCreateFromPTX(
module_opts,
pipeline_opts,
ptx_string,
)
return ( ctx, mod )
| otk-pyoptix-master | test/util/tutil.py |
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
if "PYOPTIX_CMAKE_ARGS" in os.environ:
cmake_args += [ os.environ[ 'PYOPTIX_CMAKE_ARGS' ] ]
        # The following is only needed for OptiX 7.0 compiles, because the OptiX device
        # header of that first version included stddef.h.
if "PYOPTIX_STDDEF_DIR" in os.environ:
cmake_args += [ "-DOptiX_STDDEF_DIR={}".format(os.environ[ 'PYOPTIX_STDDEF_DIR' ]) ]
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
print( "CMAKE CMD: <<<{}>>>".format( ' '.join( ['cmake', ext.sourcedir] + cmake_args ) ) )
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
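# Example build invocation (illustrative; the exact CMake variable your OptiX
# find-module expects may differ):
#   PYOPTIX_CMAKE_ARGS="-DOptiX_INSTALL_DIR=/opt/optix" pip install .
# For OptiX 7.0 only, also set PYOPTIX_STDDEF_DIR to a directory containing
# stddef.h, e.g. PYOPTIX_STDDEF_DIR="/usr/include/linux".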
setup(
name='optix',
version='0.0.1',
author='Keith Morley',
author_email='[email protected]',
description='Python bindings for NVIDIA OptiX',
long_description='',
ext_modules=[CMakeExtension('optix')],
cmdclass=dict(build_ext=CMakeBuild),
zip_safe=False,
)
| otk-pyoptix-master | optix/setup.py |
#!/usr/bin/env python3
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import os
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import array
import ctypes # C interop helpers
from PIL import Image # Image IO
from pynvrtc.compiler import Program
import path_util
#-------------------------------------------------------------------------------
#
# Util
#
#-------------------------------------------------------------------------------
pix_width = 512
pix_height = 512
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def round_up( val, mult_of ):
return val if val % mult_of == 0 else val + mult_of - val % mult_of
def get_aligned_itemsize( formats, alignment ):
names = []
for i in range( len(formats ) ):
names.append( 'x'+str(i) )
temp_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'align' : True
} )
return round_up( temp_dtype.itemsize, alignment )
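def _example_aligned_itemsize():
    # Illustrative only (never called): for a 64-bit pointer plus a 32-bit int,
    # the aligned struct occupies 16 bytes, and rounding up to a 16-byte
    # boundary leaves it at 16.
    return get_aligned_itemsize( ['u8', 'u4'], 16 )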
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def array_to_device_memory( numpy_array, stream=cp.cuda.Stream() ):
byte_size = numpy_array.size*numpy_array.dtype.itemsize
h_ptr = ctypes.c_void_p( numpy_array.ctypes.data )
d_mem = cp.cuda.memory.alloc( byte_size )
d_mem.copy_from_async( h_ptr, byte_size, stream )
return d_mem
def compile_cuda( cuda_file ):
with open( cuda_file, 'rb' ) as f:
src = f.read()
nvrtc_dll = os.environ.get('NVRTC_DLL')
if nvrtc_dll is None:
nvrtc_dll = ''
print("NVRTC_DLL = {}".format(nvrtc_dll))
prog = Program( src.decode(), cuda_file,
lib_name= nvrtc_dll )
compile_options = [
'-use_fast_math',
'-lineinfo',
'-default-device',
'-std=c++11',
'-rdc',
'true',
f'-I{path_util.include_path}',
f'-I{path_util.cuda_tk_path}'
]
print("pynvrtc compile options = {}".format(compile_options))
    # OptiX 7.0 compiles need the path to the system stddef.h.
    # The value of optix.stddef_path is a compile-time constant; when building
    # the module, the value can be specified via an environment variable, e.g.
    # export PYOPTIX_STDDEF_DIR="/usr/include/linux"
if not optix_version_gte( (7,1) ):
compile_options.append( f'-I{path_util.stddef_path}' )
ptx = prog.compile( compile_options )
return ptx
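# On Windows, pynvrtc may need the NVRTC DLL name supplied via NVRTC_DLL; the
# exact name depends on the installed CUDA toolkit (illustrative example):
#   set NVRTC_DLL=nvrtc64_112_0.dll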
#-------------------------------------------------------------------------------
#
# Optix setup
#
#-------------------------------------------------------------------------------
def create_ctx():
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix.version()[1] >= 2:
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
def set_pipeline_options():
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags =
optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING,
numPayloadValues = 2,
numAttributeValues = 2,
exceptionFlags = optix.EXCEPTION_FLAG_NONE,
pipelineLaunchParamsVariableName = "params"
)
def create_module( ctx, pipeline_options, hello_ptx ):
print( "Creating optix module ..." )
formats = ['u8', 'u4']
itemsize = get_aligned_itemsize( formats, 16 )
params_dtype = np.dtype( {
'names' : ['image', 'image_width' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
if optix_version_gte( (7,2) ):
bound_value = array.array( 'i', [pix_width] )
bound_value_entry = optix.ModuleCompileBoundValueEntry(
pipelineParamOffsetInBytes = params_dtype.fields['image_width'][1],
boundValue = bound_value,
annotation = "my_bound_value"
)
module_options = optix.ModuleCompileOptions(
maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT,
boundValues = [ bound_value_entry ],
debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
)
else:
module_options = optix.ModuleCompileOptions(
maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT,
debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
)
module, log = ctx.moduleCreateFromPTX(
module_options,
pipeline_options,
hello_ptx
)
print( "\tModule create log: <<<{}>>>".format( log ) )
return module
def create_program_groups( ctx, module ):
print( "Creating program groups ... " )
raygen_prog_group_desc = optix.ProgramGroupDesc()
raygen_prog_group_desc.raygenModule = module
raygen_prog_group_desc.raygenEntryFunctionName = "__raygen__hello"
log = None
raygen_prog_group = None
if optix_version_gte( (7,4) ):
# ProgramGroupOptions introduced in OptiX 7.4
program_group_options = optix.ProgramGroupOptions()
raygen_prog_group, log = ctx.programGroupCreate(
[ raygen_prog_group_desc ],
program_group_options,
)
else:
raygen_prog_group, log = ctx.programGroupCreate(
[ raygen_prog_group_desc ]
)
print( "\tProgramGroup raygen create log: <<<{}>>>".format( log ) )
miss_prog_group_desc = optix.ProgramGroupDesc( missEntryFunctionName = "")
program_group_options = optix.ProgramGroupOptions()
miss_prog_group, log = ctx.programGroupCreate(
[ miss_prog_group_desc ]
# Even in 7.4+, the OptixProgramGroupOptions param is optional
)
print( "\tProgramGroup miss create log: <<<{}>>>".format( log ) )
return ( raygen_prog_group, miss_prog_group )
def create_pipeline( ctx, raygen_prog_group, pipeline_compile_options ):
print( "Creating pipeline ... " )
max_trace_depth = 0
program_groups = [ raygen_prog_group ]
pipeline_link_options = optix.PipelineLinkOptions()
pipeline_link_options.maxTraceDepth = max_trace_depth
pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
log = ""
pipeline = ctx.pipelineCreate(
pipeline_compile_options,
pipeline_link_options,
program_groups,
log
)
stack_sizes = optix.StackSizes()
for prog_group in program_groups:
optix.util.accumulateStackSizes( prog_group, stack_sizes )
(dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size) = \
optix.util.computeStackSizes(
stack_sizes,
0, # maxTraceDepth
0, # maxCCDepth
0 # maxDCDepth
)
pipeline.setStackSize(
dc_stack_size_from_trav,
dc_stack_size_from_state,
cc_stack_size,
2 # maxTraversableDepth
)
return pipeline
def create_sbt( raygen_prog_group, miss_prog_group ):
print( "Creating sbt ... " )
global d_raygen_sbt
global d_miss_sbt
header_format = '{}B'.format( optix.SBT_RECORD_HEADER_SIZE )
#
# raygen record
#
formats = [ header_format, 'f4', 'f4', 'f4' ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'r', 'g', 'b' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_raygen_sbt = np.array( [ (0, 0.462, 0.725, 0.0 ) ], dtype=dtype )
optix.sbtRecordPackHeader( raygen_prog_group, h_raygen_sbt )
d_raygen_sbt = array_to_device_memory( h_raygen_sbt )
#
# miss record
#
formats = [ header_format, 'i4']
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'x' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_miss_sbt = np.array( [ (0, 127 ) ], dtype=dtype )
optix.sbtRecordPackHeader( miss_prog_group, h_miss_sbt )
d_miss_sbt = array_to_device_memory( h_miss_sbt )
sbt = optix.ShaderBindingTable()
sbt.raygenRecord = d_raygen_sbt.ptr
sbt.missRecordBase = d_miss_sbt.ptr
sbt.missRecordStrideInBytes = h_miss_sbt.dtype.itemsize
sbt.missRecordCount = 1
return sbt
def launch( pipeline, sbt ):
print( "Launching ... " )
pix_bytes = pix_width*pix_height*4
h_pix = np.zeros( (pix_width,pix_height,4), 'B' )
h_pix[0:256, 0:256] = [255, 128, 0, 255]
d_pix = cp.array( h_pix )
formats = ['u8', 'u4']
itemsize = get_aligned_itemsize( formats, 8 )
params_dtype = np.dtype( {
'names' : ['image', 'image_width' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_params = np.array( [ ( d_pix.data.ptr, pix_width ) ], dtype=params_dtype )
d_params = array_to_device_memory( h_params )
stream = cp.cuda.Stream()
optix.launch(
pipeline,
stream.ptr,
d_params.ptr,
h_params.dtype.itemsize,
sbt,
pix_width,
pix_height,
1 # depth
)
stream.synchronize()
h_pix = cp.asnumpy( d_pix )
return h_pix
#-------------------------------------------------------------------------------
#
# main
#
#-------------------------------------------------------------------------------
def main():
ctx = create_ctx()
hello_cu = os.path.join(os.path.dirname(__file__), 'hello.cu')
hello_ptx = compile_cuda(hello_cu)
pipeline_options = set_pipeline_options()
module = create_module( ctx, pipeline_options, hello_ptx )
raygen_prog_group, miss_prog_group = create_program_groups( ctx, module )
pipeline = create_pipeline( ctx, raygen_prog_group, pipeline_options )
sbt = create_sbt( raygen_prog_group, miss_prog_group )
pix = launch( pipeline, sbt )
print( "Total number of log messages: {}".format( logger.num_mssgs ) )
img = Image.fromarray( pix, 'RGBA' )
img.save( 'my.png' )
img.show()
if __name__ == "__main__":
main()
| otk-pyoptix-master | examples/hello.py |
#!/usr/bin/env python3
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import Imath
import OpenEXR
import cupy as cp
import cupy.cuda.runtime as cuda
import numpy as np
import ctypes
import sys
#-------------------------------------------------------------------------------
#
# Helpers
#
#-------------------------------------------------------------------------------
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
class State:
def __init__( self ):
self.tile_size = (0, 0)
self.exposure = 0.0
self.layer = optix.DenoiserLayer()
self.guide_layer = optix.DenoiserGuideLayer()
#self.scratch_size = 0
self.overlap = 0
self.d_intensity = 0 #
self.d_scratch = 0 # CUPY RAII memory pointers
self.d_state = 0 #
def __str__( self ):
return (
"w : {}\n".format( self.layer.input.width ) +
"h : {}\n".format( self.layer.input.height ) +
"tile : {}\n".format( self.tile_size ) +
"expos : {}" .format( self.exposure )
)
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def create_optix_image_2D( w, h, image ):
oi = optix.Image2D()
byte_size = w*h*4*4
d_mem = cuda.malloc( byte_size )
if image is not None:
cuda.memcpy(
d_mem,
image.ctypes.data,
byte_size,
cuda.memcpyHostToDevice
)
oi.data = d_mem
oi.width = w
oi.height = h
oi.rowStrideInBytes = w*4*4
oi.pixelStrideInBytes = 4*4
oi.format = optix.PIXEL_FORMAT_FLOAT4
return oi
def free_optix_image_2D( optix_image ):
    cuda.free( optix_image.data )
    optix_image.data = 0
def load_exr( filename ):
exr_file = OpenEXR.InputFile( filename )
exr_header = exr_file.header()
r,g,b = exr_file.channels("RGB", pixel_type=Imath.PixelType(Imath.PixelType.FLOAT) )
dw = exr_header[ "dataWindow" ]
w = dw.max.x - dw.min.x + 1
h = dw.max.y - dw.min.y + 1
image = np.ones( (h, w, 4), dtype = np.float32 )
image[:, :, 0] = np.core.multiarray.frombuffer( r, dtype = np.float32 ).reshape(h, w)
image[:, :, 1] = np.core.multiarray.frombuffer( g, dtype = np.float32 ).reshape(h, w)
image[:, :, 2] = np.core.multiarray.frombuffer( b, dtype = np.float32 ).reshape(h, w)
return create_optix_image_2D( w, h, image.flatten() )
def write_exr( filename, optix_image ):
w = optix_image.width
h = optix_image.height
data = np.zeros( (h*w*4), dtype = np.float32 )
cuda.memcpy(
data.ctypes.data,
optix_image.data,
w*h*4*4,
cuda.memcpyDeviceToHost
)
exr = OpenEXR.OutputFile( filename, OpenEXR.Header( w, h ) )
exr.writePixels( {
'R' : data[0::4].tobytes(),
'G' : data[1::4].tobytes(),
'B' : data[2::4].tobytes()
} )
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description = 'Apply OptiX denoiser to input images'
)
parser.add_argument(
'-n', '--normal',
metavar = 'normal.exr',
type = str,
help = 'Screen space normals input'
)
parser.add_argument(
'-a', '--albedo',
metavar = 'albedo.exr',
type = str,
help = 'Albedo input'
)
parser.add_argument(
'-o', '--out',
metavar = 'out.exr',
type = str,
help="Output filename, default 'denoised.exr'" ,
default='denoised.exr'
)
parser.add_argument(
'-t', '--tilesize',
metavar='INT',
type = int,
nargs = 2,
help="Output image name.",
default = ( 0, 0 )
)
parser.add_argument(
'-e', '--exposure',
metavar = 'FLOAT',
type = float,
help = "Exposure to be applied to output",
default = 1.0
)
parser.add_argument(
'color',
metavar = 'color.exr',
type = str,
help = "Noisy color image name."
)
return parser.parse_args()
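# Typical invocation (file names are illustrative):
#   python denoiser.py color.exr -a albedo.exr -n normal.exr -t 256 256 -o out.exr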
def load_state( args, state ):
print( "Loading color file '{}'".format( args.color) )
state.layer.input = load_exr( args.color )
state.layer.output = create_optix_image_2D( state.layer.input.width, state.layer.input.height, None )
print( " ... success" )
if args.normal:
print( "Loading normal file '{}'".format( args.normal) )
state.guide_layer.normal = load_exr( args.normal )
w = state.guide_layer.normal.width
h = state.guide_layer.normal.height
if w != state.layer.input.width or h != state.layer.input.height:
print( "ERROR: Normal image dims do not match color image dims" )
sys.exit(0)
print( " ... success" )
if args.albedo:
print( "Loading albedo file '{}'".format( args.albedo) )
state.guide_layer.albedo = load_exr( args.albedo )
w = state.guide_layer.albedo.width
h = state.guide_layer.albedo.height
if w != state.layer.input.width or h != state.layer.input.height:
print( "ERROR: Albedo image dims do not match color image dims" )
sys.exit(0)
print( " ... success" )
if args.tilesize[0] <= 0 or args.tilesize[1] <= 0:
state.tile_size = (
state.layer.input.width,
state.layer.input.height
)
else:
state.tile_size = args.tilesize
state.exposure = args.exposure
#-------------------------------------------------------------------------------
#
# Denoising
#
#-------------------------------------------------------------------------------
def create_ctx():
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix_version_gte( (7,2) ):
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
def denoiser_init( ctx, state ):
options = optix.DenoiserOptions()
options.guideAlbedo = 0 if state.guide_layer.albedo.width == 0 else 1
options.guideNormal = 0 if state.guide_layer.normal.width == 0 else 1
denoiser = ctx.denoiserCreate( optix.DENOISER_MODEL_KIND_HDR, options )
sizes = denoiser.computeMemoryResources(
state.tile_size[0],
state.tile_size[1]
)
    if state.tile_size[0] == state.layer.input.width and state.tile_size[1] == state.layer.input.height:
state.scratch_size = sizes.withoutOverlapScratchSizeInBytes
else:
state.scratch_size = sizes.withOverlapScratchSizeInBytes
state.overlap = sizes.overlapWindowSizeInPixels
state.d_state = cp.empty( ( sizes.stateSizeInBytes ), dtype='B' )
state.d_intensity = cp.empty( ( 1 ), 'f4' )
state.d_scratch = cp.empty( ( state.scratch_size ), dtype='B' )
denoiser.setup(
0,
state.tile_size[0] + 2*state.overlap,
state.tile_size[1] + 2*state.overlap,
state.d_state.data.ptr,
state.d_state.nbytes,
state.d_scratch.data.ptr,
state.d_scratch.nbytes
)
return denoiser
def denoiser_exec( denoiser, state ):
params = optix.DenoiserParams()
params.denoiseAlpha = 0
params.hdrIntensity = state.d_intensity
params.hdrAverageColor = 0
params.blendFactor = 0.0
denoiser.computeIntensity(
0,
state.layer.input,
state.d_intensity.data.ptr,
state.d_scratch.data.ptr,
state.d_scratch.nbytes
)
denoiser.invokeTiled(
0, # CUDA stream
params,
state.d_state.data.ptr,
state.d_state.nbytes,
state.guide_layer,
[ state.layer ],
state.d_scratch.data.ptr,
state.d_scratch.nbytes,
state.overlap,
state.tile_size[0],
state.tile_size[1]
)
#-------------------------------------------------------------------------------
#
# Main
#
#-------------------------------------------------------------------------------
def main():
args = parse_args()
state = State()
load_state( args, state )
print( "\n-------- State loaded --------" )
print( state )
print( "------------------------------\n" )
ctx = create_ctx()
denoiser = denoiser_init( ctx, state )
denoiser_exec( denoiser, state )
write_exr( args.out, state.layer.output )
if __name__ == "__main__":
main()
| otk-pyoptix-master | examples/denoiser.py |
#!/usr/bin/env python3
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import optix
import os
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import array
import ctypes # C interop helpers
from PIL import Image, ImageOps # Image IO
from pynvrtc.compiler import Program
import path_util
class State:
def __init__( self ):
self.context = None
self.tri_gas_handle = 0
self.d_tri_gas_output_buffer = 0 # Triangle AS memory
self.sphere_gas_handle = 0 # Traversable handle for sphere
self.d_sphere_gas_output_buffer = 0 # Sphere AS memory
self.sphere_motion_transform_handle = 0
self.d_sphere_motion_transform = 0
self.ias_handle = 0 # Traversable handle for instance AS
self.d_ias_output_buffer = 0 # Instance AS memory
self.ptx_module = None
self.pipeline_compile_options = None
self.pipeline = None
self.raygen_prog_group = None
self.miss_group = None
self.tri_hit_group = None
self.sphere_hit_group = None
        self.stream = cp.cuda.Stream()
self.params = None
self.d_params = 0
self.sbt = None
self.d_raygen_record = 0
self.d_miss_records = 0
self.d_hitgroup_records = 0
#-------------------------------------------------------------------------------
#
# Util
#
#-------------------------------------------------------------------------------
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def round_up( val, mult_of ):
return val if val % mult_of == 0 else val + mult_of - val % mult_of
def get_aligned_itemsize( formats, alignment ):
names = []
for i in range( len(formats ) ):
names.append( 'x'+str(i) )
temp_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'aligned' : True
} )
return round_up( temp_dtype.itemsize, alignment )
def array_to_device_memory( numpy_array, stream=cp.cuda.Stream() ):
byte_size = numpy_array.size*numpy_array.dtype.itemsize
h_ptr = ctypes.c_void_p( numpy_array.ctypes.data )
d_mem = cp.cuda.memory.alloc( byte_size )
d_mem.copy_from_async( h_ptr, byte_size, stream )
return d_mem
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def compile_cuda( cuda_file ):
with open( cuda_file, 'rb' ) as f:
src = f.read()
nvrtc_dll = os.environ.get('NVRTC_DLL')
if nvrtc_dll is None:
nvrtc_dll = ''
print("NVRTC_DLL = {}".format(nvrtc_dll))
prog = Program( src.decode(), cuda_file,
lib_name= nvrtc_dll )
compile_options = [
'-use_fast_math',
'-lineinfo',
'-default-device',
'-std=c++11',
'-rdc',
'true',
f'-I{path_util.cuda_tk_path}',
f'-I{path_util.include_path}'
]
    # OptiX 7.0 compiles need the path to the system stddef.h.
    # The value of optix.stddef_path is a compile-time constant; when building
    # the module, the value can be specified via an environment variable, e.g.
    # export PYOPTIX_STDDEF_DIR="/usr/include/linux"
if (optix.version()[1] == 0):
compile_options.append( f'-I{path_util.stddef_path}' )
ptx = prog.compile( compile_options )
return ptx
#-------------------------------------------------------------------------------
#
# Optix setup
#
#-------------------------------------------------------------------------------
pix_width = 768
pix_height = 768
def create_context( state ):
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix_version_gte( (7,2) ):
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
state.context = optix.deviceContextCreate( cu_ctx, ctx_options )
def build_triangle_gas( state ):
NUM_KEYS = 3
motion_options = optix.MotionOptions()
motion_options.numKeys = NUM_KEYS
motion_options.timeBegin = 0.0
motion_options.timeEnd = 1.0
motion_options.flags = optix.MOTION_FLAG_NONE
accel_options = optix.AccelBuildOptions(
buildFlags = optix.BUILD_FLAG_ALLOW_COMPACTION,
operation = optix.BUILD_OPERATION_BUILD,
motionOptions = motion_options
)
#
# Copy triangle mesh data to device
#
NUM_VERTS = 3
vertices_0 = cp.array( [
0.0, 0.0, 0.0, 0.0,
1.0, 0.0, 0.0, 0.0,
0.5, 1.0, 0.0, 0.0,
], dtype = 'f4' )
vertices_1 = cp.array( [
0.5, 0.0, 0.0, 0.0,
1.5, 0.0, 0.0, 0.0,
1.0, 1.0, 0.0, 0.0,
],
dtype = 'f4'
)
vertices_2 = cp.array( [
0.5, -0.5, 0.0, 0.0,
1.5, -0.5, 0.0, 0.0,
1.0, 0.5, 0.0, 0.0
],
dtype = 'f4'
)
triangle_input = optix.BuildInputTriangleArray()
triangle_input.vertexFormat = optix.VERTEX_FORMAT_FLOAT3
triangle_input.vertexStrideInBytes = np.dtype( 'f4' ).itemsize*4 # four floats per vert
triangle_input.numVertices = NUM_VERTS
triangle_input.vertexBuffers = [ vertices_0.data.ptr, vertices_1.data.ptr, vertices_2.data.ptr ]
triangle_input.flags = [ optix.GEOMETRY_FLAG_DISABLE_ANYHIT ]
triangle_input.numSbtRecords = 1
triangle_input.sbtIndexOffsetBuffer = 0
gas_buffer_sizes = state.context.accelComputeMemoryUsage(
[ accel_options ],
[ triangle_input ]
)
d_temp_buffer = cp.cuda.alloc( gas_buffer_sizes.tempSizeInBytes )
d_output_buffer = cp.cuda.alloc( gas_buffer_sizes.outputSizeInBytes )
d_result = cp.array( [ 0 ], dtype = 'u8' )
emit_property = optix.AccelEmitDesc(
type = optix.PROPERTY_TYPE_COMPACTED_SIZE,
result = d_result.data.ptr
)
state.tri_gas_handle = state.context.accelBuild(
0, # CUDA stream
[ accel_options ],
[ triangle_input ],
d_temp_buffer.ptr,
gas_buffer_sizes.tempSizeInBytes,
d_output_buffer.ptr,
gas_buffer_sizes.outputSizeInBytes,
[ emit_property ]
)
compacted_gas_size = cp.asnumpy( d_result )[0]
    if compacted_gas_size < gas_buffer_sizes.outputSizeInBytes and False:  # 'and False' keeps compaction disabled in this example
state.d_tri_gas_output_buffer = cp.cuda.alloc( compacted_gas_size )
state.tri_gas_handle = state.context.accelCompact(
0, #CUDA stream
state.tri_gas_handle,
state.d_tri_gas_output_buffer.ptr,
compacted_gas_size
)
else:
state.d_tri_gas_output_buffer = d_output_buffer
def build_sphere_gas( state ):
accel_options = optix.AccelBuildOptions(
buildFlags = optix.BUILD_FLAG_ALLOW_COMPACTION,
operation = optix.BUILD_OPERATION_BUILD
)
aabb = cp.array( [
-1.5, -1.0, -0.5,
-0.5, 0.0, 0.5
#-1.0, -1.0, -1.0,
# 1.0, 1.0, 1.0
], dtype = 'f4'
)
sphere_input = optix.BuildInputCustomPrimitiveArray(
aabbBuffers = [ aabb.data.ptr ],
numPrimitives = 1,
#flags = [ optix.GEOMETRY_FLAG_DISABLE_ANYHIT ],
flags = [ optix.GEOMETRY_FLAG_NONE],
numSbtRecords = 1
)
gas_buffer_sizes = state.context.accelComputeMemoryUsage(
[ accel_options ],
[ sphere_input ]
)
d_temp_buffer = cp.cuda.alloc( gas_buffer_sizes.tempSizeInBytes )
d_output_buffer = cp.cuda.alloc( gas_buffer_sizes.outputSizeInBytes )
d_result = cp.array( [ 0 ], dtype = 'u8' )
emit_property = optix.AccelEmitDesc(
type = optix.PROPERTY_TYPE_COMPACTED_SIZE,
result = d_result.data.ptr
)
state.sphere_gas_handle = state.context.accelBuild(
0, # CUDA stream
[ accel_options ],
[ sphere_input ],
d_temp_buffer.ptr,
gas_buffer_sizes.tempSizeInBytes,
d_output_buffer.ptr,
gas_buffer_sizes.outputSizeInBytes,
[ emit_property ]
)
compacted_gas_size = cp.asnumpy( d_result )[0]
    if compacted_gas_size < gas_buffer_sizes.outputSizeInBytes and False:  # 'and False' keeps compaction disabled in this example
state.d_sphere_gas_output_buffer = cp.cuda.alloc( compacted_gas_size )
state.sphere_gas_handle = state.context.accelCompact(
0, #CUDA stream
state.sphere_gas_handle,
state.d_sphere_gas_output_buffer,
compacted_gas_size
)
else:
state.d_sphere_gas_output_buffer = d_output_buffer
def create_sphere_xform( state ):
motion_keys = [
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.5,
0.0, 0.0, 1.0, 0.0
]
motion_options = optix.MotionOptions()
motion_options.numKeys = 2
motion_options.timeBegin = 0.0
motion_options.timeEnd = 1.0
motion_options.flags = optix.MOTION_FLAG_NONE
motion_transform = optix.MatrixMotionTransform(
child = state.sphere_gas_handle,
motionOptions = motion_options,
transform = motion_keys
)
xform_bytes = optix.getDeviceRepresentation( motion_transform )
state.d_sphere_motion_transform = cp.array( np.frombuffer( xform_bytes, dtype='B' ) )
state.sphere_motion_transform_handle = optix.convertPointerToTraversableHandle(
state.context,
state.d_sphere_motion_transform.data.ptr,
optix.TRAVERSABLE_TYPE_MATRIX_MOTION_TRANSFORM
)
def build_ias( state ):
instance_xform = [
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0
]
sphere_instance = optix.Instance(
transform = instance_xform,
flags = optix.INSTANCE_FLAG_NONE,
instanceId = 0,
sbtOffset = 0,
visibilityMask = 1,
traversableHandle = state.sphere_motion_transform_handle
)
triangle_instance = optix.Instance(
transform = instance_xform,
flags = optix.INSTANCE_FLAG_NONE,
instanceId = 1,
sbtOffset = 1,
visibilityMask = 1,
traversableHandle = state.tri_gas_handle
)
instances = [ sphere_instance, triangle_instance ]
instances_bytes = optix.getDeviceRepresentation( instances )
d_instances = cp.array( np.frombuffer( instances_bytes, dtype='B' ) )
instance_input = optix.BuildInputInstanceArray(
instances = d_instances.data.ptr,
numInstances = len( instances )
)
motion_options = optix.MotionOptions()
motion_options.numKeys = 2
motion_options.timeBegin = 0.0
motion_options.timeEnd = 1.0
motion_options.flags = optix.MOTION_FLAG_NONE
accel_options = optix.AccelBuildOptions(
buildFlags = optix.BUILD_FLAG_NONE,
operation = optix.BUILD_OPERATION_BUILD,
motionOptions = motion_options
)
ias_buffer_sizes = state.context.accelComputeMemoryUsage(
[ accel_options ],
[ instance_input ]
)
d_temp_buffer = cp.cuda.alloc( ias_buffer_sizes.tempSizeInBytes )
state.d_ias_output_buffer = cp.cuda.alloc( ias_buffer_sizes.outputSizeInBytes )
state.ias_handle = state.context.accelBuild(
0, # CUDA stream
[ accel_options ],
[ instance_input ],
d_temp_buffer.ptr,
ias_buffer_sizes.tempSizeInBytes,
state.d_ias_output_buffer.ptr,
ias_buffer_sizes.outputSizeInBytes,
[] # emitted properties
)
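# The traversable graph is now IAS -> matrix motion transform -> sphere GAS (plus
# IAS -> triangle GAS), which is why create_pipeline() sets maxTraversableDepth to 3.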
def create_module( state ):
module_compile_options = optix.ModuleCompileOptions()
module_compile_options.maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT
module_compile_options.optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT
module_compile_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
state.pipeline_compile_options = optix.PipelineCompileOptions(
traversableGraphFlags = optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_ANY,
numPayloadValues = 3,
numAttributeValues = 3,
usesMotionBlur = True,
exceptionFlags = optix.EXCEPTION_FLAG_NONE,
pipelineLaunchParamsVariableName = "params"
)
simple_motion_blur_cu = os.path.join(os.path.dirname(__file__), 'simpleMotionBlur.cu')
simple_motion_blur_ptx = compile_cuda( simple_motion_blur_cu )
state.ptx_module, log = state.context.moduleCreateFromPTX(
module_compile_options,
state.pipeline_compile_options,
simple_motion_blur_ptx
)
def create_program_groups( state ):
raygen_program_group_desc = optix.ProgramGroupDesc(
raygenModule = state.ptx_module,
raygenEntryFunctionName = "__raygen__rg"
)
state.raygen_prog_group, log = state.context.programGroupCreate(
[ raygen_program_group_desc ]
)
print( "\tProgramGroup raygen create log: <<<{}>>>".format( log ) )
miss_prog_group_desc = optix.ProgramGroupDesc(
missModule = state.ptx_module,
missEntryFunctionName = "__miss__camera"
)
state.miss_group, log = state.context.programGroupCreate(
[ miss_prog_group_desc ]
)
print( "\tProgramGroup miss create log: <<<{}>>>".format( log ) )
hitgroup_prog_group_desc = optix.ProgramGroupDesc(
hitgroupModuleCH = state.ptx_module,
hitgroupEntryFunctionNameCH = "__closesthit__camera",
)
state.tri_hit_group, log = state.context.programGroupCreate(
[ hitgroup_prog_group_desc ]
)
print( "\tProgramGroup triangle hit create log: <<<{}>>>".format( log ) )
hitgroup_prog_group_desc.hitgroupModuleIS = state.ptx_module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameIS = "__intersection__sphere"
state.sphere_hit_group, log = state.context.programGroupCreate(
[ hitgroup_prog_group_desc ]
)
print( "\tProgramGroup sphere hit create log: <<<{}>>>".format( log ) )
def create_pipeline( state ):
program_groups = [
state.raygen_prog_group,
state.miss_group,
state.sphere_hit_group,
state.tri_hit_group
]
pipeline_link_options = optix.PipelineLinkOptions(
maxTraceDepth = 2,
debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
)
log = ""
state.pipeline = state.context.pipelineCreate(
state.pipeline_compile_options,
pipeline_link_options,
program_groups,
log
)
stack_sizes = optix.StackSizes()
for prog_group in program_groups:
optix.util.accumulateStackSizes( prog_group, stack_sizes )
( dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size ) = \
optix.util.computeStackSizes(
stack_sizes,
1, # maxTraceDepth
0, # maxCCDepth
0 # maxDCDepth
)
state.pipeline.setStackSize(
1024, #dc_stack_size_from_trav,
1024, #dc_stack_size_from_state,
1024, #cc_stack_size,
3 # maxTraversableDepth ( 3 since largest depth is IAS->MT->GAS )
)
def create_sbt( state ):
print( "Creating sbt ... " )
header_format = '{}V'.format( optix.SBT_RECORD_HEADER_SIZE )
#
# raygen record
#
formats = [ header_format ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header'],
'formats' : formats,
'itemsize' : itemsize,
'aligned' : True
} )
h_raygen_record = np.array(
[ optix.sbtRecordGetHeader( state.raygen_prog_group) ],
dtype = dtype
)
optix.sbtRecordPackHeader( state.raygen_prog_group, h_raygen_record )
state.d_raygen_record = array_to_device_memory( h_raygen_record )
#
# miss records
#
formats = [ header_format, 'f4','f4','f4', 'u4' ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : [ 'header', 'r', 'g', 'b', 'pad' ],
'formats' : formats,
'itemsize' : itemsize,
'aligned' : True
} )
h_miss_record = np.array( [ (
optix.sbtRecordGetHeader( state.miss_group ),
0.1, 0.1, 0.1,
0
) ],
dtype=dtype
)
optix.sbtRecordPackHeader( state.miss_group, h_miss_record )
state.d_miss_records = array_to_device_memory( h_miss_record )
#
# hit group records
#
formats = [
header_format,
'f4','f4','f4',
'f4','f4','f4',
'f4',
'u4'
]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
hit_record_dtype = np.dtype( {
'names' : [
'header',
'r','g','b',
'x','y','z',
'rad',
'pad'
],
'formats' : formats,
'itemsize' : itemsize,
'aligned' : True
} )
sphere_record_header = optix.sbtRecordGetHeader( state.sphere_hit_group )
triangle_record_header = optix.sbtRecordGetHeader( state.tri_hit_group )
h_hitgroup_records = np.array( [
(
sphere_record_header,
0.9, 0.1, 0.1,
-1.0, -0.5, 0.1,
0.5,
0.0
),
(
triangle_record_header,
0.1, 0.1, 0.9,
0.0, 0.0, 0.0, # unused
0.0, # unused
0.0
) ],
dtype=hit_record_dtype
)
state.d_hitgroup_records = array_to_device_memory( h_hitgroup_records )
state.sbt = optix.ShaderBindingTable(
raygenRecord = state.d_raygen_record.ptr,
missRecordBase = state.d_miss_records.ptr,
missRecordStrideInBytes = h_miss_record.dtype.itemsize,
missRecordCount = 1,
hitgroupRecordBase = state.d_hitgroup_records.ptr,
hitgroupRecordStrideInBytes = h_hitgroup_records.dtype.itemsize,
hitgroupRecordCount = 2
)
def launch( state ):
print( "Launching ... " )
pix_bytes = pix_width * pix_height * 4
h_accum = np.zeros( (pix_width, pix_height, 4 ), 'f4' )
h_accum[0:pix_width, 0:pix_height] = [255, 128, 0, 255]
d_accum = cp.array( h_accum )
h_frame = np.zeros( (pix_width, pix_height, 4 ), 'B' )
h_frame[0:pix_width, 0:pix_height] = [255, 128, 0, 255]
d_frame = cp.array( h_frame )
params = [
( 'u4', 'image_width', pix_width ),
( 'u4', 'image_height', pix_height ),
( 'u8', 'accum', d_accum.data.ptr ),
( 'u8', 'frame', d_frame.data.ptr ),
( 'u4', 'subframe index', 0 ),
( 'f4', 'cam_eye_x', 0 ),
( 'f4', 'cam_eye_y', 0 ),
( 'f4', 'cam_eye_z', 5.0 ),
( 'f4', 'cam_U_x', 1.10457 ),
( 'f4', 'cam_U_y', 0 ),
( 'f4', 'cam_U_z', 0 ),
( 'f4', 'cam_V_x', 0 ),
( 'f4', 'cam_V_y', 0.828427 ),
( 'f4', 'cam_V_z', 0 ),
( 'f4', 'cam_W_x', 0 ),
( 'f4', 'cam_W_y', 0 ),
( 'f4', 'cam_W_z', -2.0 ),
( 'u8', 'trav_handle', state.ias_handle )
#( 'u8', 'trav_handle', state.tri_gas_handle)
]
formats = [ x[0] for x in params ]
names = [ x[1] for x in params ]
values = [ x[2] for x in params ]
itemsize = get_aligned_itemsize( formats, 8 )
params_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'itemsize': itemsize,
'aligned' : True
} )
h_params = np.array( [ tuple(values) ], dtype=params_dtype )
d_params = array_to_device_memory( h_params )
stream = cp.cuda.Stream()
optix.launch(
state.pipeline,
stream.ptr,
d_params.ptr,
h_params.dtype.itemsize,
state.sbt,
pix_width,
pix_height,
1 # depth
)
stream.synchronize()
h_pix = cp.asnumpy( d_frame )
return h_pix
#-------------------------------------------------------------------------------
#
# main
#
#-------------------------------------------------------------------------------
def main():
state = State()
create_context ( state )
build_triangle_gas ( state )
build_sphere_gas ( state )
create_sphere_xform ( state )
build_ias ( state )
create_module ( state )
create_program_groups( state )
create_pipeline ( state )
create_sbt ( state )
pix = launch( state )
print( "Total number of log messages: {}".format( logger.num_mssgs ) )
pix = pix.reshape( ( pix_height, pix_width, 4 ) ) # PIL expects [ y, x ] resolution
img = ImageOps.flip( Image.fromarray( pix, 'RGBA' ) ) # PIL expects y = 0 at bottom
img.show()
img.save( 'my.png' )
if __name__ == "__main__":
main()
| otk-pyoptix-master | examples/simpleMotionBlur.py |
#!/usr/bin/env python3
# Copyright (c) 2022 NVIDIA CORPORATION All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import optix
import os
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import array
import ctypes # C interop helpers
from PIL import Image, ImageOps # Image IO
from pynvrtc.compiler import Program
import path_util
#-------------------------------------------------------------------------------
#
# Util
#
#-------------------------------------------------------------------------------
pix_width = 1024
pix_height = 768
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def round_up( val, mult_of ):
return val if val % mult_of == 0 else val + mult_of - val % mult_of
def get_aligned_itemsize( formats, alignment ):
names = []
for i in range( len(formats ) ):
names.append( 'x'+str(i) )
temp_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'align' : True
} )
return round_up( temp_dtype.itemsize, alignment )
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def array_to_device_memory( numpy_array, stream=cp.cuda.Stream() ):
byte_size = numpy_array.size*numpy_array.dtype.itemsize
h_ptr = ctypes.c_void_p( numpy_array.ctypes.data )
d_mem = cp.cuda.memory.alloc( byte_size )
d_mem.copy_from_async( h_ptr, byte_size, stream )
return d_mem
def compile_cuda( cuda_file ):
with open( cuda_file, 'rb' ) as f:
src = f.read()
nvrtc_dll = os.environ.get('NVRTC_DLL')
if nvrtc_dll is None:
nvrtc_dll = ''
print("NVRTC_DLL = {}".format(nvrtc_dll))
prog = Program( src.decode(), cuda_file,
lib_name= nvrtc_dll )
compile_options = [
'-use_fast_math',
'-lineinfo',
'-default-device',
'-std=c++11',
'-rdc',
'true',
#'-IC:\\Program Files\\NVIDIA GPU Computing Toolkit\CUDA\\v11.1\include'
f'-I{path_util.cuda_tk_path}',
f'-I{path_util.include_path}'
]
# Optix 7.0 compiles need path to system stddef.h
    # the value of optix.stddef_path is a compiled-in constant. When building
# the module, the value can be specified via an environment variable, e.g.
# export PYOPTIX_STDDEF_DIR="/usr/include/linux"
if (optix.version()[1] == 0):
compile_options.append( f'-I{path_util.stddef_path}' )
ptx = prog.compile( compile_options )
return ptx
#-------------------------------------------------------------------------------
#
# Optix setup
#
#-------------------------------------------------------------------------------
def init_optix():
print( "Initializing cuda ..." )
cp.cuda.runtime.free( 0 )
print( "Initializing optix ..." )
optix.init()
def create_ctx():
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix.version()[1] >= 2:
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
def create_accel( ctx ):
accel_options = optix.AccelBuildOptions(
buildFlags = int( optix.BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS),
operation = optix.BUILD_OPERATION_BUILD
)
global vertices
vertices = cp.array( [
-0.5, -0.5, 0.0,
0.5, -0.5, 0.0,
0.0, 0.5, 0.0
], dtype = 'f4')
triangle_input_flags = [ optix.GEOMETRY_FLAG_NONE ]
triangle_input = optix.BuildInputTriangleArray()
triangle_input.vertexFormat = optix.VERTEX_FORMAT_FLOAT3
triangle_input.numVertices = len( vertices )
triangle_input.vertexBuffers = [ vertices.data.ptr ]
triangle_input.flags = triangle_input_flags
    triangle_input.numSbtRecords = 1
gas_buffer_sizes = ctx.accelComputeMemoryUsage( [accel_options], [triangle_input] )
d_temp_buffer_gas = cp.cuda.alloc( gas_buffer_sizes.tempSizeInBytes )
d_gas_output_buffer = cp.cuda.alloc( gas_buffer_sizes.outputSizeInBytes)
gas_handle = ctx.accelBuild(
0, # CUDA stream
[ accel_options ],
[ triangle_input ],
d_temp_buffer_gas.ptr,
gas_buffer_sizes.tempSizeInBytes,
d_gas_output_buffer.ptr,
gas_buffer_sizes.outputSizeInBytes,
[] # emitted properties
)
return (gas_handle, d_gas_output_buffer)
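# Illustrative note (not part of the original example): create_accel() follows
# the usual two-phase OptiX build pattern -- accelComputeMemoryUsage() is
# queried with the same options/inputs to size the temporary and output
# buffers, both are allocated with CuPy, and accelBuild() then returns the
# traversable handle. The output buffer is returned next to the handle so the
# caller keeps a live reference and the device allocation is not freed while
# the GAS is still in use.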
def set_pipeline_options():
if optix.version()[1] >= 2:
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ),
numPayloadValues = 3,
numAttributeValues = 3,
exceptionFlags = int( optix.EXCEPTION_FLAG_NONE ),
pipelineLaunchParamsVariableName = "params",
usesPrimitiveTypeFlags = optix.PRIMITIVE_TYPE_FLAGS_TRIANGLE
)
else:
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ),
numPayloadValues = 3,
numAttributeValues = 3,
exceptionFlags = int( optix.EXCEPTION_FLAG_NONE ),
pipelineLaunchParamsVariableName = "params"
)
def create_module( ctx, pipeline_options, triangle_ptx ):
print( "Creating optix module ..." )
module_options = optix.ModuleCompileOptions(
maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT,
debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
)
module, log = ctx.moduleCreateFromPTX(
module_options,
pipeline_options,
triangle_ptx
)
print( "\tModule create log: <<<{}>>>".format( log ) )
return module
def create_program_groups( ctx, module ):
print( "Creating program groups ... " )
raygen_prog_group_desc = optix.ProgramGroupDesc()
raygen_prog_group_desc.raygenModule = module
raygen_prog_group_desc.raygenEntryFunctionName = "__raygen__rg"
raygen_prog_group, log = ctx.programGroupCreate(
[ raygen_prog_group_desc ]
)
print( "\tProgramGroup raygen create log: <<<{}>>>".format( log ) )
miss_prog_group_desc = optix.ProgramGroupDesc()
miss_prog_group_desc.missModule = module
miss_prog_group_desc.missEntryFunctionName = "__miss__ms"
program_group_options = optix.ProgramGroupOptions()
miss_prog_group, log = ctx.programGroupCreate(
[ miss_prog_group_desc ]
)
print( "\tProgramGroup miss create log: <<<{}>>>".format( log ) )
hitgroup_prog_group_desc = optix.ProgramGroupDesc()
hitgroup_prog_group_desc.hitgroupModuleCH = module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__ch"
hitgroup_prog_group, log = ctx.programGroupCreate(
[ hitgroup_prog_group_desc ]
)
print( "\tProgramGroup hitgroup create log: <<<{}>>>".format( log ) )
return [ raygen_prog_group, miss_prog_group, hitgroup_prog_group ]
def create_pipeline( ctx, program_groups, pipeline_compile_options ):
print( "Creating pipeline ... " )
max_trace_depth = 1
pipeline_link_options = optix.PipelineLinkOptions()
pipeline_link_options.maxTraceDepth = max_trace_depth
pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
log = ""
pipeline = ctx.pipelineCreate(
pipeline_compile_options,
pipeline_link_options,
program_groups,
log)
stack_sizes = optix.StackSizes()
for prog_group in program_groups:
optix.util.accumulateStackSizes( prog_group, stack_sizes )
(dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size) = \
optix.util.computeStackSizes(
stack_sizes,
max_trace_depth,
0, # maxCCDepth
0 # maxDCDepth
)
pipeline.setStackSize(
dc_stack_size_from_trav,
dc_stack_size_from_state,
cc_stack_size,
1 # maxTraversableDepth
)
return pipeline
def create_sbt( prog_groups ):
print( "Creating sbt ... " )
(raygen_prog_group, miss_prog_group, hitgroup_prog_group ) = prog_groups
global d_raygen_sbt
global d_miss_sbt
header_format = '{}B'.format( optix.SBT_RECORD_HEADER_SIZE )
#
# raygen record
#
formats = [ header_format ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_raygen_sbt = np.array( [ 0 ], dtype=dtype )
optix.sbtRecordPackHeader( raygen_prog_group, h_raygen_sbt )
global d_raygen_sbt
d_raygen_sbt = array_to_device_memory( h_raygen_sbt )
#
# miss record
#
formats = [ header_format, 'f4', 'f4', 'f4']
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'r', 'g', 'b' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_miss_sbt = np.array( [ (0, 0.3, 0.1, 0.2) ], dtype=dtype )
optix.sbtRecordPackHeader( miss_prog_group, h_miss_sbt )
global d_miss_sbt
d_miss_sbt = array_to_device_memory( h_miss_sbt )
#
# hitgroup record
#
formats = [ header_format ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_hitgroup_sbt = np.array( [ (0) ], dtype=dtype )
optix.sbtRecordPackHeader( hitgroup_prog_group, h_hitgroup_sbt )
global d_hitgroup_sbt
d_hitgroup_sbt = array_to_device_memory( h_hitgroup_sbt )
return optix.ShaderBindingTable(
raygenRecord = d_raygen_sbt.ptr,
missRecordBase = d_miss_sbt.ptr,
missRecordStrideInBytes = h_miss_sbt.dtype.itemsize,
missRecordCount = 1,
hitgroupRecordBase = d_hitgroup_sbt.ptr,
hitgroupRecordStrideInBytes = h_hitgroup_sbt.dtype.itemsize,
hitgroupRecordCount = 1
)
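# Illustrative note (not part of the original example): every SBT record above
# is a packed struct of an opaque header (optix.SBT_RECORD_HEADER_SIZE bytes,
# filled in by sbtRecordPackHeader) followed by optional user data read by the
# device programs; e.g. the miss record is laid out roughly as
#     [ header | r: f4 | g: f4 | b: f4 ]
# padded to optix.SBT_RECORD_ALIGNMENT. The stride fields handed to
# ShaderBindingTable are simply the itemsize of the corresponding host dtype.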
def launch( pipeline, sbt, trav_handle ):
print( "Launching ... " )
pix_bytes = pix_width*pix_height*4
h_pix = np.zeros( (pix_width,pix_height,4), 'B' )
h_pix[0:pix_width, 0:pix_height] = [255, 128, 0, 255]
d_pix = cp.array( h_pix )
params = [
( 'u8', 'image', d_pix.data.ptr ),
( 'u4', 'image_width', pix_width ),
( 'u4', 'image_height', pix_height ),
( 'f4', 'cam_eye_x', 0 ),
( 'f4', 'cam_eye_y', 0 ),
( 'f4', 'cam_eye_z', 2.0 ),
( 'f4', 'cam_U_x', 1.10457 ),
( 'f4', 'cam_U_y', 0 ),
( 'f4', 'cam_U_z', 0 ),
( 'f4', 'cam_V_x', 0 ),
( 'f4', 'cam_V_y', 0.828427 ),
( 'f4', 'cam_V_z', 0 ),
( 'f4', 'cam_W_x', 0 ),
( 'f4', 'cam_W_y', 0 ),
( 'f4', 'cam_W_z', -2.0 ),
( 'u8', 'trav_handle', trav_handle )
]
formats = [ x[0] for x in params ]
names = [ x[1] for x in params ]
values = [ x[2] for x in params ]
itemsize = get_aligned_itemsize( formats, 8 )
params_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_params = np.array( [ tuple(values) ], dtype=params_dtype )
d_params = array_to_device_memory( h_params )
stream = cp.cuda.Stream()
optix.launch(
pipeline,
stream.ptr,
d_params.ptr,
h_params.dtype.itemsize,
sbt,
pix_width,
pix_height,
1 # depth
)
stream.synchronize()
h_pix = cp.asnumpy( d_pix )
return h_pix
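# Illustrative note (not part of the original example): the `params` list in
# launch() mirrors the device-side launch-parameter struct in triangle.cu (a
# __constant__ variable named "params", per pipelineLaunchParamsVariableName
# above). The name/format pairs are assumed to match that struct's member
# order and sizes, which is why the packed dtype is built with an explicit
# aligned itemsize before being copied to the device and passed to
# optix.launch().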
#-------------------------------------------------------------------------------
#
# main
#
#-------------------------------------------------------------------------------
def main():
triangle_cu = os.path.join(os.path.dirname(__file__), 'triangle.cu')
triangle_ptx = compile_cuda( triangle_cu )
ctx = create_ctx()
gas_handle, d_gas_output_buffer = create_accel(ctx)
pipeline_options = set_pipeline_options()
module = create_module( ctx, pipeline_options, triangle_ptx )
prog_groups = create_program_groups( ctx, module )
pipeline = create_pipeline( ctx, prog_groups, pipeline_options )
sbt = create_sbt( prog_groups )
pix = launch( pipeline, sbt, gas_handle )
print( "Total number of log messages: {}".format( logger.num_mssgs ) )
pix = pix.reshape( ( pix_height, pix_width, 4 ) ) # PIL expects [ y, x ] resolution
img = ImageOps.flip( Image.fromarray( pix, 'RGBA' ) ) # PIL expects y = 0 at bottom
img.show()
img.save( 'my.png' )
if __name__ == "__main__":
main()
| otk-pyoptix-master | examples/triangle.py |
#!/usr/bin/env python3
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import optix
import os
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import array
import ctypes # C interop helpers
from PIL import Image, ImageOps # Image IO
from pynvrtc.compiler import Program
import path_util
#-------------------------------------------------------------------------------
#
# Util
#
#-------------------------------------------------------------------------------
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def round_up( val, mult_of ):
return val if val % mult_of == 0 else val + mult_of - val % mult_of
def get_aligned_itemsize( formats, alignment ):
names = []
for i in range( len(formats ) ):
names.append( 'x'+str(i) )
temp_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'align' : True
} )
return round_up( temp_dtype.itemsize, alignment )
def array_to_device_memory( numpy_array, stream=cp.cuda.Stream() ):
byte_size = numpy_array.size*numpy_array.dtype.itemsize
h_ptr = ctypes.c_void_p( numpy_array.ctypes.data )
d_mem = cp.cuda.memory.alloc( byte_size )
d_mem.copy_from_async( h_ptr, byte_size, stream )
return d_mem
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def compile_cuda( cuda_file ):
with open( cuda_file, 'rb' ) as f:
src = f.read()
nvrtc_dll = os.environ.get('NVRTC_DLL')
if nvrtc_dll is None:
nvrtc_dll = ''
print("NVRTC_DLL = {}".format(nvrtc_dll))
prog = Program( src.decode(), cuda_file,
lib_name= nvrtc_dll )
compile_options = [
'-use_fast_math',
'-lineinfo',
'-default-device',
'-std=c++11',
'-rdc',
'true',
f'-I{path_util.include_path}',
f'-I{path_util.cuda_tk_path}'
]
# Optix 7.0 compiles need path to system stddef.h
    # the value of optix.stddef_path is a compiled-in constant. When building
# the module, the value can be specified via an environment variable, e.g.
# export PYOPTIX_STDDEF_DIR="/usr/include/linux"
#if (optix.version()[1] == 0):
if (path_util.stddef_path):
compile_options.append( f'-I{path_util.stddef_path}' )
print("pynvrtc compile options = {}".format(compile_options))
ptx = prog.compile( compile_options )
return ptx
#-------------------------------------------------------------------------------
#
# Optix setup
#
#-------------------------------------------------------------------------------
width = 1024
height = 768
def create_ctx():
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix.version()[1] >= 2:
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
device_context = create_ctx()
def create_accel():
accel_options = optix.AccelBuildOptions(
buildFlags = int( optix.BUILD_FLAG_ALLOW_RANDOM_VERTEX_ACCESS),
operation = optix.BUILD_OPERATION_BUILD
)
radius = 0.4
global vertices
vertices = cp.array( [
-1.5, -3.5, 0.0,
-1.0, 0.5, 0.0,
1.0, 0.5, 0.0,
1.5, -3.5, 0.0
], dtype = 'f4' )
global widths
widths = cp.array( [
0.01, radius, radius, 0.01
], dtype = 'f4' )
global segment_indices
segment_indices = cp.array( [ 0 ], dtype = 'int' )
curve_input = optix.BuildInputCurveArray()
curve_input.numPrimitives = 1
curve_input.numVertices = len( vertices )
curve_input.vertexBuffers = [ vertices.data.ptr ]
curve_input.widthBuffers = [ widths.data.ptr ]
curve_input.normalBuffers = [ 0 ]
curve_input.indexBuffer = segment_indices.data.ptr
curve_input.curveType = optix.PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE
curve_input.flag = optix.GEOMETRY_FLAG_NONE
curve_input.primitiveIndexOffset = 0
gas_buffer_sizes = device_context.accelComputeMemoryUsage( [accel_options], [curve_input] )
d_temp_buffer_gas = cp.cuda.alloc( gas_buffer_sizes.tempSizeInBytes )
d_gas_output_buffer = cp.cuda.alloc( gas_buffer_sizes.outputSizeInBytes )
gas_handle = device_context.accelBuild(
0, # CUDA stream
[ accel_options ],
[ curve_input ],
d_temp_buffer_gas.ptr,
gas_buffer_sizes.tempSizeInBytes,
d_gas_output_buffer.ptr,
gas_buffer_sizes.outputSizeInBytes,
[] # emitted properties
)
return ( gas_handle, d_gas_output_buffer )
gas_handle, d_gas_output_buffer = create_accel()
def set_pipeline_options():
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ),
numPayloadValues = 3,
numAttributeValues = 1,
exceptionFlags = int( optix.EXCEPTION_FLAG_NONE ),
pipelineLaunchParamsVariableName = "params",
usesPrimitiveTypeFlags = int( optix.PRIMITIVE_TYPE_FLAGS_ROUND_CUBIC_BSPLINE )
)
pipeline_compile_options = set_pipeline_options()
def create_module():
print( "Creating optix module ..." )
module_compile_options = optix.ModuleCompileOptions(
maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT,
debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
)
intersector_options = optix.BuiltinISOptions(
builtinISModuleType = optix.PRIMITIVE_TYPE_ROUND_CUBIC_BSPLINE,
usesMotionBlur = False
)
curves_cu = os.path.join(os.path.dirname(__file__), 'curves.cu' )
curves_ptx = compile_cuda( curves_cu )
shading_module, log = device_context.moduleCreateFromPTX(
module_compile_options,
pipeline_compile_options,
curves_ptx
)
geometry_module = device_context.builtinISModuleGet(
module_compile_options,
pipeline_compile_options,
intersector_options
)
print( "\tModule create log: <<<{}>>>".format( log ) )
return geometry_module, shading_module
geometry_module, shading_module = create_module()
def create_program_groups():
print( "Creating program groups ... " )
raygen_prog_group_desc = optix.ProgramGroupDesc()
raygen_prog_group_desc.raygenModule = shading_module
raygen_prog_group_desc.raygenEntryFunctionName = "__raygen__rg"
raygen_prog_group, log = device_context.programGroupCreate(
[ raygen_prog_group_desc ]
)
print( "\tProgramGroup raygen create log: <<<{}>>>".format( log ) )
miss_prog_group_desc = optix.ProgramGroupDesc()
miss_prog_group_desc.missModule = shading_module
miss_prog_group_desc.missEntryFunctionName = "__miss__ms"
miss_prog_group, log = device_context.programGroupCreate(
[ miss_prog_group_desc ]
)
print( "\tProgramGroup miss create log: <<<{}>>>".format( log ) )
hitgroup_prog_group_desc = optix.ProgramGroupDesc()
hitgroup_prog_group_desc.hitgroupModuleCH = shading_module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__ch"
hitgroup_prog_group_desc.hitgroupModuleIS = geometry_module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameIS = "" # supplied by built-in module
hitgroup_prog_group, log = device_context.programGroupCreate(
[ hitgroup_prog_group_desc ]
)
print( "\tProgramGroup hitgroup create log: <<<{}>>>".format( log ) )
return [ raygen_prog_group, miss_prog_group, hitgroup_prog_group ]
program_groups = create_program_groups()
def create_pipeline():
print( "Creating pipeline ... " )
max_trace_depth = 1
pipeline_link_options = optix.PipelineLinkOptions()
pipeline_link_options.maxTraceDepth = max_trace_depth
pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
log = ""
pipeline = device_context.pipelineCreate(
pipeline_compile_options,
pipeline_link_options,
program_groups,
log
)
stack_sizes = optix.StackSizes()
for prog_group in program_groups:
optix.util.accumulateStackSizes( prog_group, stack_sizes )
( dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size ) = \
optix.util.computeStackSizes(
stack_sizes,
max_trace_depth,
0, # maxCCDepth
0, # maxDCDepth
)
pipeline.setStackSize(
dc_stack_size_from_trav,
dc_stack_size_from_state,
cc_stack_size,
1 # maxTraversableDepth
)
return pipeline
pipeline = create_pipeline()
def create_sbt():
print( "Creating sbt ... " )
( raygen_prog_group, miss_prog_group, hitgroup_prog_group ) = program_groups
global d_raygen_sbt
global d_miss_sbt
header_format = '{}B'.format( optix.SBT_RECORD_HEADER_SIZE )
#
# raygen record
#
formats = [ header_format ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_raygen_sbt = np.array( [ 0 ], dtype=dtype )
optix.sbtRecordPackHeader( raygen_prog_group, h_raygen_sbt )
global d_raygen_sbt
d_raygen_sbt = array_to_device_memory( h_raygen_sbt )
#
# miss record
#
formats = [ header_format, 'f4', 'f4', 'f4']
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'r', 'g', 'b' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_miss_sbt = np.array( [ (0, 0.0, 0.2, 0.6) ], dtype=dtype )
optix.sbtRecordPackHeader( miss_prog_group, h_miss_sbt )
global d_miss_sbt
d_miss_sbt = array_to_device_memory( h_miss_sbt )
#
# hitgroup record
#
formats = [ header_format ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header' ],
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_hitgroup_sbt = np.array( [ (0) ], dtype=dtype )
optix.sbtRecordPackHeader( hitgroup_prog_group, h_hitgroup_sbt )
global d_hitgroup_sbt
d_hitgroup_sbt = array_to_device_memory( h_hitgroup_sbt )
return optix.ShaderBindingTable(
raygenRecord = d_raygen_sbt.ptr,
missRecordBase = d_miss_sbt.ptr,
missRecordStrideInBytes = d_miss_sbt.mem.size,
missRecordCount = 1,
hitgroupRecordBase = d_hitgroup_sbt.ptr,
hitgroupRecordStrideInBytes = d_hitgroup_sbt.mem.size,
hitgroupRecordCount = 1
)
sbt = create_sbt()
def launch():
print( "Launching ... " )
width = 1024
height = 768
    pix_bytes = width * height * 4  # RGBA, one byte per channel
h_pix = np.zeros( ( width, height,4), 'B' )
h_pix[0:width, 0:height] = [255, 128, 0, 255]
d_pix = cp.array( h_pix )
params = [
( 'u8', 'image', d_pix.data.ptr ),
( 'u4', 'image_width', width ),
( 'u4', 'image_height', height ),
( 'f4', 'cam_eye_x', 0 ),
( 'f4', 'cam_eye_y', 0 ),
( 'f4', 'cam_eye_z', 2.0 ),
( 'f4', 'cam_U_x', 1.10457 ),
( 'f4', 'cam_U_y', 0 ),
( 'f4', 'cam_U_z', 0 ),
( 'f4', 'cam_V_x', 0 ),
( 'f4', 'cam_V_y', 0.828427 ),
( 'f4', 'cam_V_z', 0 ),
( 'f4', 'cam_W_x', 0 ),
( 'f4', 'cam_W_y', 0 ),
( 'f4', 'cam_W_z', -2.0 ),
( 'u8', 'trav_handle', gas_handle )
]
formats = [ x[0] for x in params ]
names = [ x[1] for x in params ]
values = [ x[2] for x in params ]
itemsize = get_aligned_itemsize( formats, 8 )
params_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_params = np.array( [ tuple(values) ], dtype=params_dtype )
d_params = array_to_device_memory( h_params )
stream = cp.cuda.Stream()
optix.launch(
pipeline,
stream.ptr,
d_params.ptr,
h_params.dtype.itemsize,
sbt,
width,
height,
1 # depth
)
stream.synchronize()
h_pix = cp.asnumpy( d_pix )
return h_pix
pix = launch()
print( "Total number of log messages: {}".format( logger.num_mssgs ) )
pix = pix.reshape( ( height, width, 4 ) ) # PIL expects [ y, x ] resolution
img = ImageOps.flip( Image.fromarray( pix, 'RGBA' ) ) # PIL expects y = 0 at bottom
img.show()
img.save( 'my.png' )
| otk-pyoptix-master | examples/curves.py |
#!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import optix
import os
import cupy as cp # CUDA bindings
import numpy as np # Packing of structures in C-compatible format
import array
import ctypes # C interop helpers
from PIL import Image, ImageOps # Image IO
from pynvrtc.compiler import Program
import path_util
#-------------------------------------------------------------------------------
#
# Util
#
#-------------------------------------------------------------------------------
pix_width = 1024
pix_height = 768
class Logger:
def __init__( self ):
self.num_mssgs = 0
def __call__( self, level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
self.num_mssgs += 1
def log_callback( level, tag, mssg ):
print( "[{:>2}][{:>12}]: {}".format( level, tag, mssg ) )
def round_up( val, mult_of ):
return val if val % mult_of == 0 else val + mult_of - val % mult_of
def get_aligned_itemsize( formats, alignment ):
names = []
for i in range( len(formats ) ):
names.append( 'x'+str(i) )
temp_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'align' : True
} )
return round_up( temp_dtype.itemsize, alignment )
def optix_version_gte( version ):
if optix.version()[0] > version[0]:
return True
if optix.version()[0] == version[0] and optix.version()[1] >= version[1]:
return True
return False
def array_to_device_memory( numpy_array, stream=cp.cuda.Stream() ):
byte_size = numpy_array.size*numpy_array.dtype.itemsize
h_ptr = ctypes.c_void_p( numpy_array.ctypes.data )
d_mem = cp.cuda.memory.alloc( byte_size )
d_mem.copy_from_async( h_ptr, byte_size, stream )
return d_mem
def compile_cuda( cuda_file ):
with open( cuda_file, 'rb' ) as f:
src = f.read()
nvrtc_dll = os.environ.get('NVRTC_DLL')
if nvrtc_dll is None:
nvrtc_dll = ''
print("NVRTC_DLL = {}".format(nvrtc_dll))
prog = Program( src.decode(), cuda_file,
lib_name= nvrtc_dll )
compile_options = [
'-use_fast_math',
'-lineinfo',
'-default-device',
'-std=c++11',
'-rdc',
'true',
#'-IC:\\Program Files\\NVIDIA GPU Computing Toolkit\CUDA\\v11.1\include'
f'-I{path_util.cuda_tk_path}',
f'-I{path_util.include_path}'
]
# Optix 7.0 compiles need path to system stddef.h
    # the value of optix.stddef_path is a compiled-in constant. When building
# the module, the value can be specified via an environment variable, e.g.
# export PYOPTIX_STDDEF_DIR="/usr/include/linux"
if (optix.version()[1] == 0):
compile_options.append( f'-I{path_util.stddef_path}' )
ptx = prog.compile( compile_options )
return ptx
#-------------------------------------------------------------------------------
#
# Optix setup
#
#-------------------------------------------------------------------------------
def create_ctx():
print( "Creating optix device context ..." )
# Note that log callback data is no longer needed. We can
# instead send a callable class instance as the log-function
# which stores any data needed
global logger
logger = Logger()
# OptiX param struct fields can be set with optional
# keyword constructor arguments.
ctx_options = optix.DeviceContextOptions(
logCallbackFunction = logger,
logCallbackLevel = 4
)
# They can also be set and queried as properties on the struct
if optix.version()[1] >= 2:
ctx_options.validationMode = optix.DEVICE_CONTEXT_VALIDATION_MODE_ALL
cu_ctx = 0
return optix.deviceContextCreate( cu_ctx, ctx_options )
def create_accel( ctx ):
accel_options = optix.AccelBuildOptions(
buildFlags = int( optix.BUILD_FLAG_ALLOW_COMPACTION ),
operation = optix.BUILD_OPERATION_BUILD
)
aabb = cp.array( [
-1.5, -1.5, -1.5,
1.5, 1.5, 1.5 ],
dtype = 'f4' )
aabb_input_flags = [ optix.GEOMETRY_FLAG_NONE ]
aabb_input = optix.BuildInputCustomPrimitiveArray(
aabbBuffers = [ aabb.data.ptr ],
numPrimitives = 1,
flags = aabb_input_flags,
numSbtRecords = 1,
)
gas_buffer_sizes = ctx.accelComputeMemoryUsage( [accel_options], [aabb_input] )
d_temp_buffer_gas = cp.cuda.alloc( gas_buffer_sizes.tempSizeInBytes )
d_gas_output_buffer = cp.cuda.alloc( gas_buffer_sizes.outputSizeInBytes )
gas_handle = ctx.accelBuild(
0, # CUDA stream
[ accel_options ],
[ aabb_input ],
d_temp_buffer_gas.ptr,
gas_buffer_sizes.tempSizeInBytes,
d_gas_output_buffer.ptr,
gas_buffer_sizes.outputSizeInBytes,
[] # emitted properties
)
return ( gas_handle, d_gas_output_buffer )
def set_pipeline_options():
if optix.version()[1] >= 2:
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ),
numPayloadValues = 3,
numAttributeValues = 4,
exceptionFlags = int( optix.EXCEPTION_FLAG_NONE ),
pipelineLaunchParamsVariableName = "params",
usesPrimitiveTypeFlags = optix.PRIMITIVE_TYPE_FLAGS_CUSTOM
)
else:
return optix.PipelineCompileOptions(
usesMotionBlur = False,
traversableGraphFlags = int( optix.TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_GAS ),
numPayloadValues = 3,
numAttributeValues = 4,
exceptionFlags = int( optix.EXCEPTION_FLAG_NONE ),
pipelineLaunchParamsVariableName = "params",
)
def create_module( ctx, pipeline_options, sphere_ptx ):
print( "Creating OptiX module ..." )
module_options = optix.ModuleCompileOptions(
maxRegisterCount = optix.COMPILE_DEFAULT_MAX_REGISTER_COUNT,
optLevel = optix.COMPILE_OPTIMIZATION_DEFAULT,
debugLevel = optix.COMPILE_DEBUG_LEVEL_DEFAULT
)
module, log = ctx.moduleCreateFromPTX(
module_options,
pipeline_options,
sphere_ptx
)
print( "\tModule create log: <<<{}>>>".format( log ) )
return module
def create_program_groups( ctx, module ):
print( "Creating program groups ... " )
raygen_program_desc = optix.ProgramGroupDesc()
raygen_program_desc.raygenModule = module
raygen_program_desc.raygenEntryFunctionName = "__raygen__rg"
raygen_prog_group, log = ctx.programGroupCreate(
[ raygen_program_desc ]
)
print( "\tProgramGroup raygen create log: <<<{}>>>".format( log ) )
miss_prog_group_desc = optix.ProgramGroupDesc()
miss_prog_group_desc.missModule = module
miss_prog_group_desc.missEntryFunctionName = "__miss__ms"
miss_prog_group, log = ctx.programGroupCreate(
[ miss_prog_group_desc ]
)
    print( "\tProgramGroup miss create log: <<<{}>>>".format( log ) )
hitgroup_prog_group_desc = optix.ProgramGroupDesc()
hitgroup_prog_group_desc.hitgroupModuleCH = module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameCH = "__closesthit__ch"
hitgroup_prog_group_desc.hitgroupModuleIS = module
hitgroup_prog_group_desc.hitgroupEntryFunctionNameIS = "__intersection__sphere"
hitgroup_prog_group, log = ctx.programGroupCreate(
[ hitgroup_prog_group_desc ]
)
print( "\tProgramGroup hitgroup create log: <<<{}>>>".format( log ) )
return [ raygen_prog_group, miss_prog_group, hitgroup_prog_group ]
def create_pipeline( ctx, program_groups, pipeline_compile_options ):
print( "Creating pipeline ... " )
max_trace_depth = 1
pipeline_link_options = optix.PipelineLinkOptions()
pipeline_link_options.maxTraceDepth = max_trace_depth
pipeline_link_options.debugLevel = optix.COMPILE_DEBUG_LEVEL_FULL
log = ""
pipeline = ctx.pipelineCreate(
pipeline_compile_options,
pipeline_link_options,
program_groups,
log)
stack_sizes = optix.StackSizes()
for prog_group in program_groups:
optix.util.accumulateStackSizes( prog_group, stack_sizes )
( dc_stack_size_from_trav, dc_stack_size_from_state, cc_stack_size ) = \
optix.util.computeStackSizes(
stack_sizes,
max_trace_depth,
0, # maxCCDepth
0 # maxDCDepth
)
pipeline.setStackSize(
dc_stack_size_from_trav,
dc_stack_size_from_state,
cc_stack_size,
1 # maxTraversableDepth
)
return pipeline
def create_sbt( prog_groups ):
print( "Creating sbt ... " )
(raygen_prog_group, miss_prog_group, hitgroup_prog_group ) = prog_groups
header_format = '{}B'.format( optix.SBT_RECORD_HEADER_SIZE )
#
# raygen record
#
formats = [ header_format, 'f4','f4','f4',
'f4','f4','f4',
'f4','f4','f4',
'f4','f4','f4' ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'eye_x','eye_y','eye_z',
'u_x', 'u_y', 'u_z',
'v_x', 'v_y', 'v_z',
'w_x', 'w_y', 'w_z'
],
'formats' : formats,
'itemsize' : itemsize,
'align' : True
})
h_raygen_sbt = np.array( [ ( 0, 0.0, 0.0, 3.0,
2.31, -0.0, 0.0,
0.0, 1.73, 0.0,
0.0, 0.0, -3.0
) ], dtype = dtype )
optix.sbtRecordPackHeader( raygen_prog_group, h_raygen_sbt )
global d_raygen_sbt
d_raygen_sbt = array_to_device_memory( h_raygen_sbt )
#
# miss record
#
formats = [ header_format, 'f4', 'f4', 'f4' ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'r', 'g', 'b' ],
'formats' : formats,
'itemsize' : itemsize,
'align' : True
})
h_miss_sbt = np.array( [ (0, 0.3, 0.1, 0.2) ], dtype = dtype )
optix.sbtRecordPackHeader( miss_prog_group, h_miss_sbt )
global d_miss_sbt
d_miss_sbt = array_to_device_memory( h_miss_sbt )
#
# hitgroup record
#
formats = [ header_format, 'f4', 'f4', 'f4', 'f4' ]
itemsize = get_aligned_itemsize( formats, optix.SBT_RECORD_ALIGNMENT )
dtype = np.dtype( {
'names' : ['header', 'center_x', 'center_y', 'center_z', 'radius' ],
'formats' : formats,
'itemsize' : itemsize,
'align' : True
} )
h_hitgroup_sbt = np.array( [ ( 0, 0.0, 0.0, 0.0, 1.5) ], dtype=dtype )
optix.sbtRecordPackHeader( hitgroup_prog_group, h_hitgroup_sbt )
global d_hitgroup_sbt
d_hitgroup_sbt = array_to_device_memory( h_hitgroup_sbt )
return optix.ShaderBindingTable(
raygenRecord = d_raygen_sbt.ptr,
missRecordBase = d_miss_sbt.ptr,
missRecordStrideInBytes = h_miss_sbt.dtype.itemsize,
missRecordCount = 1,
hitgroupRecordBase = d_hitgroup_sbt.ptr,
hitgroupRecordStrideInBytes = h_hitgroup_sbt.dtype.itemsize,
hitgroupRecordCount = 1
)
def launch( pipeline, sbt, trav_handle ):
print( "Launching ... " )
pix_bytes = pix_width*pix_height*4
h_pix = np.zeros( (pix_width, pix_height, 4 ), 'B' )
h_pix[0:pix_width, 0:pix_height] = [255, 128, 0, 255]
d_pix = cp.array( h_pix )
params = [
( 'u8', 'image', d_pix.data.ptr ),
( 'u4', 'image_width', pix_width ),
( 'u4', 'image_height', pix_height ),
( 'u4', 'origin_x', pix_width / 2 ),
( 'u4', 'origin_y', pix_height / 2 ),
( 'u8', 'trav_handle', trav_handle )
]
formats = [ x[0] for x in params ]
names = [ x[1] for x in params ]
values = [ x[2] for x in params ]
itemsize = get_aligned_itemsize( formats, 8 )
params_dtype = np.dtype( {
'names' : names,
'formats' : formats,
'itemsize': itemsize,
'align' : True
} )
h_params = np.array( [ tuple(values) ], dtype=params_dtype )
d_params = array_to_device_memory( h_params )
stream = cp.cuda.Stream()
optix.launch(
pipeline,
stream.ptr,
d_params.ptr,
h_params.dtype.itemsize,
sbt,
pix_width,
pix_height,
1 # depth
)
stream.synchronize()
h_pix = cp.asnumpy( d_pix )
return h_pix
#-------------------------------------------------------------------------------
#
# main
#
#-------------------------------------------------------------------------------
def main():
sphere_cu = os.path.join(os.path.dirname(__file__), 'sphere.cu')
sphere_ptx = compile_cuda( sphere_cu )
ctx = create_ctx()
gas_handle, d_gas_output_buffer = create_accel(ctx)
pipeline_options = set_pipeline_options()
module = create_module( ctx, pipeline_options, sphere_ptx )
prog_groups = create_program_groups( ctx, module )
pipeline = create_pipeline( ctx, prog_groups, pipeline_options )
sbt = create_sbt( prog_groups )
pix = launch( pipeline, sbt, gas_handle )
print( "Total number of log messages: {}".format( logger.num_mssgs ) )
pix = pix.reshape( ( pix_height, pix_width, 4 ) ) # PIL expects [ y, x ] resolution
img = ImageOps.flip( Image.fromarray(pix , 'RGBA' ) ) # PIL expects y = 0 at bottom
img.show()
img.save( 'my.png' )
if __name__ == "__main__":
main()
| otk-pyoptix-master | examples/sphere.py |
# Use this file to bootstrap packman into your Python environment (3.7.x). Simply
# add the path by doing sys.insert to where packmanconf.py is located and then execute:
#
# >>> import packmanconf
# >>> packmanconf.init()
#
# It will use the configured remote(s) and the version of packman in the same folder,
# giving you full access to the packman API via the following module
#
# >> import packmanapi
# >> dir(packmanapi)
import os
import platform
import sys
def init():
"""Call this function to initialize the packman configuration.
Calls to the packman API will work after successfully calling this function.
Note:
This function only needs to be called once during the execution of your
program. Calling it repeatedly is harmless but wasteful.
Compatibility with your Python interpreter is checked and upon failure
the function will report what is required.
Example:
>>> import packmanconf
>>> packmanconf.init()
>>> import packmanapi
>>> packmanapi.set_verbosity_level(packmanapi.VERBOSITY_HIGH)
"""
major = sys.version_info[0]
minor = sys.version_info[1]
if major != 3 or minor != 7:
raise RuntimeError(
f"This version of packman requires Python 3.7.x, but {major}.{minor} was provided"
)
conf_dir = os.path.dirname(os.path.abspath(__file__))
os.environ["PM_INSTALL_PATH"] = conf_dir
packages_root = get_packages_root(conf_dir)
version = get_version(conf_dir)
module_dir = get_module_dir(conf_dir, packages_root, version)
sys.path.insert(1, module_dir)
def get_packages_root(conf_dir: str) -> str:
root = os.getenv("PM_PACKAGES_ROOT")
if not root:
platform_name = platform.system()
if platform_name == "Windows":
drive, _ = os.path.splitdrive(conf_dir)
root = os.path.join(drive, "packman-repo")
elif platform_name == "Darwin":
# macOS
root = "/Library/Caches/packman"
elif platform_name == "Linux":
root = "/var/tmp/packman"
else:
raise RuntimeError(f"Unsupported platform '{platform_name}'")
# make sure the path exists:
os.makedirs(root, exist_ok=True)
return root
def get_module_dir(conf_dir, packages_root: str, version: str) -> str:
module_dir = os.path.join(packages_root, "packman-common", version)
if not os.path.exists(module_dir):
import tempfile
tf = tempfile.NamedTemporaryFile(delete=False)
target_name = tf.name
tf.close()
url = f"http://bootstrap.packman.nvidia.com/packman-common@{version}.zip"
print(f"Downloading '{url}' ...")
import urllib.request
urllib.request.urlretrieve(url, target_name)
from importlib.machinery import SourceFileLoader
# import module from path provided
script_path = os.path.join(conf_dir, "bootstrap", "install_package.py")
ip = SourceFileLoader("install_package", script_path).load_module()
print("Unpacking ...")
ip.install_package(target_name, module_dir)
os.unlink(tf.name)
return module_dir
def get_version(conf_dir: str):
path = os.path.join(conf_dir, "packman")
if not os.path.exists(path): # in dev repo fallback
path += ".sh"
with open(path, "rt", encoding="utf8") as launch_file:
for line in launch_file.readlines():
if line.startswith("PM_PACKMAN_VERSION"):
_, value = line.split("=")
return value.strip()
raise RuntimeError(f"Unable to find 'PM_PACKMAN_VERSION' in '{path}'")
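# Illustrative note (not part of the original file): get_version() expects the
# packman launcher script to contain a line of the form
#     PM_PACKMAN_VERSION=6.10.1
# and returns the text to the right of '=' with surrounding whitespace
# stripped (the version number shown here is only a hypothetical example).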
| cccl-main | cub/docs/tools/packman/packmanconf.py |
# Copyright 2019 NVIDIA CORPORATION
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import zipfile
import tempfile
import sys
import os
import stat
import time
from typing import Any, Callable
RENAME_RETRY_COUNT = 100
RENAME_RETRY_DELAY = 0.1
logging.basicConfig(level=logging.WARNING, format="%(message)s")
logger = logging.getLogger("install_package")
def remove_directory_item(path):
if os.path.islink(path) or os.path.isfile(path):
try:
os.remove(path)
except PermissionError:
# make sure we have access and try again:
os.chmod(path, stat.S_IRWXU)
os.remove(path)
else:
# try first to delete the dir because this will work for folder junctions, otherwise we would follow the junctions and cause destruction!
clean_out_folder = False
try:
# make sure we have access preemptively - this is necessary because recursing into a directory without permissions
# will only lead to heart ache
os.chmod(path, stat.S_IRWXU)
os.rmdir(path)
except OSError:
clean_out_folder = True
if clean_out_folder:
# we should make sure the directory is empty
names = os.listdir(path)
for name in names:
fullname = os.path.join(path, name)
remove_directory_item(fullname)
# now try to again get rid of the folder - and not catch if it raises:
os.rmdir(path)
class StagingDirectory:
def __init__(self, staging_path):
self.staging_path = staging_path
self.temp_folder_path = None
os.makedirs(staging_path, exist_ok=True)
def __enter__(self):
self.temp_folder_path = tempfile.mkdtemp(prefix="ver-", dir=self.staging_path)
return self
def get_temp_folder_path(self):
return self.temp_folder_path
# this function renames the temp staging folder to folder_name, it is required that the parent path exists!
def promote_and_rename(self, folder_name):
abs_dst_folder_name = os.path.join(self.staging_path, folder_name)
os.rename(self.temp_folder_path, abs_dst_folder_name)
def __exit__(self, type, value, traceback):
# Remove temp staging folder if it's still there (something went wrong):
path = self.temp_folder_path
if os.path.isdir(path):
remove_directory_item(path)
def rename_folder(staging_dir: StagingDirectory, folder_name: str):
try:
staging_dir.promote_and_rename(folder_name)
except OSError as exc:
# if we failed to rename because the folder now exists we can assume that another packman process
# has managed to update the package before us - in all other cases we re-raise the exception
abs_dst_folder_name = os.path.join(staging_dir.staging_path, folder_name)
if os.path.exists(abs_dst_folder_name):
logger.warning(
f"Directory {abs_dst_folder_name} already present, package installation already completed"
)
else:
raise
def call_with_retry(
op_name: str, func: Callable, retry_count: int = 3, retry_delay: float = 20
) -> Any:
retries_left = retry_count
while True:
try:
return func()
except (OSError, IOError) as exc:
logger.warning(f"Failure while executing {op_name} [{str(exc)}]")
if retries_left:
retry_str = "retry" if retries_left == 1 else "retries"
logger.warning(
f"Retrying after {retry_delay} seconds"
f" ({retries_left} {retry_str} left) ..."
)
time.sleep(retry_delay)
else:
logger.error("Maximum retries exceeded, giving up")
raise
retries_left -= 1
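# Illustrative usage sketch (not part of the original file): any OS-level
# operation can be wrapped in call_with_retry, for example
#     call_with_retry("remove staging dir",
#                     lambda: remove_directory_item(path),
#                     retry_count=3, retry_delay=1.0)
# Only OSError/IOError trigger a retry; other exceptions propagate immediately.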
def rename_folder_with_retry(staging_dir: StagingDirectory, folder_name):
dst_path = os.path.join(staging_dir.staging_path, folder_name)
call_with_retry(
f"rename {staging_dir.get_temp_folder_path()} -> {dst_path}",
lambda: rename_folder(staging_dir, folder_name),
RENAME_RETRY_COUNT,
RENAME_RETRY_DELAY,
)
def install_package(package_path, install_path):
staging_path, version = os.path.split(install_path)
with StagingDirectory(staging_path) as staging_dir:
output_folder = staging_dir.get_temp_folder_path()
with zipfile.ZipFile(package_path, allowZip64=True) as zip_file:
zip_file.extractall(output_folder)
# attempt the rename operation
rename_folder_with_retry(staging_dir, version)
print(f"Package successfully installed to {install_path}")
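# Illustrative note (not part of the original file): install_package() makes
# the install effectively atomic -- the archive is extracted into a uniquely
# named temp folder under the same staging root and only then renamed to the
# final version folder, so a concurrent packman process observes either the
# complete package or nothing, and losing the rename race is tolerated by
# rename_folder()/rename_folder_with_retry().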
if __name__ == "__main__":
install_package(sys.argv[1], sys.argv[2])
| cccl-main | cub/docs/tools/packman/bootstrap/install_package.py |
import os
import sys
import io
import contextlib
import packmanapi
REPO_ROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../..")
REPO_DEPS_FILE = os.path.join(REPO_ROOT, "deps/repo-deps.packman.xml")
def bootstrap():
"""
Bootstrap all omni.repo modules.
Pull with packman from repo.packman.xml and add them all to python sys.path to enable importing.
"""
#with contextlib.redirect_stdout(io.StringIO()):
deps = packmanapi.pull(REPO_DEPS_FILE)
for dep_path in deps.values():
if dep_path not in sys.path:
sys.path.append(dep_path)
if __name__ == "__main__":
bootstrap()
import omni.repo.man
omni.repo.man.main(REPO_ROOT)
| cccl-main | cub/docs/tools/repoman/repoman.py |
#!/usr/bin/env python
import hpccm
hpccm.config.set_container_format('docker')
Stage0 += hpccm.primitives.baseimage(image='nvidia/cuda:12.2.0-devel-ubuntu22.04')
Stage0 += hpccm.building_blocks.apt_get(ospackages=['git', 'tmux', 'gcc', 'g++', 'vim', 'python3', 'python-is-python3', 'ninja-build'])
# Stage0 += hpccm.building_blocks.llvm(version='15', extra_tools=True, toolset=True)
Stage0 += hpccm.building_blocks.cmake(eula=True, version='3.26.3')
# Stage0 += hpccm.building_blocks.nsight_compute(eula=True, version='2023.1.1')
Stage0 += hpccm.building_blocks.pip(packages=['fpzip', 'numpy', 'pandas', 'pynvml'], pip='pip3')
Stage0 += hpccm.primitives.environment(variables={'CUDA_MODULE_LOADING': 'EAGER'})
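# Illustrative note (not part of the original recipe): with the hpccm CLI
# installed, a Dockerfile is typically generated from this recipe via
#     hpccm --recipe recipe.py > Dockerfile
# The container format is already pinned to 'docker' above, so the generated
# file can be built directly with `docker build`.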
| cccl-main | cub/benchmarks/docker/recipe.py |
#!/usr/bin/env python3
import cub.bench as bench
# TODO:
# - driver version
# - host compiler + version
# - gpu clocks / pm
# - ecc
def main():
center_estimator = bench.MedianCenterEstimator()
bench.search(bench.BruteForceSeeker(center_estimator, center_estimator))
if __name__ == "__main__":
main()
| cccl-main | cub/benchmarks/scripts/search.py |
#!/usr/bin/env python3
import sys
import argparse
import cub.bench
def parse_arguments():
parser = argparse.ArgumentParser(description='Verify tuning variant')
parser.add_argument('--variant', type=str, help='Variant to verify', default=None, required=True)
variant = parser.parse_known_args()[0].variant
sys.argv.remove('--variant={}'.format(variant))
return variant
def workload_header(ct_workload_space, rt_workload_space):
for ct_workload in ct_workload_space:
for rt_workload in rt_workload_space:
workload_point = ct_workload + rt_workload
return ", ".join([x.split('=')[0] for x in workload_point])
def workload_entry(ct_workload, rt_workload):
workload_point = ct_workload + rt_workload
return ", ".join([x.split('=')[1] for x in workload_point])
class VerifySeeker:
def __init__(self, variant_label):
self.label = variant_label
self.estimator = cub.bench.MedianCenterEstimator()
def __call__(self, algname, ct_workload_space, rt_workload_space):
variant_point = cub.bench.Config().label_to_variant_point(algname, self.label)
print("{}, MinS, MedianS, MaxS".format(workload_header(ct_workload_space, rt_workload_space)))
for ct_workload in ct_workload_space:
bench = cub.bench.Bench(algname, variant_point, list(ct_workload))
if bench.build():
base = bench.get_base()
for rt_workload in rt_workload_space:
workload_point = ct_workload + rt_workload
base_samples, base_elapsed = base.do_run(workload_point, None)
variant_samples, _ = bench.do_run(workload_point, base_elapsed * 10)
min_speedup = min(base_samples) / min(variant_samples)
median_speedup = self.estimator(base_samples) / self.estimator(variant_samples)
max_speedup = max(base_samples) / max(variant_samples)
point_str = workload_entry(ct_workload, rt_workload)
print("{}, {}, {}, {}".format(point_str, min_speedup, median_speedup, max_speedup))
def main():
cub.bench.search(VerifySeeker(parse_arguments()))
if __name__ == "__main__":
main()
| cccl-main | cub/benchmarks/scripts/verify.py |
#!/usr/bin/env python3
import os
import re
import cub
import math
import argparse
import itertools
import functools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import mannwhitneyu
from scipy.stats.mstats import hdquantiles
pd.options.display.max_colwidth = 100
default_colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
color_cycle = itertools.cycle(default_colors)
color_map = {}
precision = 0.01
sensitivity = 0.5
def get_bench_columns():
return ['variant', 'elapsed', 'center', 'samples', 'bw']
def get_extended_bench_columns():
return get_bench_columns() + ['speedup', 'base_samples']
def compute_speedup(df):
bench_columns = get_bench_columns()
workload_columns = [col for col in df.columns if col not in bench_columns]
base_df = df[df['variant'] == 'base'].drop(columns=['variant']).rename(
columns={'center': 'base_center', 'samples': 'base_samples'})
base_df.drop(columns=['elapsed', 'bw'], inplace=True)
merged_df = df.merge(
base_df, on=[col for col in df.columns if col in workload_columns])
merged_df['speedup'] = merged_df['base_center'] / merged_df['center']
merged_df = merged_df.drop(columns=['base_center'])
return merged_df
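# Illustrative note (not part of the original script): compute_speedup() splits
# off the rows whose variant is 'base', then merges their center estimate back
# onto the full frame over the workload columns and computes
#     speedup = base_center / center
# so a value of, say, 1.25 means that variant's center estimate is 25% faster
# than the baseline for that exact workload point (the base rows themselves
# get a speedup of 1.0).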
def get_ct_axes(df):
ct_axes = []
for col in df.columns:
if '{ct}' in col:
ct_axes.append(col)
return ct_axes
def get_rt_axes(df):
rt_axes = []
excluded_columns = get_ct_axes(df) + get_extended_bench_columns()
for col in df.columns:
if col not in excluded_columns:
rt_axes.append(col)
return rt_axes
def ct_space(df):
ct_axes = get_ct_axes(df)
unique_ct_combinations = []
for _, row in df[ct_axes].drop_duplicates().iterrows():
unique_ct_combinations.append({})
for col in ct_axes:
unique_ct_combinations[-1][col] = row[col]
return unique_ct_combinations
def extract_case(df, ct_point):
tuning_df_loc = None
for ct_axis in ct_point:
if tuning_df_loc is None:
tuning_df_loc = (df[ct_axis] == ct_point[ct_axis])
else:
tuning_df_loc = tuning_df_loc & (df[ct_axis] == ct_point[ct_axis])
tuning_df = df.loc[tuning_df_loc].copy()
for ct_axis in ct_point:
tuning_df.drop(columns=[ct_axis], inplace=True)
return tuning_df
def extract_rt_axes_values(df):
rt_axes = get_rt_axes(df)
rt_axes_values = {}
for rt_axis in rt_axes:
rt_axes_values[rt_axis] = list(df[rt_axis].unique())
return rt_axes_values
def extract_rt_space(df):
rt_axes = get_rt_axes(df)
rt_axes_values = []
for rt_axis in rt_axes:
values = df[rt_axis].unique()
rt_axes_values.append(["{}={}".format(rt_axis, v) for v in values])
return list(itertools.product(*rt_axes_values))
def filter_variants(df, group):
rt_axes = get_rt_axes(df)
unique_combinations = set(
df[rt_axes].drop_duplicates().itertuples(index=False))
group_combinations = set(
group[rt_axes].drop_duplicates().itertuples(index=False))
has_all_combinations = group_combinations == unique_combinations
return has_all_combinations
def extract_complete_variants(df):
return df.groupby('variant').filter(functools.partial(filter_variants, df))
def compute_workload_score(rt_axes_values, rt_axes_ids, weights, row):
rt_workload = []
for rt_axis in rt_axes_values:
rt_workload.append("{}={}".format(rt_axis, row[rt_axis]))
weight = cub.bench.get_workload_weight(rt_workload, rt_axes_values, rt_axes_ids, weights)
return row['speedup'] * weight
def compute_variant_score(rt_axes_values, rt_axes_ids, weight_matrix, group):
workload_score_closure = functools.partial(compute_workload_score, rt_axes_values, rt_axes_ids, weight_matrix)
score_sum = group.apply(workload_score_closure, axis=1).sum()
return score_sum
def extract_scores(dfs):
rt_axes_values = {}
for subbench in dfs:
rt_axes_values[subbench] = extract_rt_axes_values(dfs[subbench])
rt_axes_ids = cub.bench.compute_axes_ids(rt_axes_values)
weights = cub.bench.compute_weight_matrices(rt_axes_values, rt_axes_ids)
score_dfs = []
for subbench in dfs:
score_closure = functools.partial(
compute_variant_score, rt_axes_values[subbench], rt_axes_ids[subbench], weights[subbench])
grouped = dfs[subbench].groupby('variant')
scores = grouped.apply(score_closure).reset_index()
scores.columns = ['variant', 'score']
stat = grouped.agg(mins=('speedup', 'min'),
means=('speedup', 'mean'),
maxs=('speedup', 'max'))
scores = pd.merge(scores, stat, on='variant')
score_dfs.append(scores)
score_df = pd.concat(score_dfs)
result = score_df.groupby('variant').agg(
{'score': 'sum', 'mins': 'min', 'means': 'mean', 'maxs': 'max'}).reset_index()
return result.sort_values(by=['score'], ascending=False)
def distributions_are_different(alpha, row):
ref_samples = row['base_samples']
cmp_samples = row['samples']
# H0: the distributions are not different
# H1: the distribution are different
_, p = mannwhitneyu(ref_samples, cmp_samples)
# Reject H0
return p < alpha
def remove_matching_distributions(alpha, df):
closure = functools.partial(distributions_are_different, alpha)
return df[df.apply(closure, axis=1)]
def get_filenames_map(arr):
if not arr:
        return {}
prefix = arr[0]
for string in arr:
while not string.startswith(prefix):
prefix = prefix[:-1]
if not prefix:
break
return {string: string[len(prefix):] for string in arr}
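# Illustrative note (not part of the original script): get_filenames_map()
# strips the longest common prefix from the given result files so they can be
# labelled compactly, e.g. for hypothetical paths
#     ['runs/gpu_a.db', 'runs/gpu_b.db'] -> {'runs/gpu_a.db': 'a.db',
#                                            'runs/gpu_b.db': 'b.db'}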
def iterate_case_dfs(args, callable):
storages = {}
algnames = set()
filenames_map = get_filenames_map(args.files)
for file in args.files:
storage = cub.bench.StorageBase(file)
algnames.update(storage.algnames())
storages[filenames_map[file]] = storage
pattern = re.compile(args.R)
for algname in algnames:
if not pattern.match(algname):
continue
case_dfs = {}
for subbench in storage.subbenches(algname):
for file in storages:
storage = storages[file]
df = storage.alg_to_df(algname, subbench)
with pd.option_context('mode.use_inf_as_na', True):
df = df.dropna(subset=['center'], how='all')
for _, row in df[['ctk', 'cub']].drop_duplicates().iterrows():
ctk_version = row['ctk']
cub_version = row['cub']
ctk_cub_df = df[(df['ctk'] == ctk_version) &
(df['cub'] == cub_version)]
for gpu in ctk_cub_df['gpu'].unique():
target_df = ctk_cub_df[ctk_cub_df['gpu'] == gpu]
target_df = target_df.drop(columns=['ctk', 'cub', 'gpu'])
target_df = compute_speedup(target_df)
for ct_point in ct_space(target_df):
point_str = ", ".join(["{}={}".format(k, ct_point[k]) for k in ct_point])
case_df = extract_complete_variants(extract_case(target_df, ct_point))
case_df['variant'] = case_df['variant'].astype(str) + " ({})".format(file)
if point_str not in case_dfs:
case_dfs[point_str] = {}
if subbench not in case_dfs[point_str]:
case_dfs[point_str][subbench] = case_df
else:
case_dfs[point_str][subbench] = pd.concat([case_dfs[point_str][subbench], case_df])
for point_str in case_dfs:
callable(algname, point_str, case_dfs[point_str])
def case_top(alpha, N, algname, ct_point_name, case_dfs):
    print("{}[{}]:".format(algname, ct_point_name))
    for subbench in case_dfs:
        # Optionally drop variants whose sample distributions are statistically
        # indistinguishable from the baseline before scoring.
        if alpha < 1.0:
            case_dfs[subbench] = remove_matching_distributions(alpha, case_dfs[subbench])
        case_dfs[subbench] = extract_complete_variants(case_dfs[subbench])
    print(extract_scores(case_dfs).head(N))
def top(args):
iterate_case_dfs(args, functools.partial(case_top, args.alpha, args.top))
def case_coverage(algname, ct_point_name, case_dfs):
num_variants = cub.bench.Config().variant_space_size(algname)
min_coverage = 100.0
for subbench in case_dfs:
num_covered_variants = len(case_dfs[subbench]['variant'].unique())
coverage = (num_covered_variants / num_variants) * 100
min_coverage = min(min_coverage, coverage)
case_str = "{}[{}]".format(algname, ct_point_name)
print("{} coverage: {} / {} ({:.4f}%)".format(
case_str, num_covered_variants, num_variants, min_coverage))
def coverage(args):
iterate_case_dfs(args, case_coverage)
def parallel_coordinates_plot(df, title):
# Parallel coordinates plot adaptation of https://stackoverflow.com/a/69411450
import matplotlib.cm as cm
from matplotlib.path import Path
import matplotlib.patches as patches
# Variables (the first variable must be categoric):
my_vars = df.columns.tolist()
df_plot = df[my_vars]
df_plot = df_plot.dropna()
df_plot = df_plot.reset_index(drop=True)
# Convert to numeric matrix:
ym = []
dics_vars = []
for v, var in enumerate(my_vars):
if df_plot[var].dtype.kind not in ["i", "u", "f"]:
dic_var = dict([(val, c)
for c, val in enumerate(df_plot[var].unique())])
dics_vars += [dic_var]
ym += [[dic_var[i] for i in df_plot[var].tolist()]]
else:
ym += [df_plot[var].tolist()]
ym = np.array(ym).T
# Padding:
ymins = ym.min(axis=0)
ymaxs = ym.max(axis=0)
dys = ymaxs - ymins
ymins -= dys*0.05
ymaxs += dys*0.05
dys = ymaxs - ymins
# Adjust to the main axis:
zs = np.zeros_like(ym)
zs[:, 0] = ym[:, 0]
zs[:, 1:] = (ym[:, 1:] - ymins[1:])/dys[1:]*dys[0] + ymins[0]
# Plot:
fig, host_ax = plt.subplots(figsize=(20, 10), tight_layout=True)
# Make the axes:
axes = [host_ax] + [host_ax.twinx() for i in range(ym.shape[1] - 1)]
dic_count = 0
for i, ax in enumerate(axes):
ax.set_ylim(
bottom=ymins[i],
top=ymaxs[i]
)
ax.spines.top.set_visible(False)
ax.spines.bottom.set_visible(False)
ax.ticklabel_format(style='plain')
if ax != host_ax:
ax.spines.left.set_visible(False)
ax.yaxis.set_ticks_position("right")
ax.spines.right.set_position(("axes", i/(ym.shape[1] - 1)))
if df_plot.iloc[:, i].dtype.kind not in ["i", "u", "f"]:
dic_var_i = dics_vars[dic_count]
ax.set_yticks(range(len(dic_var_i)))
if i == 0:
ax.set_yticklabels([])
else:
ax.set_yticklabels([key_val for key_val in dics_vars[dic_count].keys()])
dic_count += 1
host_ax.set_xlim(left=0, right=ym.shape[1] - 1)
host_ax.set_xticks(range(ym.shape[1]))
host_ax.set_xticklabels(my_vars, fontsize=14)
host_ax.tick_params(axis="x", which="major", pad=7)
# Color map:
colormap = cm.get_cmap('turbo')
# Normalize speedups:
df["speedup_normalized"] = (
df["speedup"] - df["speedup"].min()) / (df["speedup"].max() - df["speedup"].min())
# Make the curves:
host_ax.spines.right.set_visible(False)
host_ax.xaxis.tick_top()
for j in range(ym.shape[0]):
verts = list(zip([x for x in np.linspace(0, len(ym) - 1, len(ym)*3 - 2,
endpoint=True)],
np.repeat(zs[j, :], 3)[1: -1]))
codes = [Path.MOVETO] + [Path.CURVE4 for _ in range(len(verts) - 1)]
path = Path(verts, codes)
color_first_cat_var = colormap(df.loc[j, "speedup_normalized"])
patch = patches.PathPatch(
path, facecolor="none", lw=2, alpha=0.05, edgecolor=color_first_cat_var)
host_ax.add_patch(patch)
host_ax.set_title(title)
plt.show()
def case_coverage_plot(algname, ct_point_name, case_dfs):
data_list = []
for subbench in case_dfs:
for _, row_description in case_dfs[subbench].iterrows():
variant = row_description['variant']
speedup = row_description['speedup']
if variant.startswith('base'):
continue
varname, _ = variant.split(' ')
params = varname.split('.')
data_dict = {'variant': variant}
for param in params:
print(variant)
name, val = param.split('_')
data_dict[name] = int(val)
data_dict['speedup'] = speedup
# data_dict['variant'] = variant
data_list.append(data_dict)
df = pd.DataFrame(data_list)
parallel_coordinates_plot(df, "{} ({})".format(algname, ct_point_name))
def coverage_plot(args):
iterate_case_dfs(args, case_coverage_plot)
def case_pair_plot(algname, ct_point_name, case_dfs):
import seaborn as sns
data_list = []
for subbench in case_dfs:
for _, row_description in case_dfs[subbench].iterrows():
variant = row_description['variant']
speedup = row_description['speedup']
if variant.startswith('base'):
continue
varname, _ = variant.split(' ')
params = varname.split('.')
data_dict = {}
for param in params:
print(variant)
name, val = param.split('_')
data_dict[name] = int(val)
data_dict['speedup'] = speedup
data_list.append(data_dict)
df = pd.DataFrame(data_list)
sns.pairplot(df, hue='speedup')
plt.title("{} ({})".format(algname, ct_point_name))
plt.show()
def pair_plot(args):
iterate_case_dfs(args, case_pair_plot)
def qrde_hd(samples):
"""
Computes quantile-respectful density estimation based on the Harrell-Davis
quantile estimator. The implementation is based on the following post:
https://aakinshin.net/posts/qrde-hd by Andrey Akinshin
"""
min_sample, max_sample = min(samples), max(samples)
num_quantiles = math.ceil(1.0 / precision)
quantiles = np.linspace(precision, 1 - precision, num_quantiles - 1)
hd_quantiles = [min_sample] + list(hdquantiles(samples, quantiles)) + [max_sample]
width = [hd_quantiles[idx + 1] - hd_quantiles[idx] for idx in range(num_quantiles)]
p = 1.0 / precision
height = [1.0 / (p * w) for w in width]
return width, height
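# Illustrative usage sketch (not part of the original script). It assumes the
# module-level `precision` constant (e.g. 0.01) and the scipy `hdquantiles`
# import defined earlier in this file. Since every QRDE-HD bin carries the same
# probability mass, the estimated density should integrate to roughly one.
def _example_qrde_hd():
    rng = np.random.default_rng(0)
    samples = rng.normal(loc=1.0, scale=0.1, size=300)
    widths, heights = qrde_hd(samples)
    area = sum(w * h for w, h in zip(widths, heights))
    print("bins: {}, total area: {:.3f}".format(len(widths), area))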
def extract_peaks(pdf):
peaks = []
for i in range(1, len(pdf) - 1):
if pdf[i - 1] < pdf[i] > pdf[i + 1]:
peaks.append(i)
return peaks
def extract_modes(samples):
"""
Extract modes from the given samples based on the lowland algorithm:
https://aakinshin.net/posts/lowland-multimodality-detection/ by Andrey Akinshin
Implementation is based on the https://github.com/AndreyAkinshin/perfolizer
LowlandModalityDetector class.
"""
mode_ids = []
widths, heights = qrde_hd(samples)
peak_ids = extract_peaks(heights)
bin_area = 1.0 / len(heights)
x = min(samples)
peak_xs = []
peak_ys = []
bin_lower = [x]
for idx in range(len(heights)):
if idx in peak_ids:
peak_ys.append(heights[idx])
peak_xs.append(x + widths[idx] / 2)
x += widths[idx]
bin_lower.append(x)
def lowland_between(mode_candidate, left_peak, right_peak):
left, right = left_peak, right_peak
min_height = min(heights[left_peak], heights[right_peak])
while left < right and heights[left] > min_height:
left += 1
while left < right and heights[right] > min_height:
right -= 1
width = bin_lower[right + 1] - bin_lower[left]
total_area = width * min_height
total_bin_area = (right - left + 1) * bin_area
if total_bin_area / total_area < sensitivity:
mode_ids.append(mode_candidate)
return True
return False
previousPeaks = [peak_ids[0]]
for i in range(1, len(peak_ids)):
currentPeak = peak_ids[i]
while previousPeaks and heights[previousPeaks[-1]] < heights[currentPeak]:
if lowland_between(previousPeaks[0], previousPeaks[-1], currentPeak):
previousPeaks = []
else:
previousPeaks.pop()
if previousPeaks and heights[previousPeaks[-1]] > heights[currentPeak]:
if lowland_between(previousPeaks[0], previousPeaks[-1], currentPeak):
previousPeaks = []
previousPeaks.append(currentPeak)
mode_ids.append(previousPeaks[0])
return mode_ids
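# Illustrative usage sketch (not part of the original script): lowland modality
# detection on a clearly bimodal sample. It relies on the module-level
# `precision` and `sensitivity` constants defined earlier in this file.
def _example_extract_modes():
    rng = np.random.default_rng(0)
    samples = np.concatenate([rng.normal(1.0, 0.05, 500),
                              rng.normal(2.0, 0.05, 500)])
    # Two well-separated components are expected to yield two detected modes.
    print("detected modes:", len(extract_modes(samples)))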
def hd_displot(samples, label, ax):
if label not in color_map:
color_map[label] = next(color_cycle)
color = color_map[label]
widths, heights = qrde_hd(samples)
mode_ids = extract_modes(samples)
min_sample, max_sample = min(samples), max(samples)
xs = [min_sample]
ys = [0]
peak_xs = []
peak_ys = []
x = min(samples)
for idx in range(len(widths)):
xs.append(x + widths[idx] / 2)
ys.append(heights[idx])
if idx in mode_ids:
peak_ys.append(heights[idx])
peak_xs.append(x + widths[idx] / 2)
x += widths[idx]
xs = xs + [max_sample]
ys = ys + [0]
ax.fill_between(xs, ys, 0, alpha=0.4, color=color)
quartiles_of_interest = [0.25, 0.5, 0.75]
for quartile in quartiles_of_interest:
bin = int(quartile / precision) + 1
ax.plot([xs[bin], xs[bin]], [0, ys[bin]], color=color)
ax.plot(xs, ys, label=label, color=color)
ax.plot(peak_xs, peak_ys, 'o', color=color)
ax.legend()
def displot(data, ax):
for variant in data:
hd_displot(data[variant], variant, ax)
def variant_ratio(data, variant, ax):
if variant not in color_map:
color_map[variant] = next(color_cycle)
color = color_map[variant]
variant_samples = data[variant]
base_samples = data['base']
variant_widths, variant_heights = qrde_hd(variant_samples)
base_widths, base_heights = qrde_hd(base_samples)
quantiles = []
ratios = []
base_x = min(base_samples)
variant_x = min(variant_samples)
for i in range(1, len(variant_heights) - 1):
base_x += base_widths[i] / 2
variant_x += variant_widths[i] / 2
quantiles.append(i * precision)
ratios.append(base_x / variant_x)
ax.plot(quantiles, ratios, label=variant, color=color)
ax.axhline(1, color='red', alpha=0.7)
ax.legend()
ax.tick_params(axis='both', direction='in', pad=-22)
def ratio(data, ax):
for variant in data:
if variant != 'base':
variant_ratio(data, variant, ax)
def case_variants(pattern, mode, algname, ct_point_name, case_dfs):
for subbench in case_dfs:
case_df = case_dfs[subbench]
title = "{}[{}]:".format(algname + '/' + subbench, ct_point_name)
df = case_df[case_df['variant'].str.contains(pattern, regex=True)].reset_index(drop=True)
rt_axes = get_rt_axes(df)
rt_axes_values = extract_rt_axes_values(df)
vertical_axis_name = rt_axes[0]
if 'Elements{io}[pow2]' in rt_axes:
vertical_axis_name = 'Elements{io}[pow2]'
horizontal_axes = rt_axes
horizontal_axes.remove(vertical_axis_name)
vertical_axis_values = rt_axes_values[vertical_axis_name]
vertical_axis_ids = {}
for idx, val in enumerate(vertical_axis_values):
vertical_axis_ids[val] = idx
def extract_horizontal_space(df):
values = []
for rt_axis in horizontal_axes:
values.append(["{}={}".format(rt_axis, v) for v in df[rt_axis].unique()])
return list(itertools.product(*values))
if len(horizontal_axes) > 0:
idx = 0
horizontal_axis_ids = {}
for point in extract_horizontal_space(df):
horizontal_axis_ids[" / ".join(point)] = idx
idx = idx + 1
num_rows = len(vertical_axis_ids)
num_cols = max(1, len(extract_horizontal_space(df)))
if num_rows == 0:
return
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, gridspec_kw = {'wspace': 0, 'hspace': 0})
for _, vertical_row_description in df[[vertical_axis_name]].drop_duplicates().iterrows():
vertical_val = vertical_row_description[vertical_axis_name]
vertical_id = vertical_axis_ids[vertical_val]
vertical_name = "{}={}".format(vertical_axis_name, vertical_val)
vertical_df = df[df[vertical_axis_name] == vertical_val]
for _, horizontal_row_description in vertical_df[horizontal_axes].drop_duplicates().iterrows():
horizontal_df = vertical_df
for axis in horizontal_axes:
horizontal_df = horizontal_df[horizontal_df[axis] == horizontal_row_description[axis]]
horizontal_id = 0
if len(horizontal_axes) > 0:
horizontal_point = []
for rt_axis in horizontal_axes:
horizontal_point.append("{}={}".format(rt_axis, horizontal_row_description[rt_axis]))
horizontal_name = " / ".join(horizontal_point)
horizontal_id = horizontal_axis_ids[horizontal_name]
ax=axes[vertical_id, horizontal_id]
else:
ax=axes[vertical_id]
ax.set_ylabel(vertical_name)
data = {}
for _, variant in horizontal_df[['variant']].drop_duplicates().iterrows():
variant_name = variant['variant']
if 'base' not in data:
data['base'] = horizontal_df[horizontal_df['variant'] == variant_name].iloc[0]['base_samples']
data[variant_name] = horizontal_df[horizontal_df['variant'] == variant_name].iloc[0]['samples']
if mode == 'pdf':
# sns.histplot(data=data, ax=ax, kde=True)
displot(data, ax)
else:
ratio(data, ax)
if len(horizontal_axes) > 0:
ax=axes[vertical_id, horizontal_id]
if vertical_id == (num_rows - 1):
ax.set_xlabel(horizontal_name)
if horizontal_id == 0:
ax.set_ylabel(vertical_name)
else:
ax.set_ylabel('')
for ax in axes.flat:
ax.set_xticklabels([])
fig.suptitle(title)
plt.tight_layout()
plt.show()
def variants(args, mode):
pattern = re.compile(args.variants_pdf) if mode == 'pdf' else re.compile(args.variants_ratio)
iterate_case_dfs(args, functools.partial(case_variants, pattern, mode))
def file_exists(value):
if not os.path.isfile(value):
raise argparse.ArgumentTypeError(f"The file '{value}' does not exist.")
return value
def parse_arguments():
parser = argparse.ArgumentParser(description="Analyze benchmark results.")
parser.add_argument(
'-R', type=str, default='.*', help="Regex for benchmarks selection.")
parser.add_argument(
'--list-benches', action=argparse.BooleanOptionalAction, help="Show available benchmarks.")
parser.add_argument(
'--coverage', action=argparse.BooleanOptionalAction, help="Show variant space coverage.")
parser.add_argument(
'--coverage-plot', action=argparse.BooleanOptionalAction, help="Plot variant space coverage.")
parser.add_argument(
'--pair-plot', action=argparse.BooleanOptionalAction, help="Pair plot.")
parser.add_argument(
'--top', default=7, type=int, action='store', nargs='?', help="Show top N variants with highest score.")
parser.add_argument(
'files', type=file_exists, nargs='+', help='At least one file is required.')
parser.add_argument(
'--alpha', default=1.0, type=float)
parser.add_argument(
        '--variants-pdf', type=str, help="Plot sample density estimates for variants matching the regex.")
parser.add_argument(
        '--variants-ratio', type=str, help="Plot base/variant runtime ratios for variants matching the regex.")
return parser.parse_args()
def main():
args = parse_arguments()
if args.list_benches:
cub.bench.list_benches()
return
if args.coverage:
coverage(args)
return
if args.coverage_plot:
coverage_plot(args)
return
if args.pair_plot:
pair_plot(args)
return
if args.variants_pdf:
variants(args, 'pdf')
return
if args.variants_ratio:
variants(args, 'ratio')
return
top(args)
if __name__ == "__main__":
main()
| cccl-main | cub/benchmarks/scripts/analyze.py |
from . import bench
| cccl-main | cub/benchmarks/scripts/cub/__init__.py |
class Build:
def __init__(self, code, elapsed):
self.code = code
self.elapsed = elapsed
def __repr__(self):
return "Build(code = {}, elapsed = {:.4f}s)".format(self.code, self.elapsed)
| cccl-main | cub/benchmarks/scripts/cub/bench/build.py |
import os
import sys
import random
import itertools
def randomized_cartesian_product(list_of_lists):
length = 1
for l in list_of_lists:
length *= len(l)
visited = set()
while len(visited) < length:
variant = tuple(map(random.choice, list_of_lists))
if variant not in visited:
visited.add(variant)
yield variant
class Range:
def __init__(self, definition, label, low, high, step):
self.definition = definition
self.label = label
self.low = low
self.high = high
self.step = step
class RangePoint:
def __init__(self, definition, label, value):
self.definition = definition
self.label = label
self.value = value
class VariantPoint:
def __init__(self, range_points):
self.range_points = range_points
def label(self):
if self.is_base():
return 'base'
return '.'.join(["{}_{}".format(point.label, point.value) for point in self.range_points])
def is_base(self):
return len(self.range_points) == 0
def tuning(self):
if self.is_base():
return ""
tuning = "#pragma once\n\n"
for point in self.range_points:
tuning += "#define {} {}\n".format(point.definition, point.value)
return tuning
class BasePoint(VariantPoint):
def __init__(self):
VariantPoint.__init__(self, [])
def parse_ranges(columns):
ranges = []
for column in columns:
definition, label_range = column.split('|')
label, range = label_range.split('=')
start, end, step = [int(x) for x in range.split(':')]
ranges.append(Range(definition, label, start, end + 1, step))
return ranges
def parse_meta():
if not os.path.isfile("cub_bench_meta.csv"):
print("cub_bench_meta.csv not found", file=sys.stderr)
print("make sure to run the script from the CUB build directory",
file=sys.stderr)
benchmarks = {}
ctk_version = "0.0.0"
cub_revision = "0.0-0-0000"
with open("cub_bench_meta.csv", "r") as f:
lines = f.readlines()
for line in lines:
columns = line.split(',')
name = columns[0]
if name == "ctk_version":
ctk_version = columns[1].rstrip()
elif name == "cub_revision":
cub_revision = columns[1].rstrip()
else:
benchmarks[name] = parse_ranges(columns[1:])
return ctk_version, cub_revision, benchmarks
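# Illustrative sketch (not part of the original sources) of the column format
# that parse_ranges()/parse_meta() expect; the definition and label names below
# are hypothetical. A data row such as
#   cub.bench.example.algo,TUNE_ITEMS|ipt=7:24:1,TUNE_THREADS|tpb=128:1024:32
# describes two tuning axes with inclusive upper bounds (note the `end + 1` above).
def _example_parse_ranges():
    ranges = parse_ranges(["TUNE_ITEMS|ipt=7:24:1", "TUNE_THREADS|tpb=128:1024:32"])
    for r in ranges:
        print(r.definition, r.label, r.low, r.high, r.step)
    # Prints: TUNE_ITEMS ipt 7 25 1  /  TUNE_THREADS tpb 128 1025 32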
class Config:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.ctk, cls._instance.cub, cls._instance.benchmarks = parse_meta()
return cls._instance
def label_to_variant_point(self, algname, label):
if label == "base":
return BasePoint()
label_to_definition = {}
for param_space in self.benchmarks[algname]:
label_to_definition[param_space.label] = param_space.definition
points = []
for point in label.split('.'):
label, value = point.split('_')
points.append(RangePoint(label_to_definition[label], label, int(value)))
return VariantPoint(points)
def variant_space(self, algname):
variants = []
for param_space in self.benchmarks[algname]:
variants.append([])
for value in range(param_space.low, param_space.high, param_space.step):
variants[-1].append(RangePoint(param_space.definition, param_space.label, value))
return (VariantPoint(points) for points in randomized_cartesian_product(variants))
def variant_space_size(self, algname):
num_variants = 1
for param_space in self.benchmarks[algname]:
num_variants = num_variants * len(range(param_space.low, param_space.high, param_space.step))
return num_variants
| cccl-main | cub/benchmarks/scripts/cub/bench/config.py |
import os
import time
import signal
import subprocess
from .build import Build
from .config import Config
from .storage import Storage
from .logger import *
def create_builds_table(conn):
with conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS builds (
ctk TEXT NOT NULL,
cub TEXT NOT NULL,
bench TEXT NOT NULL,
code TEXT NOT NULL,
elapsed REAL
);
""")
class CMakeCache:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
create_builds_table(Storage().connection())
return cls._instance
def pull_build(self, bench):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
query = "SELECT code, elapsed FROM builds WHERE ctk = ? AND cub = ? AND bench = ?;"
result = conn.execute(query, (ctk, cub, bench.label())).fetchone()
if result:
code, elapsed = result
return Build(int(code), float(elapsed))
return result
def push_build(self, bench, build):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
conn.execute("INSERT INTO builds (ctk, cub, bench, code, elapsed) VALUES (?, ?, ?, ?, ?);",
(ctk, cub, bench.label(), build.code, build.elapsed))
class CMake:
def __init__(self):
pass
def do_build(self, bench, timeout):
logger = Logger()
try:
if not bench.is_base():
with open(bench.exe_name() + ".h", "w") as f:
f.writelines(bench.definitions())
cmd = ["cmake", "--build", ".", "--target", bench.exe_name()]
logger.info("starting build for {}: {}".format(bench.label(), " ".join(cmd)))
begin = time.time()
p = subprocess.Popen(cmd,
start_new_session=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.wait(timeout=timeout)
elapsed = time.time() - begin
logger.info("finished build for {} ({}) in {}s".format(bench.label(), p.returncode, elapsed))
return Build(p.returncode, elapsed)
except subprocess.TimeoutExpired:
logger.info("build for {} reached timeout of {}s".format(bench.label(), timeout))
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
return Build(424242, float('inf'))
def build(self, bench):
logger = Logger()
timeout = None
cache = CMakeCache()
if bench.is_base():
# Only base build can be pulled from cache
build = cache.pull_build(bench)
if build:
logger.info("found cached base build for {}".format(bench.label()))
if bench.is_base():
if not os.path.exists("bin/{}".format(bench.exe_name())):
self.do_build(bench, None)
return build
else:
base_build = self.build(bench.get_base())
if base_build.code != 0:
raise Exception("Base build failed")
timeout = base_build.elapsed * 10
build = self.do_build(bench, timeout)
cache.push_build(bench, build)
return build
def clean():
cmd = ["cmake", "--build", ".", "--target", "clean"]
p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.wait()
if p.returncode != 0:
raise Exception("Unable to clean build directory")
| cccl-main | cub/benchmarks/scripts/cub/bench/cmake.py |
from .config import *
from .storage import *
from .bench import Bench
from .cmake import CMake
from .score import *
from .search import *
| cccl-main | cub/benchmarks/scripts/cub/bench/__init__.py |
import logging
class Logger:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler('cub_bench_meta.log')
file_handler.setFormatter(logging.Formatter('%(asctime)s: %(message)s'))
logger.addHandler(file_handler)
cls._instance.logger = logger
return cls._instance
def info(self, message):
self.logger.info(message)
| cccl-main | cub/benchmarks/scripts/cub/bench/logger.py |
import os
import json
import time
import fpzip
import signal
import itertools
import subprocess
import numpy as np
from .cmake import CMake
from .config import *
from .storage import Storage, get_bench_table_name
from .score import *
from .logger import *
def first_val(my_dict):
values = list(my_dict.values())
first_value = values[0]
if not all(value == first_value for value in values):
        raise ValueError('Not all values in the dictionary are equal')
return first_value
class JsonCache:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.bench_cache = {}
cls._instance.device_cache = {}
return cls._instance
def get_bench(self, algname):
if algname not in self.bench_cache:
result = subprocess.check_output(
[os.path.join('.', 'bin', algname + '.base'), "--jsonlist-benches"])
self.bench_cache[algname] = json.loads(result)
return self.bench_cache[algname]
def get_device(self, algname):
if algname not in self.device_cache:
result = subprocess.check_output(
[os.path.join('.', 'bin', algname + '.base'), "--jsonlist-devices"])
devices = json.loads(result)["devices"]
if len(devices) != 1:
raise Exception(
"NVBench doesn't work well with multiple GPUs, use `CUDA_VISIBLE_DEVICES`")
self.device_cache[algname] = devices[0]
return self.device_cache[algname]
def json_benches(algname):
return JsonCache().get_bench(algname)
def create_benches_tables(conn, subbench, bench_axes):
with conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS subbenches (
algorithm TEXT NOT NULL,
bench TEXT NOT NULL,
UNIQUE(algorithm, bench)
);
""")
for algorithm_name in bench_axes:
axes = bench_axes[algorithm_name]
column_names = ", ".join(["\"{}\"".format(name) for name in axes])
columns = ", ".join(["\"{}\" TEXT".format(name) for name in axes])
conn.execute("""
INSERT INTO subbenches (algorithm, bench)
VALUES (?, ?)
ON CONFLICT DO NOTHING;
""", (algorithm_name, subbench))
if axes:
columns = ", " + columns
column_names = ", " + column_names
conn.execute("""
CREATE TABLE IF NOT EXISTS "{0}" (
ctk TEXT NOT NULL,
cub TEXT NOT NULL,
gpu TEXT NOT NULL,
variant TEXT NOT NULL,
elapsed REAL,
center REAL,
bw REAL,
samples BLOB
{1}
, UNIQUE(ctk, cub, gpu, variant {2})
);
""".format(get_bench_table_name(subbench, algorithm_name), columns, column_names))
def read_json(filename):
with open(filename, "r") as f:
file_root = json.load(f)
return file_root
def extract_filename(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "filename", summary_data))
assert (value_data["type"] == "string")
return value_data["value"]
def extract_size(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "size", summary_data))
assert (value_data["type"] == "int64")
return int(value_data["value"])
def extract_bw(summary):
summary_data = summary["data"]
value_data = next(filter(lambda v: v["name"] == "value", summary_data))
assert (value_data["type"] == "float64")
return float(value_data["value"])
def parse_samples_meta(state):
summaries = state["summaries"]
if not summaries:
return None, None
summary = next(filter(lambda s: s["tag"] == "nv/json/bin:nv/cold/sample_times",
summaries),
None)
if not summary:
return None, None
sample_filename = extract_filename(summary)
sample_count = extract_size(summary)
return sample_count, sample_filename
def parse_samples(state):
sample_count, samples_filename = parse_samples_meta(state)
if not sample_count or not samples_filename:
return np.array([], dtype=np.float32)
with open(samples_filename, "rb") as f:
samples = np.fromfile(f, "<f4")
samples.sort()
assert (sample_count == len(samples))
return samples
def parse_bw(state):
bwutil = next(filter(lambda s: s["tag"] == "nv/cold/bw/global/utilization",
state['summaries']), None)
if not bwutil:
return None
return extract_bw(bwutil)
class SubBenchState:
def __init__(self, state, axes_names, axes_values):
self.samples = parse_samples(state)
self.bw = parse_bw(state)
self.point = {}
for axis in state["axis_values"]:
name = axes_names[axis['name']]
value = axes_values[axis['name']][axis['value']]
self.point[name] = value
def __repr__(self):
return str(self.__dict__)
def name(self):
return ' '.join(f'{k}={v}' for k, v in self.point.items())
def center(self, estimator):
return estimator(self.samples)
class SubBenchResult:
def __init__(self, bench):
axes_names = {}
axes_values = {}
for axis in bench["axes"]:
short_name = axis["name"]
full_name = get_axis_name(axis)
axes_names[short_name] = full_name
axes_values[short_name] = {}
for value in axis["values"]:
if "value" in value:
axes_values[axis["name"]][str(value["value"])] = value["input_string"]
else:
axes_values[axis["name"]][value["input_string"]] = value["input_string"]
self.states = []
for state in bench["states"]:
self.states.append(SubBenchState(state, axes_names, axes_values))
def __repr__(self):
return str(self.__dict__)
def centers(self, estimator):
result = {}
for state in self.states:
result[state.name()] = state.center(estimator)
return result
class BenchResult:
def __init__(self, json_path, code, elapsed):
self.code = code
self.elapsed = elapsed
if json_path:
self.subbenches = {}
if code == 0:
for bench in read_json(json_path)["benchmarks"]:
self.subbenches[bench["name"]] = SubBenchResult(bench)
def __repr__(self):
return str(self.__dict__)
def centers(self, estimator):
result = {}
for subbench in self.subbenches:
result[subbench] = self.subbenches[subbench].centers(estimator)
return result
def device_json(algname):
return JsonCache().get_device(algname)
def get_device_name(device):
gpu_name = device["name"]
bw = device["global_memory_bus_width"]
sms = device["number_of_sms"]
ecc = "eccon" if device["ecc_state"] else "eccoff"
name = "{} ({}, {}, {})".format(gpu_name, bw, sms, ecc)
return name.replace('NVIDIA ', '')
def is_ct_axis(name):
return '{ct}' in name
def state_to_rt_workload(bench, state):
rt_workload = []
for param in state.split(' '):
name, value = param.split('=')
if is_ct_axis(name):
continue
rt_workload.append("{}={}".format(name, value))
return rt_workload
def create_runs_table(conn):
with conn:
conn.execute("""
CREATE TABLE IF NOT EXISTS runs (
ctk TEXT NOT NULL,
cub TEXT NOT NULL,
bench TEXT NOT NULL,
code TEXT NOT NULL,
elapsed REAL
);
""")
class RunsCache:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
create_runs_table(Storage().connection())
return cls._instance
def pull_run(self, bench):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
query = "SELECT code, elapsed FROM runs WHERE ctk = ? AND cub = ? AND bench = ?;"
result = conn.execute(query, (ctk, cub, bench.label())).fetchone()
if result:
code, elapsed = result
return int(code), float(elapsed)
return result
def push_run(self, bench, code, elapsed):
config = Config()
ctk = config.ctk
cub = config.cub
conn = Storage().connection()
with conn:
conn.execute("INSERT INTO runs (ctk, cub, bench, code, elapsed) VALUES (?, ?, ?, ?, ?);",
(ctk, cub, bench.label(), code, elapsed))
class BenchCache:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.existing_tables = set()
return cls._instance
def create_table_if_not_exists(self, conn, bench):
bench_base = bench.get_base()
alg_name = bench_base.algorithm_name()
if alg_name not in self.existing_tables:
subbench_axes_names = bench_base.axes_names()
for subbench in subbench_axes_names:
create_benches_tables(conn, subbench, {alg_name: subbench_axes_names[subbench]})
self.existing_tables.add(alg_name)
def push_bench_centers(self, bench, result, estimator):
config = Config()
ctk = config.ctk
cub = config.cub
gpu = get_device_name(device_json(bench.algname))
conn = Storage().connection()
self.create_table_if_not_exists(conn, bench)
centers = {}
with conn:
for subbench in result.subbenches:
centers[subbench] = {}
for state in result.subbenches[subbench].states:
table_name = get_bench_table_name(subbench, bench.algorithm_name())
columns = ""
placeholders = ""
values = []
for name in state.point:
value = state.point[name]
columns = columns + ", \"{}\"".format(name)
placeholders = placeholders + ", ?"
values.append(value)
values = tuple(values)
samples = fpzip.compress(state.samples)
center = estimator(state.samples)
to_insert = (ctk, cub, gpu, bench.variant_name(),
result.elapsed, center, state.bw, samples) + values
query = """
INSERT INTO "{0}" (ctk, cub, gpu, variant, elapsed, center, bw, samples {1})
VALUES (?, ?, ?, ?, ?, ?, ?, ? {2})
ON CONFLICT(ctk, cub, gpu, variant {1}) DO NOTHING;
""".format(table_name, columns, placeholders)
conn.execute(query, to_insert)
centers[subbench][state.name()] = center
return centers
def pull_bench_centers(self, bench, ct_workload_point, rt_values):
config = Config()
ctk = config.ctk
cub = config.cub
gpu = get_device_name(device_json(bench.algname))
conn = Storage().connection()
self.create_table_if_not_exists(conn, bench)
centers = {}
with conn:
for subbench in rt_values:
centers[subbench] = {}
table_name = get_bench_table_name(subbench, bench.algorithm_name())
for rt_point in values_to_space(rt_values[subbench]):
point_map = {}
point_checks = ""
workload_point = list(ct_workload_point) + list(rt_point)
for axis in workload_point:
name, value = axis.split('=')
point_map[name] = value
point_checks = point_checks + " AND \"{}\" = \"{}\"".format(name, value)
query = """
SELECT center FROM "{0}" WHERE ctk = ? AND cub = ? AND gpu = ? AND variant = ?{1};
""".format(table_name, point_checks)
result = conn.execute(query, (ctk, cub, gpu, bench.variant_name())).fetchone()
if result is None:
return None
state_name = ' '.join(f'{k}={v}' for k, v in point_map.items())
centers[subbench][state_name] = float(result[0])
return centers
def get_axis_name(axis):
name = axis["name"]
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
return name
def speedup(base, variant):
# If one of the runs failed, dict is empty
if not base or not variant:
return {}
benchmarks = set(base.keys())
if benchmarks != set(variant.keys()):
raise Exception("Benchmarks do not match.")
result = {}
for bench in benchmarks:
base_states = base[bench]
variant_states = variant[bench]
state_names = set(base_states.keys())
if state_names != set(variant_states.keys()):
raise Exception("States do not match.")
result[bench] = {}
for state in state_names:
result[bench][state] = base_states[state] / variant_states[state]
return result
def values_to_space(axes):
result = []
for axis in axes:
result.append(["{}={}".format(axis, value) for value in axes[axis]])
return list(itertools.product(*result))
class ProcessRunner:
_instance = None
def __new__(cls, *args, **kwargs):
if not isinstance(cls._instance, cls):
cls._instance = super(ProcessRunner, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self):
self.process = None
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
def new_process(self, cmd):
self.process = subprocess.Popen(cmd,
start_new_session=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
return self.process
def signal_handler(self, signum, frame):
self.kill_process()
raise SystemExit('search was interrupted')
def kill_process(self):
if self.process is not None:
self.process.kill()
class Bench:
def __init__(self, algorithm_name, variant, ct_workload):
self.algname = algorithm_name
self.variant = variant
self.ct_workload = ct_workload
def label(self):
return self.algname + '.' + self.variant.label()
def variant_name(self):
return self.variant.label()
def algorithm_name(self):
return self.algname
def is_base(self):
return self.variant.is_base()
def get_base(self):
return BaseBench(self.algorithm_name())
def exe_name(self):
if self.is_base():
return self.algorithm_name() + '.base'
return self.algorithm_name() + '.variant'
def bench_names(self):
return [bench['name'] for bench in json_benches(self.algname)["benchmarks"]]
def axes_names(self):
subbench_names = {}
for bench in json_benches(self.algname)["benchmarks"]:
names = []
for axis in bench["axes"]:
names.append(get_axis_name(axis))
subbench_names[bench['name']] = names
return subbench_names
def axes_values(self, sub_space, ct):
subbench_space = {}
for bench in json_benches(self.algname)["benchmarks"]:
space = {}
for axis in bench["axes"]:
name = get_axis_name(axis)
if ct:
if not '{ct}' in name:
continue
else:
if '{ct}' in name:
continue
axis_space = []
if name in sub_space:
for value in sub_space[name]:
axis_space.append(value)
else:
for value in axis["values"]:
axis_space.append(value["input_string"])
space[name] = axis_space
subbench_space[bench['name']] = space
return subbench_space
def ct_axes_value_descriptions(self):
subbench_descriptions = {}
for bench in json_benches(self.algname)["benchmarks"]:
descriptions = {}
for axis in bench["axes"]:
name = axis["name"]
if not '{ct}' in name:
continue
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
descriptions[name] = {}
for value in axis["values"]:
descriptions[name][value["input_string"]] = value["description"]
subbench_descriptions[bench["name"]] = descriptions
return first_val(subbench_descriptions)
def axis_values(self, axis_name):
result = json_benches(self.algname)
if len(result["benchmarks"]) != 1:
raise Exception("Executable should contain exactly one benchmark")
for axis in result["benchmarks"][0]["axes"]:
name = axis["name"]
if axis["flags"]:
name = name + "[{}]".format(axis["flags"])
if name != axis_name:
continue
values = []
for value in axis["values"]:
values.append(value["input_string"])
return values
return []
def build(self):
if not self.is_base():
self.get_base().build()
build = CMake().build(self)
return build.code == 0
def definitions(self):
definitions = self.variant.tuning()
definitions = definitions + "\n"
descriptions = self.ct_axes_value_descriptions()
for ct_component in self.ct_workload:
ct_axis_name, ct_value = ct_component.split('=')
description = descriptions[ct_axis_name][ct_value]
ct_axis_name = ct_axis_name.replace('{ct}', '')
definitions = definitions + "#define TUNE_{} {}\n".format(ct_axis_name, description)
return definitions
def do_run(self, ct_point, rt_values, timeout):
logger = Logger()
try:
result_path = 'result.json'
if os.path.exists(result_path):
os.remove(result_path)
bench_path = os.path.join('.', 'bin', self.exe_name())
cmd = [bench_path]
for value in ct_point:
cmd.append('-a')
cmd.append(value)
cmd.append('--jsonbin')
cmd.append(result_path)
# Allow noise because we rely on min samples
cmd.append("--max-noise")
cmd.append("100")
# Need at least 70 samples
cmd.append("--min-samples")
cmd.append("70")
# NVBench is currently broken for multiple GPUs, use `CUDA_VISIBLE_DEVICES`
cmd.append("-d")
cmd.append("0")
for bench in rt_values:
cmd.append('-b')
cmd.append(bench)
for axis in rt_values[bench]:
cmd.append('-a')
cmd.append("{}=[{}]".format(axis, ",".join(rt_values[bench][axis])))
logger.info("starting benchmark {} with {}: {}".format(self.label(), ct_point, " ".join(cmd)))
begin = time.time()
p = ProcessRunner().new_process(cmd)
p.wait(timeout=timeout)
elapsed = time.time() - begin
logger.info("finished benchmark {} with {} ({}) in {}s".format(self.label(), ct_point, p.returncode, elapsed))
return BenchResult(result_path, p.returncode, elapsed)
except subprocess.TimeoutExpired:
logger.info("benchmark {} with {} reached timeout of {}s".format(self.label(), ct_point, timeout))
os.killpg(os.getpgid(p.pid), signal.SIGTERM)
return BenchResult(None, 42, float('inf'))
def ct_workload_space(self, sub_space):
if not self.build():
raise Exception("Unable to build benchmark: " + self.label())
return values_to_space(first_val(self.axes_values(sub_space, True)))
def rt_axes_values(self, sub_space):
if not self.build():
raise Exception("Unable to build benchmark: " + self.label())
return self.axes_values(sub_space, False)
def run(self, ct_workload_point, rt_values, estimator):
logger = Logger()
bench_cache = BenchCache()
runs_cache = RunsCache()
cached_centers = bench_cache.pull_bench_centers(self, ct_workload_point, rt_values)
if cached_centers:
logger.info("found benchmark {} in cache".format(self.label()))
return cached_centers
timeout = None
if not self.is_base():
code, elapsed = runs_cache.pull_run(self.get_base())
if code != 0:
raise Exception("Base bench return code = " + code)
timeout = elapsed * 50
result = self.do_run(ct_workload_point, rt_values, timeout)
runs_cache.push_run(self, result.code, result.elapsed)
return bench_cache.push_bench_centers(self, result, estimator)
def speedup(self, ct_workload_point, rt_values, base_estimator, variant_estimator):
if self.is_base():
return 1.0
base = self.get_base()
base_center = base.run(ct_workload_point, rt_values, base_estimator)
self_center = self.run(ct_workload_point, rt_values, variant_estimator)
return speedup(base_center, self_center)
def score(self, ct_workload, rt_values, base_estimator, variant_estimator):
if self.is_base():
return 1.0
speedups = self.speedup(ct_workload, rt_values, base_estimator, variant_estimator)
if not speedups:
return float('-inf')
rt_axes_ids = compute_axes_ids(rt_values)
weight_matrices = compute_weight_matrices(rt_values, rt_axes_ids)
score = 0
for bench in speedups:
for state in speedups[bench]:
rt_workload = state_to_rt_workload(bench, state)
weights = weight_matrices[bench]
weight = get_workload_weight(rt_workload, rt_values[bench], rt_axes_ids[bench], weights)
speedup = speedups[bench][state]
score = score + weight * speedup
return score
class BaseBench(Bench):
def __init__(self, algname):
super().__init__(algname, BasePoint(), [])
| cccl-main | cub/benchmarks/scripts/cub/bench/bench.py |
import os
import fpzip
import sqlite3
import numpy as np
import pandas as pd
db_name = "cub_bench_meta.db"
def get_bench_table_name(subbench, algname):
return "{}.{}".format(algname, subbench)
def blob_to_samples(blob):
return np.squeeze(fpzip.decompress(blob))
class StorageBase:
def __init__(self, db_path):
self.conn = sqlite3.connect(db_path)
def connection(self):
return self.conn
def exists(self):
return os.path.exists(db_name)
def algnames(self):
with self.conn:
rows = self.conn.execute('SELECT DISTINCT algorithm FROM subbenches').fetchall()
return [row[0] for row in rows]
def subbenches(self, algname):
with self.conn:
rows = self.conn.execute('SELECT DISTINCT bench FROM subbenches WHERE algorithm=?', (algname,)).fetchall()
return [row[0] for row in rows]
def alg_to_df(self, algname, subbench):
table = get_bench_table_name(subbench, algname)
with self.conn:
df = pd.read_sql_query("SELECT * FROM \"{}\"".format(table), self.conn)
df['samples'] = df['samples'].apply(blob_to_samples)
return df
def store_df(self, algname, df):
df['samples'] = df['samples'].apply(fpzip.compress)
df.to_sql(algname, self.conn, if_exists='replace', index=False)
class Storage:
_instance = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
cls._instance.base = StorageBase(db_name)
return cls._instance
def connection(self):
return self.base.connection()
def exists(self):
return self.base.exists()
def algnames(self):
return self.base.algnames()
def alg_to_df(self, algname, subbench):
return self.base.alg_to_df(algname, subbench)
| cccl-main | cub/benchmarks/scripts/cub/bench/storage.py |
import re
import argparse
import numpy as np
from .bench import Bench, BaseBench
from .config import Config
from .storage import Storage
from .cmake import CMake
def list_benches():
print("### Benchmarks")
config = Config()
for algname in config.benchmarks:
space_size = config.variant_space_size(algname)
print(" * `{}`: {} variants: ".format(algname, space_size))
for param_space in config.benchmarks[algname]:
param_name = param_space.label
param_rng = (param_space.low, param_space.high, param_space.step)
print(" * `{}`: {}".format(param_name, param_rng))
def parse_sub_space(args):
sub_space = {}
for axis in args:
name, value = axis.split('=')
if '[' in value:
value = value.replace('[', '').replace(']', '')
values = value.split(',')
else:
values = [value]
sub_space[name] = values
return sub_space
def parse_arguments():
parser = argparse.ArgumentParser(
description="Runs benchmarks and stores results in a database.")
parser.add_argument('-R', type=str, default='.*',
help="Regex for benchmarks selection.")
parser.add_argument('-a', '--args', action='append',
type=str, help="Parameter in the format `Param=Value`.")
parser.add_argument(
'--list-benches', action=argparse.BooleanOptionalAction, help="Show available benchmarks.")
return parser.parse_args()
def run_benches(benchmarks, sub_space, regex, seeker):
pattern = re.compile(regex)
for algname in benchmarks:
if pattern.match(algname):
bench = BaseBench(algname)
ct_space = bench.ct_workload_space(sub_space)
rt_values = bench.rt_axes_values(sub_space)
seeker(algname, ct_space, rt_values)
def search(seeker):
args = parse_arguments()
if not Storage().exists():
CMake().clean()
config = Config()
print("ctk: ", config.ctk)
print("cub: ", config.cub)
workload_sub_space = {}
if args.args:
workload_sub_space = parse_sub_space(args.args)
if args.list_benches:
list_benches()
return
run_benches(config.benchmarks, workload_sub_space, args.R, seeker)
class MedianCenterEstimator:
def __init__(self):
pass
def __call__(self, samples):
if len(samples) == 0:
return float("inf")
return float(np.median(samples))
class BruteForceSeeker:
def __init__(self, base_center_estimator, variant_center_estimator):
self.base_center_estimator = base_center_estimator
self.variant_center_estimator = variant_center_estimator
def __call__(self, algname, ct_workload_space, rt_values):
variants = Config().variant_space(algname)
for ct_workload in ct_workload_space:
for variant in variants:
bench = Bench(algname, variant, list(ct_workload))
if bench.build():
score = bench.score(ct_workload,
rt_values,
self.base_center_estimator,
self.variant_center_estimator)
print(bench.label(), score)
| cccl-main | cub/benchmarks/scripts/cub/bench/search.py |
import math
import numpy as np
def importance_function(x):
return 1 - math.exp(-x)
def x_by_importance(y):
return -math.log(1 - y)
def compute_weights(num_values):
least_importance = 0.6
most_importance = 0.999
assert(least_importance < most_importance)
assert(least_importance >= 0 and least_importance < 1)
assert(most_importance > 0 and most_importance < 1)
begin = x_by_importance(least_importance)
end = x_by_importance(most_importance)
rng = end - begin
step = rng / num_values
weights = np.array([begin + x * step for x in range(num_values)])
weights = weights / sum(weights)
return weights
def io_weights(values):
return compute_weights(len(values))
def ei_weights(values):
return np.ones(len(values))
def compute_axes_ids(rt_axes_values):
result = {}
for bench in rt_axes_values:
rt_axes_ids = {}
axis_id = 0
for rt_axis in rt_axes_values[bench]:
rt_axes_ids[rt_axis] = axis_id
axis_id = axis_id + 1
result[bench] = rt_axes_ids
return result
def compute_raw_weight_matrix(rt_axes_values, rt_axes_ids):
rt_axes_weights = {}
first_rt_axis = True
first_rt_axis_name = None
for rt_axis in rt_axes_values:
if first_rt_axis:
first_rt_axis_name = rt_axis
first_rt_axis = False
values = rt_axes_values[rt_axis]
rt_axes_values[rt_axis] = values
if '{io}' in rt_axis:
rt_axes_weights[rt_axis] = io_weights(values)
else:
rt_axes_weights[rt_axis] = ei_weights(values)
num_rt_axes = len(rt_axes_ids)
for rt_axis in rt_axes_weights:
shape = [1] * num_rt_axes
shape[rt_axes_ids[rt_axis]] = -1
rt_axes_weights[rt_axis] = rt_axes_weights[rt_axis].reshape(*shape)
weights_matrix = rt_axes_weights[first_rt_axis_name]
for rt_axis in rt_axes_weights:
if rt_axis == first_rt_axis_name:
continue
weights_matrix = weights_matrix * rt_axes_weights[rt_axis]
return weights_matrix
def compute_weight_matrices(rt_axes_values, rt_axes_ids):
matrices = {}
aggregate = 0.0
for bench in rt_axes_values:
matrices[bench] = compute_raw_weight_matrix(rt_axes_values[bench], rt_axes_ids[bench])
aggregate = aggregate + np.sum(matrices[bench])
for bench in rt_axes_values:
matrices[bench] = matrices[bench] / aggregate
return matrices
def get_workload_coordinates(rt_workload, rt_axes_values, rt_axes_ids):
coordinates = [0] * len(rt_axes_ids)
for point in rt_workload:
rt_axis, rt_value = point.split('=')
coordinates[rt_axes_ids[rt_axis]] = rt_axes_values[rt_axis].index(rt_value)
return coordinates
def get_workload_weight(rt_workload, rt_axes_values, rt_axes_ids, weights_matrix):
coordinates = get_workload_coordinates(rt_workload, rt_axes_values, rt_axes_ids)
return weights_matrix[tuple(coordinates)]
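# Illustrative sketch (not part of the original sources): how the helpers above
# combine into per-state weights. The benchmark and axis names are hypothetical;
# the `{io}` axis gets increasing weights, other axes are weighted uniformly,
# and all weights are normalized to sum to one.
def _example_workload_weights():
    rt_axes_values = {'bench': {'Elements{io}[pow2]': ['16', '20', '24', '28'],
                                'Entropy': ['1.000', '0.544']}}
    rt_axes_ids = compute_axes_ids(rt_axes_values)
    weights = compute_weight_matrices(rt_axes_values, rt_axes_ids)
    print(float(np.sum(weights['bench'])))  # ~1.0
    w = get_workload_weight(['Elements{io}[pow2]=28', 'Entropy=1.000'],
                            rt_axes_values['bench'],
                            rt_axes_ids['bench'],
                            weights['bench'])
    print(w)  # weight of the largest-size state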
| cccl-main | cub/benchmarks/scripts/cub/bench/score.py |
#!/usr/bin/env python
# Generate set of projects mk files.
# Usage: python generate_mk.py PROJECTS_MK_DIR THRUST_SOURCE_DIR
# The program scans through unit tests and examples in THRUST_SOURCE_DIR
# and generates project mk for each of the tests and examples in PROJECTS_MK_DIR
# A single example or unit test source file generates its own executable
# This program is called by a top level Makefile, but can also be used stand-alone for debugging
# This program also generates testing.mk, examples.mk and dependencies.mk
from __future__ import print_function
import sys
import shutil as sh
import os
import glob
import re
test_template = """
TEST_SRC := %(TEST_SRC)s
TEST_NAME := %(TEST_NAME)s
include $(ROOTDIR)/thrust/internal/build/generic_test.mk
"""
example_template = """
EXAMPLE_SRC := %(EXAMPLE_SRC)s
EXAMPLE_NAME := %(EXAMPLE_NAME)s
include $(ROOTDIR)/thrust/internal/build/generic_example.mk
"""
def Glob(pattern, directory,exclude='\B'):
src = glob.glob(os.path.join(directory,pattern))
p = re.compile(exclude)
src = [s for s in src if not p.match(s)]
return src
def generate_test_mk(mk_path, test_path, group, TEST_DIR):
print('Generating makefiles in "'+mk_path+'" for tests in "'+test_path+'"')
src_cu = Glob("*.cu", test_path, ".*testframework.cu$")
src_cxx = Glob("*.cpp", test_path)
src_cu.sort();
src_cxx.sort();
src_all = src_cu + src_cxx;
tests_all = []
dependencies_all = []
for s in src_all:
fn = os.path.splitext(os.path.basename(s));
t = "thrust."+group+"."+fn[0]
e = fn[1]
mkfile = test_template % {"TEST_SRC" : s, "TEST_NAME" : t}
f = open(os.path.join(mk_path,t+".mk"), 'w')
f.write(mkfile)
f.close()
tests_all.append(os.path.join(mk_path,t))
dependencies_all.append(t+": testframework")
return [tests_all, dependencies_all]
def generate_example_mk(mk_path, example_path, group, EXAMPLE_DIR):
print('Generating makefiles in "'+mk_path+'" for examples in "'+example_path+'"')
src_cu = Glob("*.cu", example_path)
src_cxx = Glob("*.cpp", example_path)
src_cu.sort();
src_cxx.sort();
src_all = src_cu + src_cxx;
examples_all = []
for s in src_all:
fn = os.path.splitext(os.path.basename(s));
t = "thrust."+group+"."+fn[0]
e = fn[1]
mkfile = example_template % {"EXAMPLE_SRC" : s, "EXAMPLE_NAME" : t}
f = open(os.path.join(mk_path,t+".mk"), 'w')
f.write(mkfile)
f.close()
examples_all.append(os.path.join(mk_path,t))
return examples_all
## relpath : backport of os.relpath from python 2.6+
def relpath(path, start):
"""Return a relative version of a path"""
import posixpath
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list)
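# Illustrative sketch (not part of the original script): the backported helper
# behaves like os.path.relpath for POSIX-style paths.
def _example_relpath():
    print(relpath("/src/thrust/internal/benchmark", "/src/thrust/ci"))  # ../internal/benchmark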
mk_path=sys.argv[1]
REL_DIR="../../"
if (len(sys.argv) > 2):
root_path=sys.argv[2];
mk_path = relpath(mk_path, root_path)
REL_DIR = relpath(root_path,mk_path)
try:
sh.rmtree(mk_path)
except:
pass
os.makedirs(mk_path)
tests_all, dependencies_all = generate_test_mk(mk_path, "testing/", "test", REL_DIR)
tests_cu, dependencies_cu = generate_test_mk(mk_path, "testing/cuda/", "test.cuda", REL_DIR)
tests_all.extend(tests_cu)
dependencies_all.extend(dependencies_cu)
testing_mk = ""
for t in tests_all:
testing_mk += "PROJECTS += "+t+"\n"
testing_mk += "PROJECTS += internal/build/testframework\n"
f = open(os.path.join(mk_path,"testing.mk"),'w')
f.write(testing_mk)
f.close()
dependencies_mk = ""
for d in dependencies_all:
dependencies_mk += d + "\n"
f = open(os.path.join(mk_path,"dependencies.mk"),'w')
f.write(dependencies_mk)
f.close()
examples_mk = ""
examples_all = generate_example_mk(mk_path, "examples/", "example", REL_DIR)
examples_cuda = generate_example_mk(mk_path, "examples/cuda/", "example.cuda", REL_DIR)
examples_all.extend(examples_cuda)
for e in examples_all:
examples_mk += "PROJECTS += "+e+"\n"
f = open(os.path.join(mk_path,"examples.mk"),'w')
f.write(examples_mk)
f.close()
| cccl-main | thrust/generate_mk.py |
#! /usr/bin/env python
# Copyright (c) 2022 NVIDIA Corporation
# Reply-To: Allison Vacanti <[email protected]>
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Released under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
help_text = """%(prog)s [reference.json compare.json | reference_dir/ compare_dir/]
This script:
1. Runs `top -bco RES`, continuously extracting the memory usage of each process.
2. If a process uses more than `log_threshold` GiB and exceeds any other recorded
entry for the process, it is stored in `entries`.
3. When this script receives SIGINT, it writes two files:
* `log_file` will contain all recorded max-memory-per-process entries
* `fail_file` will contain all entries that exceed `fail_threshold`
"""
import argparse
import os
import re
import signal
import sys
from subprocess import Popen, PIPE, STDOUT
parser = argparse.ArgumentParser(prog='memmon.py', usage=help_text)
parser.add_argument('--log-threshold', type=float, dest='log_threshold',
default=0.5,
help='Logging threshold in GiB.')
parser.add_argument('--fail-threshold', type=float, dest='fail_threshold',
default=2,
help='Failure threshold in GiB.')
parser.add_argument('--log-file', type=str, dest='log_file', default='memmon_log',
help='Output file for log entries.')
args, unused = parser.parse_known_args()
entries = {}
def signal_handler(sig, frame):
# Sort by mem:
sortentries = sorted(entries.items(), key=lambda x: x[1], reverse=True)
lf = open(args.log_file, "w")
for com, mem in sortentries:
status = "PASS"
if mem >= args.fail_threshold:
status = "FAIL"
line = "%4s | %3.1f GiB | %s\n" % (status, mem, com)
lf.write(line)
lf.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Find the toprc config file and configure top's env.
# This config:
# - Hides all columns except for RES and COMMAND
# - Sorts by RES
# - Enables long command strings (-c)
script_dir = os.path.dirname(os.path.realpath(__file__))
config_dir = os.path.join(script_dir, 'memmon_config')
proc = Popen(["top", "-b", "-w", "512"],
stdin=PIPE, stdout=PIPE, stderr=STDOUT,
env={"XDG_CONFIG_HOME": config_dir})
regex = re.compile("^\\s*([0-9.]+[kmgtp]?)\\s+(.+)\\s*$")
# Convert a memory string from top into floating point GiB
def parse_mem(mem_str):
if mem_str[-1] == "k":
return float(mem_str[:-1]) / (1024 * 1024)
elif mem_str[-1] == "m":
return float(mem_str[:-1]) / (1024)
elif mem_str[-1] == "g":
return float(mem_str[:-1])
elif mem_str[-1] == "t":
return float(mem_str[:-1]) * 1024
elif mem_str[-1] == "p": # please no
return float(mem_str[:-1]) * 1024 * 1024
# bytes:
return float(mem_str) / (1024 * 1024 * 1024)
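# Illustrative sketch (not part of the original script): unit suffixes printed
# by top are normalized to GiB.
def _example_parse_mem():
    print(parse_mem("1048576k"))  # 1.0 (KiB -> GiB)
    print(parse_mem("512m"))      # 0.5 (MiB -> GiB)
    print(parse_mem("2.5g"))      # 2.5 (already GiB)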
for line in proc.stdout:
line = line.decode()
match = regex.match(line)
if match:
mem = parse_mem(match.group(1))
if mem < args.log_threshold and mem < args.fail_threshold:
continue
com = match.group(2)
if com in entries and entries[com] > mem:
continue
if mem >= args.fail_threshold:
# Print a notice immediately -- this helps identify the failures
# as they happen, since `com` may not provide enough info.
print("memmon.py failure: Build step exceed memory threshold:\n"
" - Threshold: %3.1f GiB\n"
" - Usage: %3.1f GiB\n"
" - Command: %s" % (args.fail_threshold, mem, com))
entries[com] = mem
| cccl-main | thrust/ci/common/memmon.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
###############################################################################
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# XXX Put code shared with `compare_benchmark_results.py` in a common place.
# XXX Relative uncertainty.
from sys import exit, stdout
from os.path import splitext
from itertools import imap # Lazy map.
from math import sqrt, log10, floor
from collections import deque
from argparse import ArgumentParser as argument_parser
from csv import DictReader as csv_dict_reader
from csv import DictWriter as csv_dict_writer
from re import compile as regex_compile
###############################################################################
def unpack_tuple(f):
"""Return a unary function that calls `f` with its argument unpacked."""
return lambda args: f(*iter(args))
def strip_dict(d):
"""Strip leading and trailing whitespace from all keys and values in `d`."""
d.update({key: value.strip() for (key, value) in d.items()})
def merge_dicts(d0, d1):
"""Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
d = d0.copy()
d.update(d1)
return d
def strip_list(l):
"""Strip leading and trailing whitespace from all values in `l`."""
for i, value in enumerate(l): l[i] = value.strip()
###############################################################################
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x)
def try_int_or_float(x):
"""Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
returned unmodified if conversion fails.
"""
try:
return int_or_float(x)
except ValueError:
return x
###############################################################################
def find_significant_digit(x):
"""Return the significant digit of the number x. The result is the number of
digits after the decimal place to round to (negative numbers indicate rounding
before the decimal place)."""
if x == 0: return 0
return -int(floor(log10(abs(x))))
def round_with_int_conversion(x, ndigits = None):
  """Rounds `x` to `ndigits` after the decimal place. If `ndigits` is less
  than 1, convert the result to `int`. If `ndigits` is `None`, the significant
  digit of `x` is used."""
  if ndigits is None: ndigits = find_significant_digit(x)
  x_rounded = round(x, ndigits)
  return int(x_rounded) if ndigits < 1 else x_rounded
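# Illustrative sketch (not part of the original script): how the significant
# digit drives the rounding helpers above.
def _example_rounding():
  print(find_significant_digit(0.00321))    # 3
  print(round_with_int_conversion(0.00321)) # 0.003
  print(find_significant_digit(1234.5))     # -3
  print(round_with_int_conversion(1234.5))  # 1000 (converted to int)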
###############################################################################
class measured_variable(object):
"""A meta-variable representing measured data. It is composed of three raw
variables plus units meta-data.
Attributes:
quantity (`str`) :
Name of the quantity variable of this object.
uncertainty (`str`) :
Name of the uncertainty variable of this object.
sample_size (`str`) :
Name of the sample size variable of this object.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
class measured_value(object):
"""An object that represents a value determined by multiple measurements.
Attributes:
quantity (scalar) :
The quantity of the value, e.g. the arithmetic mean.
uncertainty (scalar) :
The measurement uncertainty, e.g. the sample standard deviation.
sample_size (`int`) :
The number of observations contributing to the value.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
###############################################################################
def arithmetic_mean(X):
"""Computes the arithmetic mean of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
.. math::
u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
"""
return sum(X) / len(X)
def sample_variance(X, u = None):
"""Computes the sample variance of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
    * `v` denote the sample variance of `X`.
.. math::
v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
"""
if u is None: u = arithmetic_mean(X)
return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
def sample_standard_deviation(X, u = None, v = None):
"""Computes the sample standard deviation of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
v (number) : The sample variance of `X`.
"""
if u is None: u = arithmetic_mean(X)
if v is None: v = sample_variance(X, u)
return sqrt(v)
def combine_sample_size(As):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
.. math::
    n = \sum_{i = 0}^{g - 1} n_i
"""
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
def combine_arithmetic_mean(As, n = None):
"""Computes the combined arithmetic mean of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
.. math::
    u = \frac{\sum_{i = 0}^{g - 1} n_i u_i}{n}
"""
if n is None: n = combine_sample_size(As)
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
def combine_sample_variance(As, n = None, u = None):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
    * `v` denote the combined sample variance of `As`.
.. math::
v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
"""
  # Resolve `n` before comparing it, so that a `None` default is handled correctly.
  if n is None: n = combine_sample_size(As)
  if n <= 1: return 0
  if u is None: u = combine_arithmetic_mean(As, n)
return sum(imap(unpack_tuple(
lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
), As)) / (n - 1)
def combine_sample_standard_deviation(As, n = None, u = None, v = None):
"""Computes the combined sample standard deviation of a group of
`measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
    * `v` denote the combined sample variance of `As`.
    * `s` denote the combined sample standard deviation of `As`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
v (number) : The combined sample variance of `As`.
"""
  # Resolve `n` before comparing it, so that a `None` default is handled correctly.
  if n is None: n = combine_sample_size(As)
  if n <= 1: return 0
  if u is None: u = combine_arithmetic_mean(As, n)
  if v is None: v = combine_sample_variance(As, n, u)
return sqrt(v)
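# Illustrative sketch (not part of the original script): pooling two
# hypothetical groups of measurements with the combine helpers above. The
# helper name `_example_pooled_statistics` and the numbers are made up.
def _example_pooled_statistics():
  # Two groups reported as (mean, sample standard deviation, sample size).
  groups = [measured_value(10.0, 1.0, 5), measured_value(12.0, 2.0, 5)]
  n = combine_sample_size(groups)                        # 10
  u = combine_arithmetic_mean(groups, n)                 # 11.0
  s = combine_sample_standard_deviation(groups, n, u)    # ~1.83
  return (n, u, s)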
###############################################################################
def process_program_arguments():
ap = argument_parser(
description = (
"Aggregates the results of multiple runs of benchmark results stored in "
"CSV format."
)
)
ap.add_argument(
"-d", "--dependent-variable",
help = ("Treat the specified three variables as a dependent variable. The "
"1st variable is the measured quantity, the 2nd is the uncertainty "
"of the measurement and the 3rd is the sample size. The defaults "
"are the dependent variables of Thrust's benchmark suite. May be "
"specified multiple times."),
action = "append", type = str, dest = "dependent_variables",
metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
)
ap.add_argument(
"-p", "--preserve-whitespace",
help = ("Don't trim leading and trailing whitespace from each CSV cell."),
action = "store_true", default = False
)
ap.add_argument(
"-o", "--output-file",
help = ("The file that results are written to. If `-`, results are "
"written to stdout."),
action = "store", type = str, default = "-",
metavar = "OUTPUT"
)
ap.add_argument(
"input_files",
help = ("Input CSV files. The first two rows should be a header. The 1st "
"header row specifies the name of each variable, and the 2nd "
"header row specifies the units for that variable."),
type = str, nargs = "+",
metavar = "INPUTS"
)
return ap.parse_args()
###############################################################################
def filter_comments(f, s = "#"):
"""Return an iterator to the file `f` which filters out all lines beginning
with `s`."""
return filter(lambda line: not line.startswith(s), f)
###############################################################################
class io_manager(object):
"""Manages I/O operations and represents the input data as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`. It can be used with `with`.
Attributes:
preserve_whitespace (`bool`) :
If `False`, leading and trailing whitespace is stripped from each CSV cell.
writer (`csv_dict_writer`) :
CSV writer object that the output is written to.
output_file (`file` or `stdout`) :
The output `file` object.
readers (`list` of `csv_dict_reader`s) :
List of input files as CSV reader objects.
input_files (list of `file`s) :
List of input `file` objects.
variable_names (`list` of `str`s) :
Names of the variables, in order.
variable_units (`list` of `str`s) :
Units of the variables, in order.
"""
def __init__(self, input_files, output_file, preserve_whitespace = True):
"""Read input files and open the output file and construct a new `io_manager`
object.
If `preserve_whitespace` is `False`, leading and trailing whitespace is
stripped from each CSV cell.
Raises
AssertionError :
If `len(input_files) <= 0` or `type(preserve_whitespace) != bool`.
"""
assert len(input_files) > 0, "No input files provided."
assert type(preserve_whitespace) == bool
self.preserve_whitespace = preserve_whitespace
self.readers = deque()
self.variable_names = None
self.variable_units = None
self.input_files = deque()
for input_file in input_files:
input_file_object = open(input_file)
reader = csv_dict_reader(filter_comments(input_file_object))
if not self.preserve_whitespace:
strip_list(reader.fieldnames)
if self.variable_names is None:
self.variable_names = reader.fieldnames
else:
# Make sure all inputs have the same schema.
assert self.variable_names == reader.fieldnames, \
"Input file (`" + input_file + "`) variable schema `" + \
str(reader.fieldnames) + "` does not match the variable schema `" + \
str(self.variable_names) + "`."
# Consume the next row, which should be the second line of the header.
variable_units = reader.next()
if not self.preserve_whitespace:
strip_dict(variable_units)
if self.variable_units is None:
self.variable_units = variable_units
else:
# Make sure all inputs have the same units schema.
assert self.variable_units == variable_units, \
"Input file (`" + input_file + "`) units schema `" + \
str(variable_units) + "` does not match the units schema `" + \
str(self.variable_units) + "`."
self.readers.append(reader)
self.input_files.append(input_file_object)
if output_file == "-": # Output to stdout.
self.output_file = stdout
else: # Output to user-specified file.
self.output_file = open(output_file, "w")
self.writer = csv_dict_writer(
self.output_file, fieldnames = self.variable_names
)
def __enter__(self):
"""Called upon entering a `with` statement."""
return self
def __exit__(self, *args):
"""Called upon exiting a `with` statement."""
if self.output_file is stdout:
self.output_file = None
elif self.output_file is not None:
self.output_file.__exit__(*args)
for input_file in self.input_files:
input_file.__exit__(*args)
#############################################################################
# Input Stream.
def __iter__(self):
"""Return an iterator to the input sequence.
This is a requirement for the `Iterable` protocol.
"""
return self
def next(self):
"""Consume and return the next record (a `dict` representing a CSV row) in
the input.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more input.
"""
if len(self.readers) == 0:
raise StopIteration()
try:
row = self.readers[0].next()
if not self.preserve_whitespace: strip_dict(row)
return row
except StopIteration:
      # The current reader is empty, so pop it, pop its input file, close the
# input file, and then call ourselves again.
self.readers.popleft()
self.input_files.popleft().close()
return self.next()
#############################################################################
# Output.
def write_header(self):
"""Write the header for the output CSV file."""
# Write the first line of the header.
self.writer.writeheader()
# Write the second line of the header.
self.writer.writerow(self.variable_units)
def write(self, d):
"""Write a record (a `dict`) to the output CSV file."""
self.writer.writerow(d)
###############################################################################
class dependent_variable_parser(object):
"""Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""
#############################################################################
# Grammar
# Parse a variable_name.
variable_name_rule = r'[^,]+'
# Parse a variable classification.
dependent_variable_rule = r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')'
engine = regex_compile(dependent_variable_rule)
#############################################################################
def __call__(self, s):
"""Parses the string `s` with the form "AVG,STDEV,TRIALS".
Returns:
A `measured_variable`.
Raises:
AssertionError : If parsing fails.
"""
match = self.engine.match(s)
assert match is not None, \
"Dependent variable (-d) `" +s+ "` is invalid, the format is " + \
"`AVG,STDEV,TRIALS`."
return measured_variable(match.group(1), match.group(2), match.group(3))
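# Illustrative sketch (not part of the original script): parsing a
# `--dependent-variable` argument with the class above. The variable names in
# the example string are hypothetical.
def _example_parse_dependent_variable():
  parse = dependent_variable_parser()
  mv = parse("Average Walltime,Walltime Uncertainty,Trials")
  # mv.quantity == "Average Walltime", mv.uncertainty == "Walltime Uncertainty",
  # mv.sample_size == "Trials", and mv.units is None.
  return mv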
###############################################################################
class record_aggregator(object):
"""Consumes and combines records and represents the result as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`.
Attributes:
dependent_variables (`list` of `measured_variable`s) :
A list of dependent variables provided on the command line.
dataset (`dict`) :
A mapping of distinguishing (e.g. control + independent) values (`tuple`s
of variable-quantity pairs) to `list`s of dependent values (`dict`s from
variables to lists of cells).
in_order_dataset_keys :
A list of unique dataset keys (e.g. distinguishing variables) in order of
appearance.
"""
parse_dependent_variable = dependent_variable_parser()
def __init__(self, raw_dependent_variables):
"""Parse dependent variables and construct a new `record_aggregator` object.
Raises:
AssertionError : If parsing of dependent variables fails.
"""
self.dependent_variables = []
if raw_dependent_variables is not None:
for variable in raw_dependent_variables:
self.dependent_variables.append(self.parse_dependent_variable(variable))
self.dataset = {}
self.in_order_dataset_keys = deque()
#############################################################################
# Insertion.
def append(self, record):
"""Add `record` to the dataset.
Raises:
ValueError : If any `str`-to-numeric conversions fail.
"""
# The distinguishing variables are the control and independent variables.
# They form the key for each record in the dataset. Records with the same
# distinguishing variables are treated as observations of the same data
# point.
dependent_values = {}
# To allow the same sample size variable to be used for multiple dependent
# variables, we don't pop sample size variables until we're done processing
# all variables.
sample_size_variables = []
# Separate the dependent values from the distinguishing variables and
# perform `str`-to-numeric conversions.
for variable in self.dependent_variables:
quantity, uncertainty, sample_size, units = variable.as_tuple()
dependent_values[quantity] = [int_or_float(record.pop(quantity))]
dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
dependent_values[sample_size] = [int(record[sample_size])]
sample_size_variables.append(sample_size)
# Pop sample size variables.
for sample_size_variable in sample_size_variables:
# Allowed to fail, as we may have duplicates.
record.pop(sample_size_variable, None)
# `dict`s aren't hashable, so create a tuple of key-value pairs.
distinguishing_values = tuple(record.items())
if distinguishing_values in self.dataset:
# These distinguishing values already exist, so get the `dict` they're
# mapped to, look up each key in `dependent_values` in the `dict`, and
      # add the corresponding quantity in `dependent_values` to the list in
      # the `dict`.
for variable, columns in dependent_values.iteritems():
self.dataset[distinguishing_values][variable] += columns
else:
# These distinguishing values aren't in the dataset, so add them and
# record them in `in_order_dataset_keys`.
self.dataset[distinguishing_values] = dependent_values
self.in_order_dataset_keys.append(distinguishing_values)
#############################################################################
# Postprocessing.
def combine_dependent_values(self, dependent_values):
"""Takes a mapping of dependent variables to lists of cells and returns
a new mapping with the cells combined.
Raises:
AssertionError : If class invariants were violated.
"""
combined_dependent_values = dependent_values.copy()
for variable in self.dependent_variables:
quantity, uncertainty, sample_size, units = variable.as_tuple()
quantities = dependent_values[quantity]
uncertainties = dependent_values[uncertainty]
sample_sizes = dependent_values[sample_size]
if type(sample_size) is list:
# Sample size hasn't been combined yet.
assert len(quantities) == len(uncertainties) \
and len(uncertainties) == len(sample_sizes), \
"Length of quantities list `(" + str(len(quantities)) + ")`, " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
"),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
")` are not the same."
else:
# Another dependent variable that uses our sample size has combined it
# already.
assert len(quantities) == len(uncertainties), \
"Length of quantities list `(" + str(len(quantities)) + ")` and " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
")` are not the same."
# Convert the three separate `list`s into one list of `measured_value`s.
measured_values = []
for i in range(len(quantities)):
mv = measured_value(
quantities[i], uncertainties[i], sample_sizes[i], units
)
measured_values.append(mv)
# Combine the `measured_value`s.
combined_sample_size = combine_sample_size(
measured_values
)
combined_arithmetic_mean = combine_arithmetic_mean(
measured_values, combined_sample_size
)
combined_sample_standard_deviation = combine_sample_standard_deviation(
measured_values, combined_sample_size, combined_arithmetic_mean
)
# Round the quantity and uncertainty to the significant digit of
# uncertainty and insert the combined values into the results.
sigdig = find_significant_digit(combined_sample_standard_deviation)
# combined_arithmetic_mean = round_with_int_conversion(
# combined_arithmetic_mean, sigdig
# )
# combined_sample_standard_deviation = round_with_int_conversion(
# combined_sample_standard_deviation, sigdig
# )
combined_dependent_values[quantity] = combined_arithmetic_mean
combined_dependent_values[uncertainty] = combined_sample_standard_deviation
combined_dependent_values[sample_size] = combined_sample_size
return combined_dependent_values
#############################################################################
# Output Stream.
def __iter__(self):
"""Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol.
"""
return self
def records(self):
"""Return an iterator to the output sequence of CSV rows (`dict`s of
variables to values).
"""
return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)
def next(self):
"""Produce the components of the next output record - a tuple of two
`dict`s. The first `dict` is a mapping of distinguishing variables to
distinguishing values, the second `dict` is a mapping of dependent
variables to combined dependent values. Combining the two dicts forms a
CSV row suitable for output.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more output.
AssertionError : If class invariants were violated.
"""
assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
"Number of dataset keys (`" + str(len(self.dataset.keys())) + \
"`) is not equal to the number of keys in the ordering list (`" + \
str(len(self.in_order_dataset_keys)) + "`)."
if len(self.in_order_dataset_keys) == 0:
raise StopIteration()
# Get the next set of distinguishing values and convert them to a `dict`.
raw_distinguishing_values = self.in_order_dataset_keys.popleft()
distinguishing_values = dict(raw_distinguishing_values)
dependent_values = self.dataset.pop(raw_distinguishing_values)
combined_dependent_values = self.combine_dependent_values(dependent_values)
return (distinguishing_values, combined_dependent_values)
###############################################################################
args = process_program_arguments()
if args.dependent_variables is None:
args.dependent_variables = [
"STL Average Walltime,STL Walltime Uncertainty,STL Trials",
"STL Average Throughput,STL Throughput Uncertainty,STL Trials",
"Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
"Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
]
# Read input files and open the output file.
with io_manager(args.input_files,
args.output_file,
args.preserve_whitespace) as iom:
# Parse dependent variable options.
ra = record_aggregator(args.dependent_variables)
# Add all input data to the `record_aggregator`.
for record in iom:
ra.append(record)
iom.write_header()
# Write combined results out.
for record in ra.records():
iom.write(record)
| cccl-main | thrust/internal/benchmark/combine_benchmark_results.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
###############################################################################
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# XXX Put code shared with `combine_benchmark_results.py` in a common place.
# XXX Relative uncertainty.
# XXX Create uncertain value class which is quantity + uncertainty.
from sys import exit, stdout
from os.path import splitext
from itertools import imap # Lazy map.
from math import sqrt, log10, floor
from collections import deque
from argparse import ArgumentParser as argument_parser
from argparse import Action as argument_action
from csv import DictReader as csv_dict_reader
from csv import DictWriter as csv_dict_writer
from re import compile as regex_compile
###############################################################################
def unpack_tuple(f):
"""Return a unary function that calls `f` with its argument unpacked."""
return lambda args: f(*iter(args))
def strip_dict(d):
"""Strip leading and trailing whitespace from all keys and values in `d`.
Returns:
The modified dict `d`.
"""
d.update({key: value.strip() for (key, value) in d.items()})
return d
def merge_dicts(d0, d1):
"""Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
d = d0.copy()
d.update(d1)
return d
def change_key_in_dict(d, old_key, new_key):
"""Change the key of the entry in `d` with key `old_key` to `new_key`. If
there is an existing entry
Returns:
The modified dict `d`.
Raises:
KeyError : If `old_key` is not in `d`.
"""
d[new_key] = d.pop(old_key)
return d
def key_from_dict(d):
"""Create a hashable key from a `dict` by converting the `dict` to a tuple."""
return tuple(sorted(d.items()))
def strip_list(l):
"""Strip leading and trailing whitespace from all values in `l`."""
for i, value in enumerate(l): l[i] = value.strip()
return l
def remove_from_list(l, item):
"""Remove the first occurence of `item` from list `l` and return a tuple of
the index that was removed and the element that was removed.
Raises:
ValueError : If `item` is not in `l`.
"""
idx = l.index(item)
item = l.pop(idx)
return (idx, item)
###############################################################################
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x)
def try_int_or_float(x):
"""Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
returned unmodified if conversion fails.
"""
try:
return int_or_float(x)
except ValueError:
return x
###############################################################################
def ranges_overlap(x1, x2, y1, y2):
"""Returns true if the ranges `[x1, x2]` and `[y1, y2]` overlap,
where `x1 <= x2` and `y1 <= y2`.
Raises:
AssertionError : If `x1 > x2` or `y1 > y2`.
"""
assert x1 <= x2
assert y1 <= y2
return x1 <= y2 and y1 <= x2
def ranges_overlap_uncertainty(x, x_unc, y, y_unc):
"""Returns true if the ranges `[x - x_unc, x + x_unc]` and
`[y - y_unc, y + y_unc]` overlap, where `x_unc >= 0` and `y_unc >= 0`.
Raises:
AssertionError : If `x_unc < 0` or `y_unc < 0`.
"""
assert x_unc >= 0
assert y_unc >= 0
return ranges_overlap(x - x_unc, x + x_unc, y - y_unc, y + y_unc)
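# Illustrative sketch (not part of the original script): how the overlap test
# above decides whether two uncertain values are distinguishable. The numbers
# are made up.
def _example_ranges_overlap_uncertainty():
  # 10 +/- 1 covers [9, 11] and 11.5 +/- 1 covers [10.5, 12.5]: the ranges
  # overlap, so the two measurements are treated as indistinguishable.
  indistinguishable = ranges_overlap_uncertainty(10.0, 1.0, 11.5, 1.0)    # True
  # With tighter uncertainties the ranges [9.8, 10.2] and [11.3, 11.7] are
  # disjoint, so the difference would be treated as significant.
  distinguishable = not ranges_overlap_uncertainty(10.0, 0.2, 11.5, 0.2)  # True
  return (indistinguishable, distinguishable)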
###############################################################################
# Formulas for propagation of uncertainty from:
#
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
#
# Even though it's Wikipedia, I trust it as I helped write that table.
#
# XXX Replace with a proper reference.
def uncertainty_multiplicative(f, A, A_abs_unc, B, B_abs_unc):
"""Compute the propagated uncertainty from the multiplication of two
uncertain values, `A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = AB` or
`f = A/B`, where `A != 0` and `B != 0`, the uncertainty in `f` is
approximately:
.. math::
\sigma_f = |f| \sqrt{\frac{\sigma_A}{A} ^ 2 + \frac{\sigma_B}{B} ^ 2}
Raises:
ZeroDivisionError : If `A == 0` or `B == 0`.
"""
  return abs(f) * sqrt((A_abs_unc / A) ** 2 + (B_abs_unc / B) ** 2)
def uncertainty_additive(c, A_abs_unc, d, B_abs_unc):
"""Compute the propagated uncertainty from addition of two uncertain values,
`A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = cA + dB`, where `c` and
`d` are certain constants, the uncertainty in `f` is approximately:
.. math::
    \sigma_f = \sqrt{c^2 \sigma_A^2 + d^2 \sigma_B^2}
"""
return sqrt(((c ** 2) * (A_abs_unc ** 2)) + ((d ** 2) * (B_abs_unc ** 2)))
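# Illustrative sketch (not part of the original script): propagating
# uncertainty through a product and a difference with the two helpers above.
# The quantities are hypothetical.
def _example_uncertainty_propagation():
  # f = A * B with A = 4 +/- 0.2 and B = 5 +/- 0.5, so f = 20:
  # sigma_f = |20| * sqrt((0.2 / 4)**2 + (0.5 / 5)**2) ~= 2.24.
  product_unc = uncertainty_multiplicative(20.0, 4.0, 0.2, 5.0, 0.5)
  # f = A - B with the same uncertainties (c = 1, d = -1):
  # sigma_f = sqrt(0.2**2 + 0.5**2) ~= 0.54.
  difference_unc = uncertainty_additive(1.0, 0.2, -1.0, 0.5)
  return (product_unc, difference_unc)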
###############################################################################
# XXX Create change class.
def absolute_change(old, new):
"""Computes the absolute change from old to new:
.. math::
absolute_change = new - old
"""
return new - old
def absolute_change_uncertainty(old, old_unc, new, new_unc):
"""Computes the uncertainty in the absolute change from old to new and returns
a tuple of the absolute change and the absolute change uncertainty.
"""
absolute_change = new - old
absolute_change_unc = uncertainty_additive(1.0, new_unc, -1.0, old_unc)
return (absolute_change, absolute_change_unc)
def percent_change(old, new):
"""Computes the percent change from old to new:
.. math::
percent_change = 100 \frac{new - old}{abs(old)}
"""
  # Scale by 100 so the result is a percentage, matching the formula above.
  return 100.0 * float(new - old) / abs(old)
def percent_change_uncertainty(old, old_unc, new, new_unc):
"""Computes the uncertainty in the percent change from old to new and returns
a tuple of the absolute change, the absolute change uncertainty, the percent
change and the percent change uncertainty.
"""
# Let's break this down into a few sub-operations:
#
# absolute_change = new - old <- Additive propagation.
# relative_change = change / abs(old) <- Multiplicative propagation.
# percent_change = 100 * y <- Multiplicative propagation.
if old == 0:
# We can't compute relative change because the old value is 0.
return (float("nan"), float("nan"), float("nan"), float("nan"))
(absolute_change, absolute_change_unc) = absolute_change_uncertainty(
old, old_unc, new, new_unc
)
if absolute_change == 0:
# We can't compute relative change uncertainty because the relative
# uncertainty of a value of 0 is undefined.
return (absolute_change, absolute_change_unc, float("nan"), float("nan"))
relative_change = float(absolute_change) / abs(old)
relative_change_unc = uncertainty_multiplicative(
relative_change, absolute_change, absolute_change_unc, old, old_unc
)
percent_change = 100.0 * relative_change
percent_change_unc = uncertainty_multiplicative(
percent_change, 100.0, 0.0, relative_change, relative_change_unc
)
return (
absolute_change, absolute_change_unc, percent_change, percent_change_unc
)
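# Illustrative sketch (not part of the original script): computing the change
# between a hypothetical baseline of 10 +/- 0.5 and an observed value of
# 12 +/- 0.5 with the helper above.
def _example_percent_change_uncertainty():
  result = percent_change_uncertainty(10.0, 0.5, 12.0, 0.5)
  # result == (2.0, ~0.71, 20.0, ~7.1): the absolute change, its uncertainty,
  # the percent change, and its uncertainty.
  return result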
###############################################################################
def find_significant_digit(x):
"""Return the significant digit of the number x. The result is the number of
digits after the decimal place to round to (negative numbers indicate rounding
before the decimal place)."""
if x == 0: return 0
return -int(floor(log10(abs(x))))
def round_with_int_conversion(x, ndigits = None):
"""Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less
than 1, convert the result to `int`. If `ndigits` is `None`, the significant
digit of `x` is used."""
if ndigits is None: ndigits = find_significant_digit(x)
x_rounded = round(x, ndigits)
return int(x_rounded) if ndigits < 1 else x_rounded
###############################################################################
class measured_variable(object):
"""A meta-variable representing measured data. It is composed of three raw
variables plus units meta-data.
Attributes:
quantity (`str`) :
Name of the quantity variable of this object.
uncertainty (`str`) :
Name of the uncertainty variable of this object.
sample_size (`str`) :
Name of the sample size variable of this object.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
class measured_value(object):
"""An object that represents a value determined by multiple measurements.
Attributes:
quantity (scalar) :
The quantity of the value, e.g. the arithmetic mean.
uncertainty (scalar) :
The measurement uncertainty, e.g. the sample standard deviation.
sample_size (`int`) :
The number of observations contributing to the value.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
###############################################################################
def arithmetic_mean(X):
"""Computes the arithmetic mean of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
.. math::
u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
"""
return sum(X) / len(X)
def sample_variance(X, u = None):
"""Computes the sample variance of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
    * `v` denote the sample variance of `X`.
.. math::
v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
"""
if u is None: u = arithmetic_mean(X)
return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
def sample_standard_deviation(X, u = None, v = None):
"""Computes the sample standard deviation of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
v (number) : The sample variance of `X`.
"""
if u is None: u = arithmetic_mean(X)
if v is None: v = sample_variance(X, u)
return sqrt(v)
def combine_sample_size(As):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
.. math::
    n = \sum_{i = 0}^{g - 1} n_i
"""
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
def combine_arithmetic_mean(As, n = None):
"""Computes the combined arithmetic mean of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
.. math::
    u = \frac{\sum_{i = 0}^{g - 1} n_i u_i}{n}
"""
if n is None: n = combine_sample_size(As)
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
def combine_sample_variance(As, n = None, u = None):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
    * `v` denote the combined sample variance of `As`.
.. math::
v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
"""
  # Resolve `n` before comparing it, so that a `None` default is handled correctly.
  if n is None: n = combine_sample_size(As)
  if n <= 1: return 0
  if u is None: u = combine_arithmetic_mean(As, n)
return sum(imap(unpack_tuple(
lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
), As)) / (n - 1)
def combine_sample_standard_deviation(As, n = None, u = None, v = None):
"""Computes the combined sample standard deviation of a group of
`measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
    * `v` denote the combined sample variance of `As`.
    * `s` denote the combined sample standard deviation of `As`.
.. math::
v &= \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
s &= \sqrt{v}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
v (number) : The combined sample variance of `As`.
"""
  # Resolve `n` before comparing it, so that a `None` default is handled correctly.
  if n is None: n = combine_sample_size(As)
  if n <= 1: return 0
  if u is None: u = combine_arithmetic_mean(As, n)
  if v is None: v = combine_sample_variance(As, n, u)
return sqrt(v)
###############################################################################
def store_const_multiple(const, *destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `const`."""
class store_const_multiple_action(argument_action):
def __init__(self, *args, **kwargs):
super(store_const_multiple_action, self).__init__(
metavar = None, nargs = 0, const = const, *args, **kwargs
)
def __call__(self, parser, namespace, values, option_string = None):
for destination in destinations:
setattr(namespace, destination, const)
return store_const_multiple_action
def store_true_multiple(*destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `True`."""
return store_const_multiple(True, *destinations)
def store_false_multiple(*destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `False`."""
return store_const_multiple(False, *destinations)
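# Illustrative sketch (not part of the original script): wiring one flag to
# several destinations with the action factory above, as `-a`/`--output-all`
# does below. The flag names `--alpha`, `--beta` and `--all` are hypothetical.
def _example_store_true_multiple():
  ap = argument_parser()
  ap.add_argument("--alpha", action = "store_true", default = False)
  ap.add_argument("--beta",  action = "store_true", default = False)
  ap.add_argument("--all",   action = store_true_multiple("alpha", "beta"))
  args = ap.parse_args(["--all"])
  # Both `args.alpha` and `args.beta` are now `True`.
  return args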
###############################################################################
def process_program_arguments():
ap = argument_parser(
description = (
"Compares two sets of combined performance results and identifies "
"statistically significant changes."
)
)
ap.add_argument(
"baseline_input_file",
help = ("CSV file containing the baseline performance results. The first "
"two rows should be a header. The 1st header row specifies the "
"name of each variable, and the 2nd header row specifies the units "
"for that variable. The baseline results may be a superset of the "
"observed performance results, but the reverse is not true. The "
"baseline results must contain data for every datapoint in the "
"observed performance results."),
type = str
)
ap.add_argument(
"observed_input_file",
help = ("CSV file containing the observed performance results. The first "
"two rows should be a header. The 1st header row specifies the name "
"of header row specifies the units for that variable."),
type = str
)
ap.add_argument(
"-o", "--output-file",
help = ("The file that results are written to. If `-`, results are "
"written to stdout."),
action = "store", type = str, default = "-",
metavar = "OUTPUT"
)
ap.add_argument(
"-c", "--control-variable",
help = ("Treat the specified variable as a control variable. This means "
"it will be filtered out when forming dataset keys. For example, "
"this could be used to ignore a timestamp variable that is "
"different in the baseline and observed results. May be specified "
"multiple times."),
action = "append", type = str, dest = "control_variables", default = [],
metavar = "QUANTITY"
)
ap.add_argument(
"-d", "--dependent-variable",
help = ("Treat the specified three variables as a dependent variable. The "
"1st variable is the measured quantity, the 2nd is the uncertainty "
"of the measurement and the 3rd is the sample size. The defaults "
"are the dependent variables of Thrust's benchmark suite. May be "
"specified multiple times."),
action = "append", type = str, dest = "dependent_variables", default = [],
metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
)
ap.add_argument(
"-t", "--change-threshold",
help = ("Treat relative changes less than this amount (a percentage) as "
"statistically insignificant. The default is 5%%."),
action = "store", type = float, default = 5,
metavar = "PERCENTAGE"
)
ap.add_argument(
"-p", "--preserve-whitespace",
help = ("Don't trim leading and trailing whitespace from each CSV cell."),
action = "store_true", default = False
)
ap.add_argument(
"--output-all-variables",
help = ("Don't omit original absolute values in output."),
action = "store_true", default = False
)
ap.add_argument(
"--output-all-datapoints",
help = ("Don't omit datapoints that are statistically indistinguishable "
"in output."),
action = "store_true", default = False
)
ap.add_argument(
"-a", "--output-all",
help = ("Equivalent to `--output-all-variables --output-all-datapoints`."),
action = store_true_multiple("output_all_variables", "output_all_datapoints")
)
return ap.parse_args()
###############################################################################
def filter_comments(f, s = "#"):
"""Return an iterator to the file `f` which filters out all lines beginning
with `s`."""
return filter(lambda line: not line.startswith(s), f)
###############################################################################
class io_manager(object):
"""Manages I/O operations and represents the input data as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`. It can be used with `with`.
Attributes:
preserve_whitespace (`bool`) :
If `False`, leading and trailing whitespace is stripped from each CSV cell.
writer (`csv_dict_writer`) :
CSV writer object that the output is written to.
output_file (`file` or `stdout`) :
The output `file` object.
baseline_reader (`csv_dict_reader`) :
CSV reader object for the baseline results.
observed_reader (`csv_dict_reader`) :
CSV reader object for the observed results.
baseline_input_file (`file`) :
`file` object for the baseline results.
observed_input_file (`file`) :
      `file` object for the observed results.
variable_names (`list` of `str`s) :
Names of the variables, in order.
variable_units (`list` of `str`s) :
Units of the variables, in order.
"""
def __init__(self,
baseline_input_file, observed_input_file,
output_file,
preserve_whitespace = False):
"""Read input files and open the output file and construct a new `io_manager`
object.
If `preserve_whitespace` is `False`, leading and trailing whitespace is
stripped from each CSV cell.
Raises
AssertionError :
If `type(preserve_whitespace) != bool`.
"""
assert type(preserve_whitespace) == bool
self.preserve_whitespace = preserve_whitespace
# Open baseline results.
self.baseline_input_file = open(baseline_input_file)
self.baseline_reader = csv_dict_reader(
filter_comments(self.baseline_input_file)
)
if not self.preserve_whitespace:
strip_list(self.baseline_reader.fieldnames)
self.variable_names = list(self.baseline_reader.fieldnames) # Copy.
self.variable_units = self.baseline_reader.next()
if not self.preserve_whitespace:
strip_dict(self.variable_units)
# Open observed results.
self.observed_input_file = open(observed_input_file)
self.observed_reader = csv_dict_reader(
filter_comments(self.observed_input_file)
)
if not self.preserve_whitespace:
strip_list(self.observed_reader.fieldnames)
# Make sure all inputs have the same variables schema.
assert self.variable_names == self.observed_reader.fieldnames, \
"Observed results input file (`" + observed_input_file + "`) " + \
"variable schema `" + str(self.observed_reader.fieldnames) + "` does " + \
"not match the baseline results input file (`" + baseline_input_file + \
"`) variable schema `" + str(self.variable_names) + "`."
# Consume the next row, which should be the second line of the header.
observed_variable_units = self.observed_reader.next()
if not self.preserve_whitespace:
strip_dict(observed_variable_units)
# Make sure all inputs have the same units schema.
assert self.variable_units == observed_variable_units, \
"Observed results input file (`" + observed_input_file + "`) " + \
"units schema `" + str(observed_variable_units) + "` does not " + \
"match the baseline results input file (`" + baseline_input_file + \
"`) units schema `" + str(self.variable_units) + "`."
if output_file == "-": # Output to stdout.
self.output_file = stdout
else: # Output to user-specified file.
self.output_file = open(output_file, "w")
self.writer = csv_dict_writer(
self.output_file, fieldnames = self.variable_names
)
def __enter__(self):
"""Called upon entering a `with` statement."""
return self
def __exit__(self, *args):
"""Called upon exiting a `with` statement."""
if self.output_file is stdout:
self.output_file = None
elif self.output_file is not None:
self.output_file.__exit__(*args)
self.baseline_input_file.__exit__(*args)
self.observed_input_file.__exit__(*args)
def append_variable(self, name, units):
"""Add a new variable to the output schema."""
self.variable_names.append(name)
self.variable_units.update({name : units})
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
def insert_variable(self, idx, name, units):
"""Insert a new variable into the output schema at index `idx`."""
self.variable_names.insert(idx, name)
self.variable_units.update({name : units})
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
def remove_variable(self, name):
"""Remove variable from the output schema and return a tuple of the variable
index and the variable units.
Raises:
ValueError : If `name` is not in the output schema.
"""
# Remove the variable and get its index, which we'll need to remove the
# corresponding units entry.
(idx, item) = remove_from_list(self.variable_names, name)
# Remove the units entry.
units = self.variable_units.pop(item)
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
return (idx, units)
#############################################################################
# Input Stream.
  def baseline(self):
    """Return an iterator to the baseline results input sequence."""
    # Honor `--preserve-whitespace`: only strip cells when it is not set.
    if self.preserve_whitespace:
      return iter(self.baseline_reader)
    return imap(strip_dict, self.baseline_reader)
  def observed(self):
    """Return an iterator to the observed results input sequence."""
    if self.preserve_whitespace:
      return iter(self.observed_reader)
    return imap(strip_dict, self.observed_reader)
#############################################################################
# Output.
def write_header(self):
"""Write the header for the output CSV file."""
# Write the first line of the header.
self.writer.writeheader()
# Write the second line of the header.
self.writer.writerow(self.variable_units)
def write(self, d):
"""Write a record (a `dict`) to the output CSV file."""
self.writer.writerow(d)
###############################################################################
class dependent_variable_parser(object):
"""Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""
#############################################################################
# Grammar
# Parse a variable_name.
variable_name_rule = r'[^,]+'
# Parse a variable classification.
dependent_variable_rule = r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')'
engine = regex_compile(dependent_variable_rule)
#############################################################################
def __call__(self, s):
"""Parses the string `s` with the form "AVG,STDEV,TRIALS".
Returns:
A `measured_variable`.
Raises:
AssertionError : If parsing fails.
"""
match = self.engine.match(s)
assert match is not None, \
"Dependent variable (-d) `" +s+ "` is invalid, the format is " + \
"`AVG,STDEV,TRIALS`."
return measured_variable(match.group(1), match.group(2), match.group(3))
###############################################################################
class record_aggregator(object):
"""Consumes and combines records and represents the result as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`.
Attributes:
dependent_variables (`list` of `measured_variable`s) :
A list of dependent variables provided on the command line.
control_variables (`list` of `str`s) :
A list of control variables provided on the command line.
dataset (`dict`) :
A mapping of distinguishing (e.g. control + independent) values (`tuple`s
of variable-quantity pairs) to `list`s of dependent values (`dict`s from
variables to lists of cells).
in_order_dataset_keys :
A list of unique dataset keys (e.g. distinguishing variables) in order of
appearance.
"""
def __init__(self, dependent_variables, control_variables):
"""Construct a new `record_aggregator` object.
Raises:
AssertionError : If parsing of dependent variables fails.
"""
self.dependent_variables = dependent_variables
self.control_variables = control_variables
self.dataset = {}
self.in_order_dataset_keys = deque()
#############################################################################
# Insertion.
def key_from_dict(self, d):
"""Create a hashable key from a `dict` by filtering out control variables
and then converting the `dict` to a tuple.
Raises:
AssertionError : If any control variable was not found in `d`.
"""
distinguishing_values = d.copy()
# Filter out control variables.
for var in self.control_variables:
distinguishing_values.pop(var, None)
return key_from_dict(distinguishing_values)
def append(self, record):
"""Add `record` to the dataset.
Raises:
ValueError : If any `str`-to-numeric conversions fail.
"""
# The distinguishing variables are the control and independent variables.
# They form the key for each record in the dataset. Records with the same
# distinguishing variables are treated as observations of the same
# datapoint.
dependent_values = {}
# To allow the same sample size variable to be used for multiple dependent
# variables, we don't pop sample size variables until we're done processing
# all variables.
sample_size_variables = []
# Separate the dependent values from the distinguishing variables and
# perform `str`-to-numeric conversions.
for var in self.dependent_variables:
quantity, uncertainty, sample_size, units = var.as_tuple()
dependent_values[quantity] = [int_or_float(record.pop(quantity))]
dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
dependent_values[sample_size] = [int(record[sample_size])]
sample_size_variables.append(sample_size)
# Pop sample size variables.
for var in sample_size_variables:
# Allowed to fail, as we may have duplicates.
record.pop(var, None)
distinguishing_values = self.key_from_dict(record)
if distinguishing_values in self.dataset:
# These distinguishing values already exist, so get the `dict` they're
# mapped to, look up each key in `dependent_values` in the `dict`, and
      # add the corresponding quantity in `dependent_values` to the list in
      # the `dict`.
for var, columns in dependent_values.iteritems():
self.dataset[distinguishing_values][var] += columns
else:
# These distinguishing values aren't in the dataset, so add them and
# record them in `in_order_dataset_keys`.
self.dataset[distinguishing_values] = dependent_values
self.in_order_dataset_keys.append(distinguishing_values)
#############################################################################
# Postprocessing.
def combine_dependent_values(self, dependent_values):
"""Takes a mapping of dependent variables to lists of cells and returns
a new mapping with the cells combined.
Raises:
AssertionError : If class invariants were violated.
"""
combined_dependent_values = dependent_values.copy()
for var in self.dependent_variables:
quantity, uncertainty, sample_size, units = var.as_tuple()
quantities = dependent_values[quantity]
uncertainties = dependent_values[uncertainty]
sample_sizes = dependent_values[sample_size]
if type(sample_size) is list:
# Sample size hasn't been combined yet.
assert len(quantities) == len(uncertainties) \
and len(uncertainties) == len(sample_sizes), \
"Length of quantities list `(" + str(len(quantities)) + ")`, " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
"),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
")` are not the same."
else:
# Another dependent variable that uses our sample size has combined it
# already.
assert len(quantities) == len(uncertainties), \
"Length of quantities list `(" + str(len(quantities)) + ")` and " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
")` are not the same."
# Convert the three separate `list`s into one list of `measured_value`s.
measured_values = []
for i in range(len(quantities)):
mv = measured_value(
quantities[i], uncertainties[i], sample_sizes[i], units
)
measured_values.append(mv)
# Combine the `measured_value`s.
combined_sample_size = combine_sample_size(
measured_values
)
combined_arithmetic_mean = combine_arithmetic_mean(
measured_values, combined_sample_size
)
combined_sample_standard_deviation = combine_sample_standard_deviation(
measured_values, combined_sample_size, combined_arithmetic_mean
)
# Round the quantity and uncertainty to the significant digit of
# uncertainty and insert the combined values into the results.
sigdig = find_significant_digit(combined_sample_standard_deviation)
# combined_arithmetic_mean = round_with_int_conversion(
# combined_arithmetic_mean, sigdig
# )
# combined_sample_standard_deviation = round_with_int_conversion(
# combined_sample_standard_deviation, sigdig
# )
combined_dependent_values[quantity] = combined_arithmetic_mean
combined_dependent_values[uncertainty] = combined_sample_standard_deviation
combined_dependent_values[sample_size] = combined_sample_size
return combined_dependent_values
#############################################################################
# Output Stream.
def __iter__(self):
"""Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol.
"""
return self
def records(self):
"""Return an iterator to the output sequence of CSV rows (`dict`s of
variables to values).
"""
return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)
def next(self):
"""Produce the components of the next output record - a tuple of two
`dict`s. The first `dict` is a mapping of distinguishing variables to
distinguishing values, the second `dict` is a mapping of dependent
variables to combined dependent values. Combining the two dicts forms a
CSV row suitable for output.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more output.
AssertionError : If class invariants were violated.
"""
assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
"Number of dataset keys (`" + str(len(self.dataset.keys())) + \
"`) is not equal to the number of keys in the ordering list (`" + \
str(len(self.in_order_dataset_keys)) + "`)."
if len(self.in_order_dataset_keys) == 0:
raise StopIteration()
# Get the next set of distinguishing values and convert them to a `dict`.
raw_distinguishing_values = self.in_order_dataset_keys.popleft()
distinguishing_values = dict(raw_distinguishing_values)
dependent_values = self.dataset.pop(raw_distinguishing_values)
combined_dependent_values = self.combine_dependent_values(dependent_values)
return (distinguishing_values, combined_dependent_values)
def __getitem__(self, distinguishing_values):
"""Produce the dependent component, a `dict` mapping dependent variables to
combined dependent values, associated with `distinguishing_values`.
Args:
distinguishing_values (`dict`) :
A `dict` mapping distinguishing variables to distinguishing values.
Raises:
KeyError : If `distinguishing_values` is not in the dataset.
"""
raw_distinguishing_values = self.key_from_dict(distinguishing_values)
dependent_values = self.dataset[raw_distinguishing_values]
combined_dependent_values = self.combine_dependent_values(dependent_values)
return combined_dependent_values
###############################################################################
args = process_program_arguments()
if len(args.dependent_variables) == 0:
args.dependent_variables = [
"STL Average Walltime,STL Walltime Uncertainty,STL Trials",
"STL Average Throughput,STL Throughput Uncertainty,STL Trials",
"Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
"Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
]
# Parse dependent variable options.
dependent_variables = []
parse_dependent_variable = dependent_variable_parser()
#if args.dependent_variables is not None:
for var in args.dependent_variables:
dependent_variables.append(parse_dependent_variable(var))
# Read input files and open the output file.
with io_manager(args.baseline_input_file,
args.observed_input_file,
args.output_file,
args.preserve_whitespace) as iom:
# Create record aggregators.
baseline_ra = record_aggregator(dependent_variables, args.control_variables)
observed_ra = record_aggregator(dependent_variables, args.control_variables)
# Duplicate dependent variables: one for baseline results, one for observed
# results.
baseline_suffix = " - `{0}`".format(
args.baseline_input_file
)
observed_suffix = " - `{0}`".format(
args.observed_input_file
)
for var in dependent_variables:
# Remove the existing quantity variable:
#
# [ ..., a, b, c, ... ]
# ^- remove b at index i
#
(quantity_idx, quantity_units) = iom.remove_variable(var.quantity)
# If the `--output-all-variables` option was specified, add the new baseline
# and observed quantity variables. Note that we insert in the reverse of
# the order we desire (which is baseline then observed):
#
# [ ..., a, b_1, c, ... ]
# ^- insert b_1 at index i
#
# [ ..., a, b_0, b_1, c, ... ]
# ^- insert b_0 at index i
#
if args.output_all_variables:
iom.insert_variable(
quantity_idx, var.quantity + observed_suffix, quantity_units
)
iom.insert_variable(
quantity_idx, var.quantity + baseline_suffix, quantity_units
)
# Remove the existing uncertainty variable.
(uncertainty_idx, uncertainty_units) = iom.remove_variable(var.uncertainty)
# If the `--output-all-variables` option was specified, add the new baseline
# and observed uncertainty variables.
if args.output_all_variables:
iom.insert_variable(
uncertainty_idx, var.uncertainty + observed_suffix, uncertainty_units
)
iom.insert_variable(
uncertainty_idx, var.uncertainty + baseline_suffix, uncertainty_units
)
try:
# Remove the existing sample size variable.
(sample_size_idx, sample_size_units) = iom.remove_variable(var.sample_size)
# If the `--output-all-variables` option was specified, add the new
# baseline and observed sample size variables.
if args.output_all_variables:
iom.insert_variable(
sample_size_idx, var.sample_size + observed_suffix, sample_size_units
)
iom.insert_variable(
sample_size_idx, var.sample_size + baseline_suffix, sample_size_units
)
except ValueError:
# This is alright, because dependent variables may share the same sample
# size variable.
pass
for var in args.control_variables:
iom.remove_variable(var)
# Add change variables.
absolute_change_suffix = " - Change (`{0}` - `{1}`)".format(
args.observed_input_file, args.baseline_input_file
)
percent_change_suffix = " - % Change (`{0}` to `{1}`)".format(
args.observed_input_file, args.baseline_input_file
)
for var in dependent_variables:
iom.append_variable(var.quantity + absolute_change_suffix, var.units)
iom.append_variable(var.uncertainty + absolute_change_suffix, var.units)
iom.append_variable(var.quantity + percent_change_suffix, "")
iom.append_variable(var.uncertainty + percent_change_suffix, "")
# Add all baseline input data to the `record_aggregator`.
for record in iom.baseline():
baseline_ra.append(record)
for record in iom.observed():
observed_ra.append(record)
iom.write_header()
# Compare and output results.
for distinguishing_values, observed_dependent_values in observed_ra:
try:
baseline_dependent_values = baseline_ra[distinguishing_values]
except KeyError:
assert False, \
"Distinguishing value `" + \
str(baseline_ra.key_from_dict(distinguishing_values)) + \
"` was not found in the baseline results."
statistically_significant_change = False
record = distinguishing_values.copy()
# Compute changes, add the values and changes to the record, and identify
# changes that are statistically significant.
for var in dependent_variables:
# Compute changes.
baseline_quantity = baseline_dependent_values[var.quantity]
baseline_uncertainty = baseline_dependent_values[var.uncertainty]
baseline_sample_size = baseline_dependent_values[var.sample_size]
observed_quantity = observed_dependent_values[var.quantity]
observed_uncertainty = observed_dependent_values[var.uncertainty]
observed_sample_size = observed_dependent_values[var.sample_size]
(abs_change, abs_change_unc, per_change, per_change_unc) = \
percent_change_uncertainty(
baseline_quantity, baseline_uncertainty,
observed_quantity, observed_uncertainty
)
# Round the change quantities and uncertainties to the significant digit
# of uncertainty.
try:
abs_change_sigdig = max(
find_significant_digit(abs_change),
find_significant_digit(abs_change_unc),
)
# abs_change = round_with_int_conversion(
# abs_change, abs_change_sigdig
# )
# abs_change_unc = round_with_int_conversion(
# abs_change_unc, abs_change_sigdig
# )
      except ValueError:
# Any value errors should be due to NaNs returned by
# `percent_change_uncertainty` because quantities or change in
# quantities was 0. We can ignore these.
pass
try:
per_change_sigdig = max(
find_significant_digit(per_change),
find_significant_digit(per_change_unc)
)
# per_change = round_with_int_conversion(
# per_change, per_change_sigdig
# )
# per_change_unc = round_with_int_conversion(
# per_change_unc, per_change_sigdig
# )
      except ValueError:
# Any value errors should be due to NaNs returned by
# `percent_change_uncertainty` because quantities or change in
# quantities was 0. We can ignore these.
pass
# Add the values (if the `--output-all-variables` option was specified)
# and the changes to the record. Note that the record's schema is
# different from the original schema. If multiple dependent variables
# share the same sample size variable, it's fine - they will overwrite
# each other, but with the same value.
if args.output_all_variables:
record[var.quantity + baseline_suffix] = baseline_quantity
record[var.uncertainty + baseline_suffix] = baseline_uncertainty
record[var.sample_size + baseline_suffix] = baseline_sample_size
record[var.quantity + observed_suffix] = observed_quantity
record[var.uncertainty + observed_suffix] = observed_uncertainty
record[var.sample_size + observed_suffix] = observed_sample_size
record[var.quantity + absolute_change_suffix] = abs_change
record[var.uncertainty + absolute_change_suffix] = abs_change_unc
record[var.quantity + percent_change_suffix] = per_change
record[var.uncertainty + percent_change_suffix] = per_change_unc
      # If the ranges of uncertainty don't overlap and the percent change is
      # greater than the change threshold, then the change is statistically
      # significant.
overlap = ranges_overlap_uncertainty(
baseline_quantity, baseline_uncertainty,
observed_quantity, observed_uncertainty
)
if not overlap and per_change >= args.change_threshold:
statistically_significant_change = True
# Print the record if a statistically significant change was found or if the
# `--output-all-datapoints` option was specified.
if args.output_all_datapoints or statistically_significant_change:
iom.write(record)
| cccl-main | thrust/internal/benchmark/compare_benchmark_results.py |